Diffstat (limited to 'portage_with_autodep/pym')
-rw-r--r--  portage_with_autodep/pym/_emerge/AbstractDepPriority.py  29
-rw-r--r--  portage_with_autodep/pym/_emerge/AbstractEbuildProcess.py  266
-rw-r--r--  portage_with_autodep/pym/_emerge/AbstractPollTask.py  62
-rw-r--r--  portage_with_autodep/pym/_emerge/AsynchronousLock.py  288
-rw-r--r--  portage_with_autodep/pym/_emerge/AsynchronousTask.py  129
-rw-r--r--  portage_with_autodep/pym/_emerge/AtomArg.py  11
-rw-r--r--  portage_with_autodep/pym/_emerge/Binpkg.py  333
-rw-r--r--  portage_with_autodep/pym/_emerge/BinpkgEnvExtractor.py  66
-rw-r--r--  portage_with_autodep/pym/_emerge/BinpkgExtractorAsync.py  31
-rw-r--r--  portage_with_autodep/pym/_emerge/BinpkgFetcher.py  181
-rw-r--r--  portage_with_autodep/pym/_emerge/BinpkgPrefetcher.py  43
-rw-r--r--  portage_with_autodep/pym/_emerge/BinpkgVerifier.py  75
-rw-r--r--  portage_with_autodep/pym/_emerge/Blocker.py  15
-rw-r--r--  portage_with_autodep/pym/_emerge/BlockerCache.py  182
-rw-r--r--  portage_with_autodep/pym/_emerge/BlockerDB.py  124
-rw-r--r--  portage_with_autodep/pym/_emerge/BlockerDepPriority.py  13
-rw-r--r--  portage_with_autodep/pym/_emerge/CompositeTask.py  157
-rw-r--r--  portage_with_autodep/pym/_emerge/DepPriority.py  49
-rw-r--r--  portage_with_autodep/pym/_emerge/DepPriorityNormalRange.py  47
-rw-r--r--  portage_with_autodep/pym/_emerge/DepPrioritySatisfiedRange.py  85
-rw-r--r--  portage_with_autodep/pym/_emerge/Dependency.py  20
-rw-r--r--  portage_with_autodep/pym/_emerge/DependencyArg.py  33
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildBinpkg.py  46
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildBuild.py  426
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildBuildDir.py  109
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildExecuter.py  99
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildFetcher.py  302
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildFetchonly.py  32
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildIpcDaemon.py  108
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildMerge.py  56
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildMetadataPhase.py  133
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildPhase.py  350
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildProcess.py  21
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildSpawnProcess.py  16
-rw-r--r--  portage_with_autodep/pym/_emerge/EventsAnalyser.py  511
-rw-r--r--  portage_with_autodep/pym/_emerge/EventsLogger.py  180
-rw-r--r--  portage_with_autodep/pym/_emerge/FakeVartree.py  265
-rw-r--r--  portage_with_autodep/pym/_emerge/FifoIpcDaemon.py  81
-rw-r--r--  portage_with_autodep/pym/_emerge/JobStatusDisplay.py  292
-rw-r--r--  portage_with_autodep/pym/_emerge/MergeListItem.py  135
-rw-r--r--  portage_with_autodep/pym/_emerge/MetadataRegen.py  184
-rw-r--r--  portage_with_autodep/pym/_emerge/MiscFunctionsProcess.py  33
-rw-r--r--  portage_with_autodep/pym/_emerge/Package.py  700
-rw-r--r--  portage_with_autodep/pym/_emerge/PackageArg.py  19
-rw-r--r--  portage_with_autodep/pym/_emerge/PackageMerge.py  40
-rw-r--r--  portage_with_autodep/pym/_emerge/PackageUninstall.py  110
-rw-r--r--  portage_with_autodep/pym/_emerge/PackageVirtualDbapi.py  145
-rw-r--r--  portage_with_autodep/pym/_emerge/PipeReader.py  96
-rw-r--r--  portage_with_autodep/pym/_emerge/PollConstants.py  18
-rw-r--r--  portage_with_autodep/pym/_emerge/PollScheduler.py  398
-rw-r--r--  portage_with_autodep/pym/_emerge/PollSelectAdapter.py  73
-rw-r--r--  portage_with_autodep/pym/_emerge/ProgressHandler.py  22
-rw-r--r--  portage_with_autodep/pym/_emerge/QueueScheduler.py  116
-rw-r--r--  portage_with_autodep/pym/_emerge/RootConfig.py  34
-rw-r--r--  portage_with_autodep/pym/_emerge/Scheduler.py  1975
-rw-r--r--  portage_with_autodep/pym/_emerge/SequentialTaskQueue.py  89
-rw-r--r--  portage_with_autodep/pym/_emerge/SetArg.py  11
-rw-r--r--  portage_with_autodep/pym/_emerge/SlotObject.py  42
-rw-r--r--  portage_with_autodep/pym/_emerge/SpawnProcess.py  235
-rw-r--r--  portage_with_autodep/pym/_emerge/SubProcess.py  141
-rw-r--r--  portage_with_autodep/pym/_emerge/Task.py  42
-rw-r--r--  portage_with_autodep/pym/_emerge/TaskScheduler.py  25
-rw-r--r--  portage_with_autodep/pym/_emerge/TaskSequence.py  44
-rw-r--r--  portage_with_autodep/pym/_emerge/UninstallFailure.py  15
-rw-r--r--  portage_with_autodep/pym/_emerge/UnmergeDepPriority.py  41
-rw-r--r--  portage_with_autodep/pym/_emerge/UseFlagDisplay.py  122
-rw-r--r--  portage_with_autodep/pym/_emerge/__init__.py  2
-rw-r--r--  portage_with_autodep/pym/_emerge/_find_deep_system_runtime_deps.py  38
-rw-r--r--  portage_with_autodep/pym/_emerge/_flush_elog_mod_echo.py  15
-rw-r--r--  portage_with_autodep/pym/_emerge/actions.py  3123
-rw-r--r--  portage_with_autodep/pym/_emerge/clear_caches.py  19
-rw-r--r--  portage_with_autodep/pym/_emerge/countdown.py  22
-rw-r--r--  portage_with_autodep/pym/_emerge/create_depgraph_params.py  72
-rw-r--r--  portage_with_autodep/pym/_emerge/create_world_atom.py  92
-rw-r--r--  portage_with_autodep/pym/_emerge/depgraph.py  7029
-rw-r--r--  portage_with_autodep/pym/_emerge/emergelog.py  63
-rw-r--r--  portage_with_autodep/pym/_emerge/getloadavg.py  27
-rw-r--r--  portage_with_autodep/pym/_emerge/help.py  815
-rw-r--r--  portage_with_autodep/pym/_emerge/is_valid_package_atom.py  21
-rw-r--r--  portage_with_autodep/pym/_emerge/main.py  1910
-rw-r--r--  portage_with_autodep/pym/_emerge/resolver/__init__.py  2
-rw-r--r--  portage_with_autodep/pym/_emerge/resolver/backtracking.py  197
-rw-r--r--  portage_with_autodep/pym/_emerge/resolver/circular_dependency.py  267
-rw-r--r--  portage_with_autodep/pym/_emerge/resolver/output.py  888
-rw-r--r--  portage_with_autodep/pym/_emerge/resolver/output_helpers.py  576
-rw-r--r--  portage_with_autodep/pym/_emerge/resolver/slot_collision.py  978
-rw-r--r--  portage_with_autodep/pym/_emerge/search.py  385
-rw-r--r--  portage_with_autodep/pym/_emerge/show_invalid_depstring_notice.py  35
-rw-r--r--  portage_with_autodep/pym/_emerge/stdout_spinner.py  83
-rw-r--r--  portage_with_autodep/pym/_emerge/sync/__init__.py  2
-rw-r--r--  portage_with_autodep/pym/_emerge/sync/getaddrinfo_validate.py  29
-rw-r--r--  portage_with_autodep/pym/_emerge/sync/old_tree_timestamp.py  98
-rw-r--r--  portage_with_autodep/pym/_emerge/unmerge.py  578
-rw-r--r--  portage_with_autodep/pym/_emerge/userquery.py  55
-rw-r--r--  portage_with_autodep/pym/portage/__init__.py  610
-rw-r--r--  portage_with_autodep/pym/portage/_global_updates.py  250
-rw-r--r--  portage_with_autodep/pym/portage/_legacy_globals.py  81
-rw-r--r--  portage_with_autodep/pym/portage/_selinux.py  129
-rw-r--r--  portage_with_autodep/pym/portage/_sets/__init__.py  245
-rw-r--r--  portage_with_autodep/pym/portage/_sets/base.py  264
-rw-r--r--  portage_with_autodep/pym/portage/_sets/dbapi.py  383
-rw-r--r--  portage_with_autodep/pym/portage/_sets/files.py  341
-rw-r--r--  portage_with_autodep/pym/portage/_sets/libs.py  98
-rw-r--r--  portage_with_autodep/pym/portage/_sets/profiles.py  53
-rw-r--r--  portage_with_autodep/pym/portage/_sets/security.py  86
-rw-r--r--  portage_with_autodep/pym/portage/_sets/shell.py  44
-rw-r--r--  portage_with_autodep/pym/portage/cache/__init__.py  4
-rw-r--r--  portage_with_autodep/pym/portage/cache/anydbm.py  113
-rw-r--r--  portage_with_autodep/pym/portage/cache/cache_errors.py  62
-rw-r--r--  portage_with_autodep/pym/portage/cache/ebuild_xattr.py  171
-rw-r--r--  portage_with_autodep/pym/portage/cache/flat_hash.py  155
-rw-r--r--  portage_with_autodep/pym/portage/cache/flat_list.py  134
-rw-r--r--  portage_with_autodep/pym/portage/cache/fs_template.py  90
-rw-r--r--  portage_with_autodep/pym/portage/cache/mappings.py  485
-rw-r--r--  portage_with_autodep/pym/portage/cache/metadata.py  154
-rw-r--r--  portage_with_autodep/pym/portage/cache/metadata_overlay.py  105
-rw-r--r--  portage_with_autodep/pym/portage/cache/sql_template.py  301
-rw-r--r--  portage_with_autodep/pym/portage/cache/sqlite.py  245
-rw-r--r--  portage_with_autodep/pym/portage/cache/template.py  236
-rw-r--r--  portage_with_autodep/pym/portage/cache/util.py  170
-rw-r--r--  portage_with_autodep/pym/portage/cache/volatile.py  25
-rw-r--r--  portage_with_autodep/pym/portage/checksum.py  291
-rw-r--r--  portage_with_autodep/pym/portage/const.py  143
-rw-r--r--  portage_with_autodep/pym/portage/cvstree.py  293
-rw-r--r--  portage_with_autodep/pym/portage/data.py  122
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/_MergeProcess.py  282
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/__init__.py  302
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/_expand_new_virt.py  72
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/bintree.py  1366
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/cpv_expand.py  106
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/dep_expand.py  56
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/porttree.py  1168
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/vartree.py  4527
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/virtual.py  131
-rw-r--r--  portage_with_autodep/pym/portage/debug.py  120
-rw-r--r--  portage_with_autodep/pym/portage/dep/__init__.py  2432
-rw-r--r--  portage_with_autodep/pym/portage/dep/dep_check.py  679
-rw-r--r--  portage_with_autodep/pym/portage/dispatch_conf.py  188
-rw-r--r--  portage_with_autodep/pym/portage/eapi.py  50
-rw-r--r--  portage_with_autodep/pym/portage/eclass_cache.py  123
-rw-r--r--  portage_with_autodep/pym/portage/elog/__init__.py  182
-rw-r--r--  portage_with_autodep/pym/portage/elog/filtering.py  15
-rw-r--r--  portage_with_autodep/pym/portage/elog/messages.py  172
-rw-r--r--  portage_with_autodep/pym/portage/elog/mod_custom.py  19
-rw-r--r--  portage_with_autodep/pym/portage/elog/mod_echo.py  46
-rw-r--r--  portage_with_autodep/pym/portage/elog/mod_mail.py  43
-rw-r--r--  portage_with_autodep/pym/portage/elog/mod_mail_summary.py  89
-rw-r--r--  portage_with_autodep/pym/portage/elog/mod_save.py  51
-rw-r--r--  portage_with_autodep/pym/portage/elog/mod_save_summary.py  59
-rw-r--r--  portage_with_autodep/pym/portage/elog/mod_syslog.py  32
-rw-r--r--  portage_with_autodep/pym/portage/env/__init__.py  3
-rw-r--r--  portage_with_autodep/pym/portage/env/config.py  105
-rw-r--r--  portage_with_autodep/pym/portage/env/loaders.py  319
-rw-r--r--  portage_with_autodep/pym/portage/env/validators.py  20
-rw-r--r--  portage_with_autodep/pym/portage/exception.py  186
-rw-r--r--  portage_with_autodep/pym/portage/getbinpkg.py  861
-rw-r--r--  portage_with_autodep/pym/portage/glsa.py  699
-rw-r--r--  portage_with_autodep/pym/portage/localization.py  20
-rw-r--r--  portage_with_autodep/pym/portage/locks.py  395
-rw-r--r--  portage_with_autodep/pym/portage/mail.py  177
-rw-r--r--  portage_with_autodep/pym/portage/manifest.py  538
-rw-r--r--  portage_with_autodep/pym/portage/news.py  351
-rw-r--r--  portage_with_autodep/pym/portage/output.py  794
-rw-r--r--  portage_with_autodep/pym/portage/package/__init__.py  2
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/__init__.py  2
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_config/KeywordsManager.py  284
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_config/LicenseManager.py  236
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_config/LocationsManager.py  182
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_config/MaskManager.py  189
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_config/UseManager.py  235
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_config/VirtualsManager.py  233
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_config/__init__.py  2
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_config/env_var_validation.py  23
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_config/features_set.py  128
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_config/helper.py  64
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_config/special_env_vars.py  185
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_ipc/ExitCommand.py  27
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_ipc/IpcCommand.py  9
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_ipc/QueryCommand.py  98
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_ipc/__init__.py  2
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_spawn_nofetch.py  82
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/config.py  2224
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/deprecated_profile_check.py  42
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/digestcheck.py  167
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/digestgen.py  202
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/doebuild.py  1791
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/fetch.py  1129
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/getmaskingreason.py  124
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/getmaskingstatus.py  174
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/prepare_build_dirs.py  370
-rw-r--r--  portage_with_autodep/pym/portage/process.py  427
-rw-r--r--  portage_with_autodep/pym/portage/proxy/__init__.py  2
-rw-r--r--  portage_with_autodep/pym/portage/proxy/lazyimport.py  212
-rw-r--r--  portage_with_autodep/pym/portage/proxy/objectproxy.py  91
-rw-r--r--  portage_with_autodep/pym/portage/repository/__init__.py  2
-rw-r--r--  portage_with_autodep/pym/portage/repository/config.py  504
-rw-r--r--  portage_with_autodep/pym/portage/tests/__init__.py  244
-rw-r--r--  portage_with_autodep/pym/portage/tests/bin/__init__.py  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/bin/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/bin/setup_env.py  85
-rw-r--r--  portage_with_autodep/pym/portage/tests/bin/test_dobin.py  16
-rw-r--r--  portage_with_autodep/pym/portage/tests/bin/test_dodir.py  16
-rw-r--r--  portage_with_autodep/pym/portage/tests/dbapi/__init__.py  2
-rw-r--r--  portage_with_autodep/pym/portage/tests/dbapi/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/dbapi/test_fakedbapi.py  58
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/__init__.py  3
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/testAtom.py  315
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/testCheckRequiredUse.py  219
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/testExtendedAtomDict.py  18
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/testExtractAffectingUSE.py  75
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/testStandalone.py  36
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_best_match_to_list.py  43
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_dep_getcpv.py  35
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_dep_getrepo.py  29
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_dep_getslot.py  28
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_dep_getusedeps.py  35
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_get_operator.py  33
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_get_required_use_flags.py  42
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_isjustname.py  24
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_isvalidatom.py  146
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_match_from_list.py  108
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_paren_reduce.py  66
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_use_reduce.py  627
-rw-r--r--  portage_with_autodep/pym/portage/tests/ebuild/__init__.py  2
-rw-r--r--  portage_with_autodep/pym/portage/tests/ebuild/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/ebuild/test_array_fromfile_eof.py  43
-rw-r--r--  portage_with_autodep/pym/portage/tests/ebuild/test_config.py  198
-rw-r--r--  portage_with_autodep/pym/portage/tests/ebuild/test_doebuild_spawn.py  82
-rw-r--r--  portage_with_autodep/pym/portage/tests/ebuild/test_ipc_daemon.py  124
-rw-r--r--  portage_with_autodep/pym/portage/tests/ebuild/test_pty_eof.py  32
-rw-r--r--  portage_with_autodep/pym/portage/tests/ebuild/test_spawn.py  52
-rw-r--r--  portage_with_autodep/pym/portage/tests/env/__init__.py  4
-rw-r--r--  portage_with_autodep/pym/portage/tests/env/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/env/config/__init__.py  4
-rw-r--r--  portage_with_autodep/pym/portage/tests/env/config/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/env/config/test_PackageKeywordsFile.py  40
-rw-r--r--  portage_with_autodep/pym/portage/tests/env/config/test_PackageMaskFile.py  29
-rw-r--r--  portage_with_autodep/pym/portage/tests/env/config/test_PackageUseFile.py  37
-rw-r--r--  portage_with_autodep/pym/portage/tests/env/config/test_PortageModulesFile.py  39
-rw-r--r--  portage_with_autodep/pym/portage/tests/lafilefixer/__init__.py  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/lafilefixer/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/lafilefixer/test_lafilefixer.py  145
-rw-r--r--  portage_with_autodep/pym/portage/tests/lazyimport/__init__.py  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/lazyimport/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/lazyimport/test_lazy_import_portage_baseline.py  81
-rw-r--r--  portage_with_autodep/pym/portage/tests/lazyimport/test_preload_portage_submodules.py  16
-rw-r--r--  portage_with_autodep/pym/portage/tests/lint/__init__.py  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/lint/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/lint/test_bash_syntax.py  42
-rw-r--r--  portage_with_autodep/pym/portage/tests/lint/test_compile_modules.py  46
-rw-r--r--  portage_with_autodep/pym/portage/tests/lint/test_import_modules.py  40
-rw-r--r--  portage_with_autodep/pym/portage/tests/locks/__init__.py  2
-rw-r--r--  portage_with_autodep/pym/portage/tests/locks/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/locks/test_asynchronous_lock.py  124
-rw-r--r--  portage_with_autodep/pym/portage/tests/locks/test_lock_nonblock.py  46
-rw-r--r--  portage_with_autodep/pym/portage/tests/news/__init__.py  3
-rw-r--r--  portage_with_autodep/pym/portage/tests/news/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/news/test_NewsItem.py  95
-rw-r--r--  portage_with_autodep/pym/portage/tests/process/__init__.py  2
-rw-r--r--  portage_with_autodep/pym/portage/tests/process/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/process/test_poll.py  39
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/ResolverPlayground.py  690
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/__init__.py  2
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_autounmask.py  326
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_backtracking.py  169
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_circular_dependencies.py  84
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_depclean.py  285
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_depth.py  252
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_eapi.py  115
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_merge_order.py  453
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_missing_iuse_and_evaluated_atoms.py  31
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_multirepo.py  318
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_multislot.py  40
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_old_dep_chain_display.py  35
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_output.py  88
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_rebuild.py  138
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_required_use.py  114
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_simple.py  57
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_slot_collisions.py  143
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_use_dep_defaults.py  40
-rwxr-xr-x  portage_with_autodep/pym/portage/tests/runTests  46
-rw-r--r--  portage_with_autodep/pym/portage/tests/sets/__init__.py  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/sets/base/__init__.py  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/sets/base/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/sets/base/testInternalPackageSet.py  61
-rw-r--r--  portage_with_autodep/pym/portage/tests/sets/files/__init__.py  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/sets/files/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/sets/files/testConfigFileSet.py  32
-rw-r--r--  portage_with_autodep/pym/portage/tests/sets/files/testStaticFileSet.py  27
-rw-r--r--  portage_with_autodep/pym/portage/tests/sets/shell/__init__.py  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/sets/shell/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/sets/shell/testShell.py  28
-rw-r--r--  portage_with_autodep/pym/portage/tests/unicode/__init__.py  2
-rw-r--r--  portage_with_autodep/pym/portage/tests/unicode/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/unicode/test_string_format.py  108
-rw-r--r--  portage_with_autodep/pym/portage/tests/util/__init__.py  4
-rw-r--r--  portage_with_autodep/pym/portage/tests/util/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/util/test_digraph.py  201
-rw-r--r--  portage_with_autodep/pym/portage/tests/util/test_getconfig.py  29
-rw-r--r--  portage_with_autodep/pym/portage/tests/util/test_grabdict.py  11
-rw-r--r--  portage_with_autodep/pym/portage/tests/util/test_normalizedPath.py  14
-rw-r--r--  portage_with_autodep/pym/portage/tests/util/test_stackDictList.py  17
-rw-r--r--  portage_with_autodep/pym/portage/tests/util/test_stackDicts.py  36
-rw-r--r--  portage_with_autodep/pym/portage/tests/util/test_stackLists.py  19
-rw-r--r--  portage_with_autodep/pym/portage/tests/util/test_uniqueArray.py  24
-rw-r--r--  portage_with_autodep/pym/portage/tests/util/test_varExpand.py  92
-rw-r--r--  portage_with_autodep/pym/portage/tests/versions/__init__.py  3
-rw-r--r--  portage_with_autodep/pym/portage/tests/versions/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/versions/test_cpv_sort_key.py  16
-rw-r--r--  portage_with_autodep/pym/portage/tests/versions/test_vercmp.py  80
-rw-r--r--  portage_with_autodep/pym/portage/tests/xpak/__init__.py  3
-rw-r--r--  portage_with_autodep/pym/portage/tests/xpak/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/xpak/test_decodeint.py  16
-rw-r--r--  portage_with_autodep/pym/portage/update.py  320
-rw-r--r--  portage_with_autodep/pym/portage/util/ExtractKernelVersion.py  76
-rw-r--r--  portage_with_autodep/pym/portage/util/__init__.py  1602
-rw-r--r--  portage_with_autodep/pym/portage/util/_dyn_libs/LinkageMapELF.py  805
-rw-r--r--  portage_with_autodep/pym/portage/util/_dyn_libs/PreservedLibsRegistry.py  172
-rw-r--r--  portage_with_autodep/pym/portage/util/_dyn_libs/__init__.py  2
-rw-r--r--  portage_with_autodep/pym/portage/util/_pty.py  212
-rw-r--r--  portage_with_autodep/pym/portage/util/digraph.py  342
-rw-r--r--  portage_with_autodep/pym/portage/util/env_update.py  293
-rw-r--r--  portage_with_autodep/pym/portage/util/lafilefixer.py  185
-rw-r--r--  portage_with_autodep/pym/portage/util/listdir.py  151
-rw-r--r--  portage_with_autodep/pym/portage/util/movefile.py  242
-rw-r--r--  portage_with_autodep/pym/portage/util/mtimedb.py  81
-rw-r--r--  portage_with_autodep/pym/portage/versions.py  403
-rw-r--r--  portage_with_autodep/pym/portage/xml/__init__.py  2
-rw-r--r--  portage_with_autodep/pym/portage/xml/metadata.py  376
-rw-r--r--  portage_with_autodep/pym/portage/xpak.py  497
-rw-r--r--  portage_with_autodep/pym/repoman/__init__.py  0
-rw-r--r--  portage_with_autodep/pym/repoman/checks.py  707
-rw-r--r--  portage_with_autodep/pym/repoman/errors.py  26
-rw-r--r--  portage_with_autodep/pym/repoman/herdbase.py  110
-rw-r--r--  portage_with_autodep/pym/repoman/utilities.py  511
337 files changed, 74731 insertions, 0 deletions
diff --git a/portage_with_autodep/pym/_emerge/AbstractDepPriority.py b/portage_with_autodep/pym/_emerge/AbstractDepPriority.py
new file mode 100644
index 0000000..94a9379
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/AbstractDepPriority.py
@@ -0,0 +1,29 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import copy
+from _emerge.SlotObject import SlotObject
+
+class AbstractDepPriority(SlotObject):
+ __slots__ = ("buildtime", "runtime", "runtime_post")
+
+ def __lt__(self, other):
+ return self.__int__() < other
+
+ def __le__(self, other):
+ return self.__int__() <= other
+
+ def __eq__(self, other):
+ return self.__int__() == other
+
+ def __ne__(self, other):
+ return self.__int__() != other
+
+ def __gt__(self, other):
+ return self.__int__() > other
+
+ def __ge__(self, other):
+ return self.__int__() >= other
+
+ def copy(self):
+ return copy.copy(self)
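
Since every rich comparison above funnels through __int__(), a concrete subclass only needs to map its slot flags to an integer to become fully orderable, both against other priorities and against plain numbers. A minimal sketch of such a subclass (DemoPriority and its numeric mapping are hypothetical; the real mappings live in DepPriority, BlockerDepPriority and UnmergeDepPriority):

    # Hypothetical subclass; the numeric values are illustrative only.
    from _emerge.AbstractDepPriority import AbstractDepPriority

    class DemoPriority(AbstractDepPriority):
        __slots__ = ()

        def __int__(self):
            # higher value == higher priority
            if self.buildtime:
                return 0
            if self.runtime:
                return -1
            return -2

    # Comparisons work between priorities and against bare integers.
    assert DemoPriority(buildtime=True) > DemoPriority(runtime=True)
    assert DemoPriority(runtime=True) >= -1
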
diff --git a/portage_with_autodep/pym/_emerge/AbstractEbuildProcess.py b/portage_with_autodep/pym/_emerge/AbstractEbuildProcess.py
new file mode 100644
index 0000000..4147ecb
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/AbstractEbuildProcess.py
@@ -0,0 +1,266 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import io
+import stat
+import textwrap
+from _emerge.SpawnProcess import SpawnProcess
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.EbuildIpcDaemon import EbuildIpcDaemon
+import portage
+from portage.elog import messages as elog_messages
+from portage.localization import _
+from portage.package.ebuild._ipc.ExitCommand import ExitCommand
+from portage.package.ebuild._ipc.QueryCommand import QueryCommand
+from portage import os
+from portage.util._pty import _create_pty_or_pipe
+from portage.util import apply_secpass_permissions
+
+class AbstractEbuildProcess(SpawnProcess):
+
+ __slots__ = ('phase', 'settings',) + \
+ ('_build_dir', '_ipc_daemon', '_exit_command',)
+ _phases_without_builddir = ('clean', 'cleanrm', 'depend', 'help',)
+
+ # Number of milliseconds to allow natural exit of the ebuild
+ # process after it has called the exit command via IPC. It
+ # doesn't hurt to be generous here since the scheduler
+ # continues to process events during this period, and it can
+ # return long before the timeout expires.
+ _exit_timeout = 10000 # 10 seconds
+
+ # The EbuildIpcDaemon support is well tested, but this variable
+ # is left so we can temporarily disable it if any issues arise.
+ _enable_ipc_daemon = True
+
+ def __init__(self, **kwargs):
+ SpawnProcess.__init__(self, **kwargs)
+ if self.phase is None:
+ phase = self.settings.get("EBUILD_PHASE")
+ if not phase:
+ phase = 'other'
+ self.phase = phase
+
+ def _start(self):
+
+ need_builddir = self.phase not in self._phases_without_builddir
+
+ # This can happen if the pre-clean phase triggers
+ # die_hooks for some reason, and PORTAGE_BUILDDIR
+ # doesn't exist yet.
+ if need_builddir and \
+ not os.path.isdir(self.settings['PORTAGE_BUILDDIR']):
+ msg = _("The ebuild phase '%s' has been aborted "
+ "since PORTAGE_BUILDIR does not exist: '%s'") % \
+ (self.phase, self.settings['PORTAGE_BUILDDIR'])
+ self._eerror(textwrap.wrap(msg, 72))
+ self._set_returncode((self.pid, 1 << 8))
+ self.wait()
+ return
+
+ if self.background:
+ # Automatically prevent color codes from showing up in logs,
+ # since we're not displaying to a terminal anyway.
+ self.settings['NOCOLOR'] = 'true'
+
+ if self._enable_ipc_daemon:
+ self.settings.pop('PORTAGE_EBUILD_EXIT_FILE', None)
+ if self.phase not in self._phases_without_builddir:
+ if 'PORTAGE_BUILDIR_LOCKED' not in self.settings:
+ self._build_dir = EbuildBuildDir(
+ scheduler=self.scheduler, settings=self.settings)
+ self._build_dir.lock()
+ self.settings['PORTAGE_IPC_DAEMON'] = "1"
+ self._start_ipc_daemon()
+ else:
+ self.settings.pop('PORTAGE_IPC_DAEMON', None)
+ else:
+ # Since the IPC daemon is disabled, use a simple tempfile based
+ # approach to detect unexpected exit like in bug #190128.
+ self.settings.pop('PORTAGE_IPC_DAEMON', None)
+ if self.phase not in self._phases_without_builddir:
+ exit_file = os.path.join(
+ self.settings['PORTAGE_BUILDDIR'],
+ '.exit_status')
+ self.settings['PORTAGE_EBUILD_EXIT_FILE'] = exit_file
+ try:
+ os.unlink(exit_file)
+ except OSError:
+ if os.path.exists(exit_file):
+ # make sure it doesn't exist
+ raise
+ else:
+ self.settings.pop('PORTAGE_EBUILD_EXIT_FILE', None)
+
+ SpawnProcess._start(self)
+
+ def _init_ipc_fifos(self):
+
+ input_fifo = os.path.join(
+ self.settings['PORTAGE_BUILDDIR'], '.ipc_in')
+ output_fifo = os.path.join(
+ self.settings['PORTAGE_BUILDDIR'], '.ipc_out')
+
+ for p in (input_fifo, output_fifo):
+
+ st = None
+ try:
+ st = os.lstat(p)
+ except OSError:
+ os.mkfifo(p)
+ else:
+ if not stat.S_ISFIFO(st.st_mode):
+ st = None
+ try:
+ os.unlink(p)
+ except OSError:
+ pass
+ os.mkfifo(p)
+
+ apply_secpass_permissions(p,
+ uid=os.getuid(),
+ gid=portage.data.portage_gid,
+ mode=0o770, stat_cached=st)
+
+ return (input_fifo, output_fifo)
+
+ def _start_ipc_daemon(self):
+ self._exit_command = ExitCommand()
+ self._exit_command.reply_hook = self._exit_command_callback
+ query_command = QueryCommand(self.settings, self.phase)
+ commands = {
+ 'best_version' : query_command,
+ 'exit' : self._exit_command,
+ 'has_version' : query_command,
+ }
+ input_fifo, output_fifo = self._init_ipc_fifos()
+ self._ipc_daemon = EbuildIpcDaemon(commands=commands,
+ input_fifo=input_fifo,
+ output_fifo=output_fifo,
+ scheduler=self.scheduler)
+ self._ipc_daemon.start()
+
+ def _exit_command_callback(self):
+ if self._registered:
+ # Let the process exit naturally, if possible.
+ self.scheduler.schedule(self._reg_id, timeout=self._exit_timeout)
+ if self._registered:
+ # If it doesn't exit naturally in a reasonable amount
+ # of time, kill it (solves bug #278895). We try to avoid
+ # this when possible since it makes sandbox complain about
+ # being killed by a signal.
+ self.cancel()
+
+ def _orphan_process_warn(self):
+ phase = self.phase
+
+ msg = _("The ebuild phase '%s' with pid %s appears "
+ "to have left an orphan process running in the "
+ "background.") % (phase, self.pid)
+
+ self._eerror(textwrap.wrap(msg, 72))
+
+ def _pipe(self, fd_pipes):
+ stdout_pipe = None
+ if not self.background:
+ stdout_pipe = fd_pipes.get(1)
+ got_pty, master_fd, slave_fd = \
+ _create_pty_or_pipe(copy_term_size=stdout_pipe)
+ return (master_fd, slave_fd)
+
+ def _can_log(self, slave_fd):
+ # With sesandbox, logging works through a pty but not through a
+ # normal pipe. So, disable logging if ptys are broken.
+ # See Bug #162404.
+ # TODO: Add support for logging via named pipe (fifo) with
+ # sesandbox, since EbuildIpcDaemon uses a fifo and it's known
+ # to be compatible with sesandbox.
+ return not ('sesandbox' in self.settings.features \
+ and self.settings.selinux_enabled()) or os.isatty(slave_fd)
+
+ def _killed_by_signal(self, signum):
+ msg = _("The ebuild phase '%s' has been "
+ "killed by signal %s.") % (self.phase, signum)
+ self._eerror(textwrap.wrap(msg, 72))
+
+ def _unexpected_exit(self):
+
+ phase = self.phase
+
+ msg = _("The ebuild phase '%s' has exited "
+ "unexpectedly. This type of behavior "
+ "is known to be triggered "
+ "by things such as failed variable "
+ "assignments (bug #190128) or bad substitution "
+ "errors (bug #200313). Normally, before exiting, bash should "
+ "have displayed an error message above. If bash did not "
+ "produce an error message above, it's possible "
+ "that the ebuild has called `exit` when it "
+ "should have called `die` instead. This behavior may also "
+ "be triggered by a corrupt bash binary or a hardware "
+ "problem such as memory or cpu malfunction. If the problem is not "
+ "reproducible or it appears to occur randomly, then it is likely "
+ "to be triggered by a hardware problem. "
+ "If you suspect a hardware problem then you should "
+ "try some basic hardware diagnostics such as memtest. "
+ "Please do not report this as a bug unless it is consistently "
+ "reproducible and you are sure that your bash binary and hardware "
+ "are functioning properly.") % phase
+
+ self._eerror(textwrap.wrap(msg, 72))
+
+ def _eerror(self, lines):
+ self._elog('eerror', lines)
+
+ def _elog(self, elog_funcname, lines):
+ out = io.StringIO()
+ phase = self.phase
+ elog_func = getattr(elog_messages, elog_funcname)
+ global_havecolor = portage.output.havecolor
+ try:
+ portage.output.havecolor = \
+ self.settings.get('NOCOLOR', 'false').lower() in ('no', 'false')
+ for line in lines:
+ elog_func(line, phase=phase, key=self.settings.mycpv, out=out)
+ finally:
+ portage.output.havecolor = global_havecolor
+ msg = out.getvalue()
+ if msg:
+ log_path = None
+ if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+ log_path = self.settings.get("PORTAGE_LOG_FILE")
+ self.scheduler.output(msg, log_path=log_path)
+
+ def _log_poll_exception(self, event):
+ self._elog("eerror",
+ ["%s received strange poll event: %s\n" % \
+ (self.__class__.__name__, event,)])
+
+ def _set_returncode(self, wait_retval):
+ SpawnProcess._set_returncode(self, wait_retval)
+
+ if self._ipc_daemon is not None:
+ self._ipc_daemon.cancel()
+ if self._exit_command.exitcode is not None:
+ self.returncode = self._exit_command.exitcode
+ else:
+ if self.returncode < 0:
+ if not self.cancelled:
+ self._killed_by_signal(-self.returncode)
+ else:
+ self.returncode = 1
+ if not self.cancelled:
+ self._unexpected_exit()
+ if self._build_dir is not None:
+ self._build_dir.unlock()
+ self._build_dir = None
+ elif not self.cancelled:
+ exit_file = self.settings.get('PORTAGE_EBUILD_EXIT_FILE')
+ if exit_file and not os.path.exists(exit_file):
+ if self.returncode < 0:
+ if not self.cancelled:
+ self._killed_by_signal(-self.returncode)
+ else:
+ self.returncode = 1
+ if not self.cancelled:
+ self._unexpected_exit()
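
The fifo setup in _init_ipc_fifos() follows a create-or-repair pattern: an existing fifo is reused, while anything else occupying the path is removed and replaced. A standalone sketch of just that pattern (ensure_fifo is a hypothetical helper; the ownership handling done by apply_secpass_permissions is omitted):

    import os
    import stat

    def ensure_fifo(path):
        # Reuse an existing fifo; replace whatever else is in the way.
        try:
            st = os.lstat(path)
        except OSError:
            os.mkfifo(path)          # nothing there yet: create it
        else:
            if not stat.S_ISFIFO(st.st_mode):
                os.unlink(path)      # stale non-fifo entry: replace it
                os.mkfifo(path)
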
diff --git a/portage_with_autodep/pym/_emerge/AbstractPollTask.py b/portage_with_autodep/pym/_emerge/AbstractPollTask.py
new file mode 100644
index 0000000..f7f3a95
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/AbstractPollTask.py
@@ -0,0 +1,62 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import array
+import logging
+
+from portage.util import writemsg_level
+from _emerge.AsynchronousTask import AsynchronousTask
+from _emerge.PollConstants import PollConstants
+class AbstractPollTask(AsynchronousTask):
+
+ __slots__ = ("scheduler",) + \
+ ("_registered",)
+
+ _bufsize = 4096
+ _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
+ _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
+ _exceptional_events
+
+ def isAlive(self):
+ return bool(self._registered)
+
+ def _read_buf(self, f, event):
+ """
+ | POLLIN | RETURN
+ | BIT | VALUE
+ | ---------------------------------------------------
+ | 1 | Read self._bufsize into an instance of
+ | | array.array('B') and return it, ignoring
+ | | EOFError and IOError. An empty array
+ | | indicates EOF.
+ | ---------------------------------------------------
+ | 0 | None
+ """
+ buf = None
+ if event & PollConstants.POLLIN:
+ buf = array.array('B')
+ try:
+ buf.fromfile(f, self._bufsize)
+ except (EOFError, IOError):
+ pass
+ return buf
+
+ def _unregister(self):
+ raise NotImplementedError(self)
+
+ def _log_poll_exception(self, event):
+ writemsg_level(
+ "!!! %s received strange poll event: %s\n" % \
+ (self.__class__.__name__, event,),
+ level=logging.ERROR, noiselevel=-1)
+
+ def _unregister_if_appropriate(self, event):
+ if self._registered:
+ if event & self._exceptional_events:
+ self._log_poll_exception(event)
+ self._unregister()
+ self.cancel()
+ elif event & PollConstants.POLLHUP:
+ self._unregister()
+ self.wait()
+
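
The _read_buf() contract documented above can be exercised outside of a poll loop. A small standalone illustration (the pipe setup is purely for demonstration and is not part of this diff):

    import array
    import os

    pr, pw = os.pipe()
    os.write(pw, b'data')
    os.close(pw)                  # close the write end so EOF is reachable
    f = os.fdopen(pr, 'rb', 0)
    buf = array.array('B')
    try:
        buf.fromfile(f, 4096)     # a short read raises EOFError...
    except (EOFError, IOError):
        pass                      # ...but the bytes already read are kept
    assert len(buf) == 4          # non-empty: data; empty would mean EOF
    f.close()
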
diff --git a/portage_with_autodep/pym/_emerge/AsynchronousLock.py b/portage_with_autodep/pym/_emerge/AsynchronousLock.py
new file mode 100644
index 0000000..637ba73
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/AsynchronousLock.py
@@ -0,0 +1,288 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import dummy_threading
+import fcntl
+import logging
+import sys
+
+try:
+ import threading
+except ImportError:
+ import dummy_threading as threading
+
+import portage
+from portage import os
+from portage.exception import TryAgain
+from portage.localization import _
+from portage.locks import lockfile, unlockfile
+from portage.util import writemsg_level
+from _emerge.AbstractPollTask import AbstractPollTask
+from _emerge.AsynchronousTask import AsynchronousTask
+from _emerge.PollConstants import PollConstants
+from _emerge.SpawnProcess import SpawnProcess
+
+class AsynchronousLock(AsynchronousTask):
+ """
+ This uses the portage.locks module to acquire a lock asynchronously,
+ using either a thread (if available) or a subprocess.
+
+ The default behavior is to use a process instead of a thread, since
+ there is currently no way to interrupt a thread that is waiting for
+ a lock (notably, SIGINT doesn't work because python delivers all
+ signals to the main thread).
+ """
+
+ __slots__ = ('path', 'scheduler',) + \
+ ('_imp', '_force_async', '_force_dummy', '_force_process', \
+ '_force_thread', '_waiting')
+
+ _use_process_by_default = True
+
+ def _start(self):
+
+ if not self._force_async:
+ try:
+ self._imp = lockfile(self.path,
+ wantnewlockfile=True, flags=os.O_NONBLOCK)
+ except TryAgain:
+ pass
+ else:
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ if self._force_process or \
+ (not self._force_thread and \
+ (self._use_process_by_default or threading is dummy_threading)):
+ self._imp = _LockProcess(path=self.path, scheduler=self.scheduler)
+ else:
+ self._imp = _LockThread(path=self.path,
+ scheduler=self.scheduler,
+ _force_dummy=self._force_dummy)
+
+ self._imp.addExitListener(self._imp_exit)
+ self._imp.start()
+
+ def _imp_exit(self, imp):
+ # call exit listeners
+ if not self._waiting:
+ self.wait()
+
+ def _cancel(self):
+ if isinstance(self._imp, AsynchronousTask):
+ self._imp.cancel()
+
+ def _poll(self):
+ if isinstance(self._imp, AsynchronousTask):
+ self._imp.poll()
+ return self.returncode
+
+ def _wait(self):
+ if self.returncode is not None:
+ return self.returncode
+ self._waiting = True
+ self.returncode = self._imp.wait()
+ self._waiting = False
+ return self.returncode
+
+ def unlock(self):
+ if self._imp is None:
+ raise AssertionError('not locked')
+ if isinstance(self._imp, (_LockProcess, _LockThread)):
+ self._imp.unlock()
+ else:
+ unlockfile(self._imp)
+ self._imp = None
+
+class _LockThread(AbstractPollTask):
+ """
+ This uses the portage.locks module to acquire a lock asynchronously,
+ using a background thread. After the lock is acquired, the thread
+ writes to a pipe in order to notify a poll loop running in the main
+ thread.
+
+ If the threading module is unavailable then the dummy_threading
+ module will be used, and the lock will be acquired synchronously
+ (before the start() method returns).
+ """
+
+ __slots__ = ('path',) + \
+ ('_files', '_force_dummy', '_lock_obj',
+ '_thread', '_reg_id',)
+
+ def _start(self):
+ pr, pw = os.pipe()
+ self._files = {}
+ self._files['pipe_read'] = os.fdopen(pr, 'rb', 0)
+ self._files['pipe_write'] = os.fdopen(pw, 'wb', 0)
+ for k, f in self._files.items():
+ fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
+ fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
+ self._reg_id = self.scheduler.register(self._files['pipe_read'].fileno(),
+ PollConstants.POLLIN, self._output_handler)
+ self._registered = True
+ threading_mod = threading
+ if self._force_dummy:
+ threading_mod = dummy_threading
+ self._thread = threading_mod.Thread(target=self._run_lock)
+ self._thread.start()
+
+ def _run_lock(self):
+ self._lock_obj = lockfile(self.path, wantnewlockfile=True)
+ self._files['pipe_write'].write(b'\0')
+
+ def _output_handler(self, f, event):
+ buf = self._read_buf(self._files['pipe_read'], event)
+ if buf:
+ self._unregister()
+ self.returncode = os.EX_OK
+ self.wait()
+
+ def _cancel(self):
+ # There's currently no way to force thread termination.
+ pass
+
+ def _wait(self):
+ if self.returncode is not None:
+ return self.returncode
+ if self._registered:
+ self.scheduler.schedule(self._reg_id)
+ return self.returncode
+
+ def unlock(self):
+ if self._lock_obj is None:
+ raise AssertionError('not locked')
+ if self.returncode is None:
+ raise AssertionError('lock not acquired yet')
+ unlockfile(self._lock_obj)
+ self._lock_obj = None
+
+ def _unregister(self):
+ self._registered = False
+
+ if self._thread is not None:
+ self._thread.join()
+ self._thread = None
+
+ if self._reg_id is not None:
+ self.scheduler.unregister(self._reg_id)
+ self._reg_id = None
+
+ if self._files is not None:
+ for f in self._files.values():
+ f.close()
+ self._files = None
+
+class _LockProcess(AbstractPollTask):
+ """
+ This uses the portage.locks module to acquire a lock asynchronously,
+ using a subprocess. After the lock is acquired, the process
+ writes to a pipe in order to notify a poll loop running in the main
+ process. The unlock() method notifies the subprocess to release the
+ lock and exit.
+ """
+
+ __slots__ = ('path',) + \
+ ('_acquired', '_kill_test', '_proc', '_files', '_reg_id', '_unlocked')
+
+ def _start(self):
+ in_pr, in_pw = os.pipe()
+ out_pr, out_pw = os.pipe()
+ self._files = {}
+ self._files['pipe_in'] = os.fdopen(in_pr, 'rb', 0)
+ self._files['pipe_out'] = os.fdopen(out_pw, 'wb', 0)
+ fcntl.fcntl(in_pr, fcntl.F_SETFL,
+ fcntl.fcntl(in_pr, fcntl.F_GETFL) | os.O_NONBLOCK)
+ self._reg_id = self.scheduler.register(in_pr,
+ PollConstants.POLLIN, self._output_handler)
+ self._registered = True
+ self._proc = SpawnProcess(
+ args=[portage._python_interpreter,
+ os.path.join(portage._bin_path, 'lock-helper.py'), self.path],
+ env=dict(os.environ, PORTAGE_PYM_PATH=portage._pym_path),
+ fd_pipes={0:out_pr, 1:in_pw, 2:sys.stderr.fileno()},
+ scheduler=self.scheduler)
+ self._proc.addExitListener(self._proc_exit)
+ self._proc.start()
+ os.close(out_pr)
+ os.close(in_pw)
+
+ def _proc_exit(self, proc):
+ if proc.returncode != os.EX_OK:
+ # Typically, this will happen due to the
+ # process being killed by a signal.
+ if not self._acquired:
+ # If the lock hasn't been acquired yet, the
+ # caller can check the returncode and handle
+ # this failure appropriately.
+ if not (self.cancelled or self._kill_test):
+ writemsg_level("_LockProcess: %s\n" % \
+ _("failed to acquire lock on '%s'") % (self.path,),
+ level=logging.ERROR, noiselevel=-1)
+ self._unregister()
+ self.returncode = proc.returncode
+ self.wait()
+ return
+
+ if not self.cancelled and \
+ not self._unlocked:
+ # We don't want lost locks going unnoticed, so it's
+ # only safe to ignore if either the cancel() or
+ # unlock() methods have been previously called.
+ raise AssertionError("lock process failed with returncode %s" \
+ % (proc.returncode,))
+
+ def _cancel(self):
+ if self._proc is not None:
+ self._proc.cancel()
+
+ def _poll(self):
+ if self._proc is not None:
+ self._proc.poll()
+ return self.returncode
+
+ def _wait(self):
+ if self.returncode is not None:
+ return self.returncode
+ if self._registered:
+ self.scheduler.schedule(self._reg_id)
+ return self.returncode
+
+ def _output_handler(self, f, event):
+ buf = self._read_buf(self._files['pipe_in'], event)
+ if buf:
+ self._acquired = True
+ self._unregister()
+ self.returncode = os.EX_OK
+ self.wait()
+
+ def _unregister(self):
+ self._registered = False
+
+ if self._reg_id is not None:
+ self.scheduler.unregister(self._reg_id)
+ self._reg_id = None
+
+ if self._files is not None:
+ try:
+ pipe_in = self._files.pop('pipe_in')
+ except KeyError:
+ pass
+ else:
+ pipe_in.close()
+
+ def unlock(self):
+ if self._proc is None:
+ raise AssertionError('not locked')
+ if self.returncode is None:
+ raise AssertionError('lock not acquired yet')
+ if self.returncode != os.EX_OK:
+ raise AssertionError("lock process failed with returncode %s" \
+ % (self.returncode,))
+ self._unlocked = True
+ self._files['pipe_out'].write(b'\0')
+ self._files['pipe_out'].close()
+ self._files = None
+ self._proc.wait()
+ self._proc = None
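
A usage sketch for AsynchronousLock, modeled on the pattern in pym/portage/tests/locks/test_asynchronous_lock.py (the temporary path and the sched_iface wiring are assumptions of this sketch):

    import shutil
    import tempfile
    from portage import os
    from _emerge.AsynchronousLock import AsynchronousLock
    from _emerge.PollScheduler import PollScheduler

    scheduler = PollScheduler().sched_iface   # assumed scheduler interface
    tmp_dir = tempfile.mkdtemp()
    try:
        lock = AsynchronousLock(path=os.path.join(tmp_dir, 'lock_me'),
            scheduler=scheduler)
        lock.start()
        if lock.wait() == os.EX_OK:
            # the lock is held here; do the protected work, then release
            lock.unlock()
    finally:
        shutil.rmtree(tmp_dir)

Whether the lock is taken by a thread or a subprocess is hidden behind _imp; callers only see start(), wait() and unlock().
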
diff --git a/portage_with_autodep/pym/_emerge/AsynchronousTask.py b/portage_with_autodep/pym/_emerge/AsynchronousTask.py
new file mode 100644
index 0000000..36522ca
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/AsynchronousTask.py
@@ -0,0 +1,129 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from _emerge.SlotObject import SlotObject
+class AsynchronousTask(SlotObject):
+ """
+ Subclasses override _wait() and _poll() so that calls
+ to public methods can be wrapped for implementing
+ hooks such as exit listener notification.
+
+ Subclasses should call self.wait() to notify exit listeners after
+ the task is complete and self.returncode has been set.
+ """
+
+ __slots__ = ("background", "cancelled", "returncode") + \
+ ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
+
+ def start(self):
+ """
+ Start an asynchronous task and then return as soon as possible.
+ """
+ self._start_hook()
+ self._start()
+
+ def _start(self):
+ self.returncode = os.EX_OK
+ self.wait()
+
+ def isAlive(self):
+ return self.returncode is None
+
+ def poll(self):
+ if self.returncode is not None:
+ return self.returncode
+ self._poll()
+ self._wait_hook()
+ return self.returncode
+
+ def _poll(self):
+ return self.returncode
+
+ def wait(self):
+ if self.returncode is None:
+ self._wait()
+ self._wait_hook()
+ return self.returncode
+
+ def _wait(self):
+ return self.returncode
+
+ def cancel(self):
+ if not self.cancelled:
+ self.cancelled = True
+ self._cancel()
+ self.wait()
+
+ def _cancel(self):
+ """
+ Subclasses should implement this, as a template method
+ to be called by AsynchronousTask.cancel().
+ """
+ pass
+
+ def addStartListener(self, f):
+ """
+ The function will be called with one argument, a reference to self.
+ """
+ if self._start_listeners is None:
+ self._start_listeners = []
+ self._start_listeners.append(f)
+
+ def removeStartListener(self, f):
+ if self._start_listeners is None:
+ return
+ self._start_listeners.remove(f)
+
+ def _start_hook(self):
+ if self._start_listeners is not None:
+ start_listeners = self._start_listeners
+ self._start_listeners = None
+
+ for f in start_listeners:
+ f(self)
+
+ def addExitListener(self, f):
+ """
+ The function will be called with one argument, a reference to self.
+ """
+ if self._exit_listeners is None:
+ self._exit_listeners = []
+ self._exit_listeners.append(f)
+
+ def removeExitListener(self, f):
+ if self._exit_listeners is None:
+ if self._exit_listener_stack is not None:
+ self._exit_listener_stack.remove(f)
+ return
+ self._exit_listeners.remove(f)
+
+ def _wait_hook(self):
+ """
+ Call this method after the task completes, just before returning
+ the returncode from wait() or poll(). This hook is
+ used to trigger exit listeners when the returncode first
+ becomes available.
+ """
+ if self.returncode is not None and \
+ self._exit_listeners is not None:
+
+ # This prevents recursion, in case one of the
+ # exit handlers triggers this method again by
+ # calling wait(). Use a stack that gives
+ # removeExitListener() an opportunity to consume
+ # listeners from the stack, before they can get
+ # called below. This is necessary because a call
+ # to one exit listener may result in a call to
+ # removeExitListener() for another listener on
+ # the stack. That listener needs to be removed
+ # from the stack since it would be inconsistent
+ # to call it after it has been passed into
+ # removeExitListener().
+ self._exit_listener_stack = self._exit_listeners
+ self._exit_listeners = None
+
+ self._exit_listener_stack.reverse()
+ while self._exit_listener_stack:
+ self._exit_listener_stack.pop()(self)
+
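
The listener machinery can be seen end to end with the base class itself, whose default _start() completes immediately. A small hypothetical driver:

    from portage import os
    from _emerge.AsynchronousTask import AsynchronousTask

    def on_exit(task):
        # runs exactly once, when returncode first becomes available
        print("finished: %s" % task.returncode)

    task = AsynchronousTask()
    task.addExitListener(on_exit)
    task.start()    # default _start() sets returncode to os.EX_OK and
                    # calls wait(), whose _wait_hook() drains the listeners
    assert task.returncode == os.EX_OK
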
diff --git a/portage_with_autodep/pym/_emerge/AtomArg.py b/portage_with_autodep/pym/_emerge/AtomArg.py
new file mode 100644
index 0000000..a929b43
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/AtomArg.py
@@ -0,0 +1,11 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage._sets.base import InternalPackageSet
+from _emerge.DependencyArg import DependencyArg
+
+class AtomArg(DependencyArg):
+ def __init__(self, atom=None, **kwargs):
+ DependencyArg.__init__(self, **kwargs)
+ self.atom = atom
+ self.pset = InternalPackageSet(initial_atoms=(self.atom,), allow_repo=True)
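
AtomArg wraps a single atom in a one-element InternalPackageSet so that callers can treat command-line atoms and named sets uniformly. A hypothetical illustration (the atom string is an example, not taken from this diff):

    from portage.dep import Atom
    from _emerge.AtomArg import AtomArg

    atom = Atom(">=dev-lang/python-2.7")
    arg = AtomArg(arg=str(atom), atom=atom, root_config=None)
    print(sorted(str(a) for a in arg.pset.getAtoms()))
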
diff --git a/portage_with_autodep/pym/_emerge/Binpkg.py b/portage_with_autodep/pym/_emerge/Binpkg.py
new file mode 100644
index 0000000..bc6511e
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/Binpkg.py
@@ -0,0 +1,333 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.BinpkgFetcher import BinpkgFetcher
+from _emerge.BinpkgEnvExtractor import BinpkgEnvExtractor
+from _emerge.BinpkgExtractorAsync import BinpkgExtractorAsync
+from _emerge.CompositeTask import CompositeTask
+from _emerge.BinpkgVerifier import BinpkgVerifier
+from _emerge.EbuildMerge import EbuildMerge
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from portage.eapi import eapi_exports_replace_vars
+from portage.util import writemsg
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+import io
+import logging
+from portage.output import colorize
+
+class Binpkg(CompositeTask):
+
+ __slots__ = ("find_blockers",
+ "ldpath_mtimes", "logger", "opts",
+ "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
+ ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
+ "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
+
+ def _writemsg_level(self, msg, level=0, noiselevel=0):
+ self.scheduler.output(msg, level=level, noiselevel=noiselevel,
+ log_path=self.settings.get("PORTAGE_LOG_FILE"))
+
+ def _start(self):
+
+ pkg = self.pkg
+ settings = self.settings
+ settings.setcpv(pkg)
+ self._tree = "bintree"
+ self._bintree = self.pkg.root_config.trees[self._tree]
+ self._verify = not self.opts.pretend
+
+ # Use realpath like doebuild_environment() does, since we assert
+ # that this path is literally identical to PORTAGE_BUILDDIR.
+ dir_path = os.path.join(os.path.realpath(settings["PORTAGE_TMPDIR"]),
+ "portage", pkg.category, pkg.pf)
+ self._image_dir = os.path.join(dir_path, "image")
+ self._infloc = os.path.join(dir_path, "build-info")
+ self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
+ settings["EBUILD"] = self._ebuild_path
+ portage.doebuild_environment(self._ebuild_path, 'setup',
+ settings=self.settings, db=self._bintree.dbapi)
+ if dir_path != self.settings['PORTAGE_BUILDDIR']:
+ raise AssertionError("'%s' != '%s'" % \
+ (dir_path, self.settings['PORTAGE_BUILDDIR']))
+ self._build_dir = EbuildBuildDir(
+ scheduler=self.scheduler, settings=settings)
+ settings.configdict["pkg"]["EMERGE_FROM"] = "binary"
+ settings.configdict["pkg"]["MERGE_TYPE"] = "binary"
+
+ if eapi_exports_replace_vars(settings["EAPI"]):
+ vardb = self.pkg.root_config.trees["vartree"].dbapi
+ settings["REPLACING_VERSIONS"] = " ".join(
+ set(portage.versions.cpv_getversion(x) \
+ for x in vardb.match(self.pkg.slot_atom) + \
+ vardb.match('='+self.pkg.cpv)))
+
+ # The prefetcher has already completed or it
+ # could be running now. If it's running now,
+ # wait for it to complete since it holds
+ # a lock on the file being fetched. The
+ # portage.locks functions are only designed
+ # to work between separate processes. Since
+ # the lock is held by the current process,
+ # use the scheduler and fetcher methods to
+ # synchronize with the fetcher.
+ prefetcher = self.prefetcher
+ if prefetcher is None:
+ pass
+ elif prefetcher.isAlive() and \
+ prefetcher.poll() is None:
+
+ waiting_msg = ("Fetching '%s' " + \
+ "in the background. " + \
+ "To view fetch progress, run `tail -f " + \
+ "/var/log/emerge-fetch.log` in another " + \
+ "terminal.") % prefetcher.pkg_path
+ msg_prefix = colorize("GOOD", " * ")
+ from textwrap import wrap
+ waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
+ for line in wrap(waiting_msg, 65))
+ if not self.background:
+ writemsg(waiting_msg, noiselevel=-1)
+
+ self._current_task = prefetcher
+ prefetcher.addExitListener(self._prefetch_exit)
+ return
+
+ self._prefetch_exit(prefetcher)
+
+ def _prefetch_exit(self, prefetcher):
+
+ pkg = self.pkg
+ pkg_count = self.pkg_count
+ if not (self.opts.pretend or self.opts.fetchonly):
+ self._build_dir.lock()
+ # Initialize PORTAGE_LOG_FILE (clean_log won't work without it).
+ portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
+ # If necessary, discard old log so that we don't
+ # append to it.
+ self._build_dir.clean_log()
+ fetcher = BinpkgFetcher(background=self.background,
+ logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
+ pretend=self.opts.pretend, scheduler=self.scheduler)
+ pkg_path = fetcher.pkg_path
+ self._pkg_path = pkg_path
+ # This gives bashrc users an opportunity to do various things
+ # such as remove binary packages after they're installed.
+ self.settings["PORTAGE_BINPKG_FILE"] = pkg_path
+
+ if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
+
+ msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
+ short_msg = "emerge: (%s of %s) %s Fetch" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv)
+ self.logger.log(msg, short_msg=short_msg)
+
+ # Allow the Scheduler's fetch queue to control the
+ # number of concurrent fetchers.
+ fetcher.addExitListener(self._fetcher_exit)
+ self._task_queued(fetcher)
+ self.scheduler.fetch.schedule(fetcher)
+ return
+
+ self._fetcher_exit(fetcher)
+
+ def _fetcher_exit(self, fetcher):
+
+ # The fetcher only has a returncode when
+ # --getbinpkg is enabled.
+ if fetcher.returncode is not None:
+ self._fetched_pkg = True
+ if self._default_exit(fetcher) != os.EX_OK:
+ self._unlock_builddir()
+ self.wait()
+ return
+
+ if self.opts.pretend:
+ self._current_task = None
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ verifier = None
+ if self._verify:
+ logfile = self.settings.get("PORTAGE_LOG_FILE")
+ verifier = BinpkgVerifier(background=self.background,
+ logfile=logfile, pkg=self.pkg, scheduler=self.scheduler)
+ self._start_task(verifier, self._verifier_exit)
+ return
+
+ self._verifier_exit(verifier)
+
+ def _verifier_exit(self, verifier):
+ if verifier is not None and \
+ self._default_exit(verifier) != os.EX_OK:
+ self._unlock_builddir()
+ self.wait()
+ return
+
+ logger = self.logger
+ pkg = self.pkg
+ pkg_count = self.pkg_count
+ pkg_path = self._pkg_path
+
+ if self._fetched_pkg:
+ self._bintree.inject(pkg.cpv, filename=pkg_path)
+
+ logfile = self.settings.get("PORTAGE_LOG_FILE")
+ if logfile is not None and os.path.isfile(logfile):
+ # Remove fetch log after successful fetch.
+ try:
+ os.unlink(logfile)
+ except OSError:
+ pass
+
+ if self.opts.fetchonly:
+ self._current_task = None
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ msg = " === (%s of %s) Merging Binary (%s::%s)" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
+ short_msg = "emerge: (%s of %s) %s Merge Binary" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv)
+ logger.log(msg, short_msg=short_msg)
+
+ phase = "clean"
+ settings = self.settings
+ ebuild_phase = EbuildPhase(background=self.background,
+ phase=phase, scheduler=self.scheduler,
+ settings=settings)
+
+ self._start_task(ebuild_phase, self._clean_exit)
+
+ def _clean_exit(self, clean_phase):
+ if self._default_exit(clean_phase) != os.EX_OK:
+ self._unlock_builddir()
+ self.wait()
+ return
+
+ dir_path = self.settings['PORTAGE_BUILDDIR']
+
+ infloc = self._infloc
+ pkg = self.pkg
+ pkg_path = self._pkg_path
+
+ dir_mode = 0o755
+ for mydir in (dir_path, self._image_dir, infloc):
+ portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
+ gid=portage.data.portage_gid, mode=dir_mode)
+
+ # This initializes PORTAGE_LOG_FILE.
+ portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
+ self._writemsg_level(">>> Extracting info\n")
+
+ pkg_xpak = portage.xpak.tbz2(self._pkg_path)
+ check_missing_metadata = ("CATEGORY", "PF")
+ missing_metadata = set()
+ for k in check_missing_metadata:
+ v = pkg_xpak.getfile(_unicode_encode(k,
+ encoding=_encodings['repo.content']))
+ if not v:
+ missing_metadata.add(k)
+
+ pkg_xpak.unpackinfo(infloc)
+ for k in missing_metadata:
+ if k == "CATEGORY":
+ v = pkg.category
+ elif k == "PF":
+ v = pkg.pf
+ else:
+ continue
+
+ f = io.open(_unicode_encode(os.path.join(infloc, k),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['content'],
+ errors='backslashreplace')
+ try:
+ f.write(_unicode_decode(v + "\n"))
+ finally:
+ f.close()
+
+ # Store the md5sum in the vdb.
+ f = io.open(_unicode_encode(os.path.join(infloc, 'BINPKGMD5'),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['content'], errors='strict')
+ try:
+ f.write(_unicode_decode(
+ str(portage.checksum.perform_md5(pkg_path)) + "\n"))
+ finally:
+ f.close()
+
+ env_extractor = BinpkgEnvExtractor(background=self.background,
+ scheduler=self.scheduler, settings=self.settings)
+
+ self._start_task(env_extractor, self._env_extractor_exit)
+
+ def _env_extractor_exit(self, env_extractor):
+ if self._default_exit(env_extractor) != os.EX_OK:
+ self._unlock_builddir()
+ self.wait()
+ return
+
+ setup_phase = EbuildPhase(background=self.background,
+ phase="setup", scheduler=self.scheduler,
+ settings=self.settings)
+
+ setup_phase.addExitListener(self._setup_exit)
+ self._task_queued(setup_phase)
+ self.scheduler.scheduleSetup(setup_phase)
+
+ def _setup_exit(self, setup_phase):
+ if self._default_exit(setup_phase) != os.EX_OK:
+ self._unlock_builddir()
+ self.wait()
+ return
+
+ extractor = BinpkgExtractorAsync(background=self.background,
+ env=self.settings.environ(),
+ image_dir=self._image_dir,
+ pkg=self.pkg, pkg_path=self._pkg_path,
+ logfile=self.settings.get("PORTAGE_LOG_FILE"),
+ scheduler=self.scheduler)
+ self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
+ self._start_task(extractor, self._extractor_exit)
+
+ def _extractor_exit(self, extractor):
+ if self._final_exit(extractor) != os.EX_OK:
+ self._unlock_builddir()
+ self._writemsg_level("!!! Error Extracting '%s'\n" % \
+ self._pkg_path, noiselevel=-1, level=logging.ERROR)
+ self.wait()
+
+ def _unlock_builddir(self):
+ if self.opts.pretend or self.opts.fetchonly:
+ return
+ portage.elog.elog_process(self.pkg.cpv, self.settings)
+ self._build_dir.unlock()
+
+ def create_install_task(self):
+ task = EbuildMerge(find_blockers=self.find_blockers,
+ ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
+ pkg=self.pkg, pkg_count=self.pkg_count,
+ pkg_path=self._pkg_path, scheduler=self.scheduler,
+ settings=self.settings, tree=self._tree,
+ world_atom=self.world_atom)
+ task.addExitListener(self._install_exit)
+ return task
+
+ def _install_exit(self, task):
+ self.settings.pop("PORTAGE_BINPKG_FILE", None)
+ self._unlock_builddir()
+ if task.returncode == os.EX_OK and \
+ 'binpkg-logs' not in self.settings.features and \
+ self.settings.get("PORTAGE_LOG_FILE"):
+ try:
+ os.unlink(self.settings["PORTAGE_LOG_FILE"])
+ except OSError:
+ pass
diff --git a/portage_with_autodep/pym/_emerge/BinpkgEnvExtractor.py b/portage_with_autodep/pym/_emerge/BinpkgEnvExtractor.py
new file mode 100644
index 0000000..f68971b
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/BinpkgEnvExtractor.py
@@ -0,0 +1,66 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+
+from _emerge.CompositeTask import CompositeTask
+from _emerge.SpawnProcess import SpawnProcess
+from portage import os, _shell_quote, _unicode_encode
+from portage.const import BASH_BINARY
+
+class BinpkgEnvExtractor(CompositeTask):
+ """
+ Extract environment.bz2 for a binary or installed package.
+ """
+ __slots__ = ('settings',)
+
+ def saved_env_exists(self):
+ return os.path.exists(self._get_saved_env_path())
+
+ def dest_env_exists(self):
+ return os.path.exists(self._get_dest_env_path())
+
+ def _get_saved_env_path(self):
+ return os.path.join(os.path.dirname(self.settings['EBUILD']),
+ "environment.bz2")
+
+ def _get_dest_env_path(self):
+ return os.path.join(self.settings["T"], "environment")
+
+ def _start(self):
+ saved_env_path = self._get_saved_env_path()
+ dest_env_path = self._get_dest_env_path()
+ shell_cmd = "${PORTAGE_BUNZIP2_COMMAND:-${PORTAGE_BZIP2_COMMAND} -d} -c -- %s > %s" % \
+ (_shell_quote(saved_env_path),
+ _shell_quote(dest_env_path))
+ extractor_proc = SpawnProcess(
+ args=[BASH_BINARY, "-c", shell_cmd],
+ background=self.background,
+ env=self.settings.environ(),
+ scheduler=self.scheduler,
+ logfile=self.settings.get('PORTAGE_LOGFILE'))
+
+ self._start_task(extractor_proc, self._extractor_exit)
+
+ def _remove_dest_env(self):
+ try:
+ os.unlink(self._get_dest_env_path())
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+
+ def _extractor_exit(self, extractor_proc):
+
+ if self._default_exit(extractor_proc) != os.EX_OK:
+ self._remove_dest_env()
+ self.wait()
+ return
+
+ # This is a signal to ebuild.sh, so that it knows to filter
+ # out things like SANDBOX_{DENY,PREDICT,READ,WRITE} that
+ # would be preserved between normal phases.
+ open(_unicode_encode(self._get_dest_env_path() + '.raw'), 'w').close()
+
+ self._current_task = None
+ self.returncode = os.EX_OK
+ self.wait()
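+
+# Usage sketch (illustrative, not part of the change itself): a caller
+# would typically guard on saved_env_exists() before scheduling the
+# task, e.g.
+#
+#     env_extractor = BinpkgEnvExtractor(background=False,
+#         scheduler=scheduler, settings=settings)
+#     if env_extractor.saved_env_exists():
+#         env_extractor.start()
+#         env_extractor.wait()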
diff --git a/portage_with_autodep/pym/_emerge/BinpkgExtractorAsync.py b/portage_with_autodep/pym/_emerge/BinpkgExtractorAsync.py
new file mode 100644
index 0000000..d1630f2
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/BinpkgExtractorAsync.py
@@ -0,0 +1,31 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.SpawnProcess import SpawnProcess
+import portage
+import os
+import signal
+
+class BinpkgExtractorAsync(SpawnProcess):
+
+ __slots__ = ("image_dir", "pkg", "pkg_path")
+
+ _shell_binary = portage.const.BASH_BINARY
+
+ def _start(self):
+ # Add -q to bzip2 opts, in order to avoid "trailing garbage after
+ # EOF ignored" warning messages due to xpak trailer.
+ # SIGPIPE handling (128 + SIGPIPE) should be compatible with
+ # assert_sigpipe_ok() that's used by the ebuild unpack() helper.
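+ # For illustration, with a hypothetical pkg_path of /tmp/foo.tbz2 and
+ # image_dir of /tmp/image, the generated command is roughly:
+ # bzip2 -d -cq -- /tmp/foo.tbz2 | tar -xp -C /tmp/image -f -
+ # followed by a PIPESTATUS check so that a failure in either pipeline
+ # member is reported instead of being masked by the pipe.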
+ self.args = [self._shell_binary, "-c",
+ ("${PORTAGE_BUNZIP2_COMMAND:-${PORTAGE_BZIP2_COMMAND} -d} -cq -- %s | tar -xp -C %s -f - ; " + \
+ "p=(${PIPESTATUS[@]}) ; " + \
+ "if [[ ${p[0]} != 0 && ${p[0]} != %d ]] ; then " % (128 + signal.SIGPIPE) + \
+ "echo bzip2 failed with status ${p[0]} ; exit ${p[0]} ; fi ; " + \
+ "if [ ${p[1]} != 0 ] ; then " + \
+ "echo tar failed with status ${p[1]} ; exit ${p[1]} ; fi ; " + \
+ "exit 0 ;") % \
+ (portage._shell_quote(self.pkg_path),
+ portage._shell_quote(self.image_dir))]
+
+ SpawnProcess._start(self)
diff --git a/portage_with_autodep/pym/_emerge/BinpkgFetcher.py b/portage_with_autodep/pym/_emerge/BinpkgFetcher.py
new file mode 100644
index 0000000..baea4d6
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/BinpkgFetcher.py
@@ -0,0 +1,181 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AsynchronousLock import AsynchronousLock
+from _emerge.SpawnProcess import SpawnProcess
+try:
+ from urllib.parse import urlparse as urllib_parse_urlparse
+except ImportError:
+ from urlparse import urlparse as urllib_parse_urlparse
+import stat
+import sys
+import portage
+from portage import os
+from portage.util._pty import _create_pty_or_pipe
+
+if sys.hexversion >= 0x3000000:
+ long = int
+
+class BinpkgFetcher(SpawnProcess):
+
+ __slots__ = ("pkg", "pretend",
+ "locked", "pkg_path", "_lock_obj")
+
+ def __init__(self, **kwargs):
+ SpawnProcess.__init__(self, **kwargs)
+ pkg = self.pkg
+ self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
+
+ def _start(self):
+
+ if self.cancelled:
+ return
+
+ pkg = self.pkg
+ pretend = self.pretend
+ bintree = pkg.root_config.trees["bintree"]
+ settings = bintree.settings
+ use_locks = "distlocks" in settings.features
+ pkg_path = self.pkg_path
+
+ if not pretend:
+ portage.util.ensure_dirs(os.path.dirname(pkg_path))
+ if use_locks:
+ self.lock()
+ exists = os.path.exists(pkg_path)
+ resume = exists and os.path.basename(pkg_path) in bintree.invalids
+ if not (pretend or resume):
+ # Remove existing file or broken symlink.
+ try:
+ os.unlink(pkg_path)
+ except OSError:
+ pass
+
+ # urljoin doesn't work correctly with
+ # unrecognized protocols like sftp
+ if bintree._remote_has_index:
+ rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
+ if not rel_uri:
+ rel_uri = pkg.cpv + ".tbz2"
+ remote_base_uri = bintree._remotepkgs[pkg.cpv]["BASE_URI"]
+ uri = remote_base_uri.rstrip("/") + "/" + rel_uri.lstrip("/")
+ else:
+ uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
+ "/" + pkg.pf + ".tbz2"
+
+ if pretend:
+ portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
+ self._set_returncode((self.pid, os.EX_OK << 8))
+ self.wait()
+ return
+
+ protocol = urllib_parse_urlparse(uri)[0]
+ fcmd_prefix = "FETCHCOMMAND"
+ if resume:
+ fcmd_prefix = "RESUMECOMMAND"
+ fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
+ if not fcmd:
+ fcmd = settings.get(fcmd_prefix)
+
+ fcmd_vars = {
+ "DISTDIR" : os.path.dirname(pkg_path),
+ "URI" : uri,
+ "FILE" : os.path.basename(pkg_path)
+ }
+
+ fetch_env = dict(settings.items())
+ fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
+ for x in portage.util.shlex_split(fcmd)]
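+ # Illustration (assuming a typical wget-based setting such as
+ # FETCHCOMMAND='wget -O "${DISTDIR}/${FILE}" "${URI}"'), varexpand
+ # would yield something like:
+ # ['wget', '-O', '<DISTDIR>/foo-1.0.tbz2', 'http://binhost/foo-1.0.tbz2']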
+
+ if self.fd_pipes is None:
+ self.fd_pipes = {}
+ fd_pipes = self.fd_pipes
+
+ # Redirect all output to stdout since some fetchers like
+ # wget pollute stderr (if portage detects a problem then it
+ # can send its own message to stderr).
+ fd_pipes.setdefault(0, sys.stdin.fileno())
+ fd_pipes.setdefault(1, sys.stdout.fileno())
+ fd_pipes.setdefault(2, sys.stdout.fileno())
+
+ self.args = fetch_args
+ self.env = fetch_env
+ if settings.selinux_enabled():
+ self._selinux_type = settings["PORTAGE_FETCH_T"]
+ SpawnProcess._start(self)
+
+ def _pipe(self, fd_pipes):
+ """When appropriate, use a pty so that fetcher progress bars,
+ like wget has, will work properly."""
+ if self.background or not sys.stdout.isatty():
+ # When the output only goes to a log file,
+ # there's no point in creating a pty.
+ return os.pipe()
+ stdout_pipe = None
+ if not self.background:
+ stdout_pipe = fd_pipes.get(1)
+ got_pty, master_fd, slave_fd = \
+ _create_pty_or_pipe(copy_term_size=stdout_pipe)
+ return (master_fd, slave_fd)
+
+ def _set_returncode(self, wait_retval):
+ SpawnProcess._set_returncode(self, wait_retval)
+ if not self.pretend and self.returncode == os.EX_OK:
+ # If possible, update the mtime to match the remote package if
+ # the fetcher didn't already do it automatically.
+ bintree = self.pkg.root_config.trees["bintree"]
+ if bintree._remote_has_index:
+ remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
+ if remote_mtime is not None:
+ try:
+ remote_mtime = long(remote_mtime)
+ except ValueError:
+ pass
+ else:
+ try:
+ local_mtime = os.stat(self.pkg_path)[stat.ST_MTIME]
+ except OSError:
+ pass
+ else:
+ if remote_mtime != local_mtime:
+ try:
+ os.utime(self.pkg_path,
+ (remote_mtime, remote_mtime))
+ except OSError:
+ pass
+
+ if self.locked:
+ self.unlock()
+
+ def lock(self):
+ """
+ This raises an AlreadyLocked exception if lock() is called
+ while a lock is already held. In order to avoid this, call
+ unlock() or check whether the "locked" attribute is True
+ or False before calling lock().
+ """
+ if self._lock_obj is not None:
+ raise self.AlreadyLocked((self._lock_obj,))
+
+ async_lock = AsynchronousLock(path=self.pkg_path,
+ scheduler=self.scheduler)
+ async_lock.start()
+
+ if async_lock.wait() != os.EX_OK:
+ # TODO: Use CompositeTask for better handling, like in EbuildPhase.
+ raise AssertionError("AsynchronousLock failed with returncode %s" \
+ % (async_lock.returncode,))
+
+ self._lock_obj = async_lock
+ self.locked = True
+
+ class AlreadyLocked(portage.exception.PortageException):
+ pass
+
+ def unlock(self):
+ if self._lock_obj is None:
+ return
+ self._lock_obj.unlock()
+ self._lock_obj = None
+ self.locked = False
+
diff --git a/portage_with_autodep/pym/_emerge/BinpkgPrefetcher.py b/portage_with_autodep/pym/_emerge/BinpkgPrefetcher.py
new file mode 100644
index 0000000..ffa4900
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/BinpkgPrefetcher.py
@@ -0,0 +1,43 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.BinpkgFetcher import BinpkgFetcher
+from _emerge.CompositeTask import CompositeTask
+from _emerge.BinpkgVerifier import BinpkgVerifier
+from portage import os
+
+class BinpkgPrefetcher(CompositeTask):
+
+ __slots__ = ("pkg",) + \
+ ("pkg_path", "_bintree",)
+
+ def _start(self):
+ self._bintree = self.pkg.root_config.trees["bintree"]
+ fetcher = BinpkgFetcher(background=self.background,
+ logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
+ scheduler=self.scheduler)
+ self.pkg_path = fetcher.pkg_path
+ self._start_task(fetcher, self._fetcher_exit)
+
+ def _fetcher_exit(self, fetcher):
+
+ if self._default_exit(fetcher) != os.EX_OK:
+ self.wait()
+ return
+
+ verifier = BinpkgVerifier(background=self.background,
+ logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
+ scheduler=self.scheduler)
+ self._start_task(verifier, self._verifier_exit)
+
+ def _verifier_exit(self, verifier):
+ if self._default_exit(verifier) != os.EX_OK:
+ self.wait()
+ return
+
+ self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
+
+ self._current_task = None
+ self.returncode = os.EX_OK
+ self.wait()
+
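+# Flow summary (for reference): fetch the .tbz2, verify its digests,
+# then inject it into the local binary tree so that the later Binpkg
+# task sees it as an already-fetched, verified package.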
diff --git a/portage_with_autodep/pym/_emerge/BinpkgVerifier.py b/portage_with_autodep/pym/_emerge/BinpkgVerifier.py
new file mode 100644
index 0000000..0052967
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/BinpkgVerifier.py
@@ -0,0 +1,75 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AsynchronousTask import AsynchronousTask
+from portage.util import writemsg
+import io
+import sys
+import portage
+from portage import os
+from portage.package.ebuild.fetch import _checksum_failure_temp_file
+
+class BinpkgVerifier(AsynchronousTask):
+ __slots__ = ("logfile", "pkg", "scheduler")
+
+ def _start(self):
+ """
+ Note: Unlike a normal AsynchronousTask.start() method,
+ this one does all work synchronously. The returncode
+ attribute will be set before it returns.
+ """
+
+ pkg = self.pkg
+ root_config = pkg.root_config
+ bintree = root_config.trees["bintree"]
+ rval = os.EX_OK
+ stdout_orig = sys.stdout
+ stderr_orig = sys.stderr
+ global_havecolor = portage.output.havecolor
+ out = io.StringIO()
+ file_exists = True
+ try:
+ sys.stdout = out
+ sys.stderr = out
+ if portage.output.havecolor:
+ portage.output.havecolor = not self.background
+ try:
+ bintree.digestCheck(pkg)
+ except portage.exception.FileNotFound:
+ writemsg("!!! Fetching Binary failed " + \
+ "for '%s'\n" % pkg.cpv, noiselevel=-1)
+ rval = 1
+ file_exists = False
+ except portage.exception.DigestException as e:
+ writemsg("\n!!! Digest verification failed:\n",
+ noiselevel=-1)
+ writemsg("!!! %s\n" % e.value[0],
+ noiselevel=-1)
+ writemsg("!!! Reason: %s\n" % e.value[1],
+ noiselevel=-1)
+ writemsg("!!! Got: %s\n" % e.value[2],
+ noiselevel=-1)
+ writemsg("!!! Expected: %s\n" % e.value[3],
+ noiselevel=-1)
+ rval = 1
+ if rval == os.EX_OK:
+ pass
+ elif file_exists:
+ pkg_path = bintree.getname(pkg.cpv)
+ head, tail = os.path.split(pkg_path)
+ temp_filename = _checksum_failure_temp_file(head, tail)
+ writemsg("File renamed to '%s'\n" % (temp_filename,),
+ noiselevel=-1)
+ finally:
+ sys.stdout = stdout_orig
+ sys.stderr = stderr_orig
+ portage.output.havecolor = global_havecolor
+
+ msg = out.getvalue()
+ if msg:
+ self.scheduler.output(msg, log_path=self.logfile,
+ background=self.background)
+
+ self.returncode = rval
+ self.wait()
+
diff --git a/portage_with_autodep/pym/_emerge/Blocker.py b/portage_with_autodep/pym/_emerge/Blocker.py
new file mode 100644
index 0000000..9304606
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/Blocker.py
@@ -0,0 +1,15 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.Task import Task
+
+class Blocker(Task):
+
+ __hash__ = Task.__hash__
+ __slots__ = ("root", "atom", "cp", "eapi", "priority", "satisfied")
+
+ def __init__(self, **kwargs):
+ Task.__init__(self, **kwargs)
+ self.cp = self.atom.cp
+ self._hash_key = ("blocks", self.root, self.atom, self.eapi)
+ self._hash_value = hash(self._hash_key)
diff --git a/portage_with_autodep/pym/_emerge/BlockerCache.py b/portage_with_autodep/pym/_emerge/BlockerCache.py
new file mode 100644
index 0000000..5c4f43e
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/BlockerCache.py
@@ -0,0 +1,182 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+from portage.util import writemsg
+from portage.data import secpass
+import portage
+from portage import os
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+ long = int
+
+class BlockerCache(portage.cache.mappings.MutableMapping):
+ """This caches blockers of installed packages so that dep_check does not
+ have to be done for every single installed package on every invocation of
+ emerge. The cache is invalidated whenever it is detected that something
+ has changed that might alter the results of dep_check() calls:
+ 1) the set of installed packages (including COUNTER) has changed
+ """
+
+ # Number of uncached packages to trigger cache update, since
+ # it's wasteful to update it for every vdb change.
+ _cache_threshold = 5
+
+ class BlockerData(object):
+
+ __slots__ = ("__weakref__", "atoms", "counter")
+
+ def __init__(self, counter, atoms):
+ self.counter = counter
+ self.atoms = atoms
+
+ def __init__(self, myroot, vardb):
+ """ myroot is ignored in favour of EROOT """
+ self._vardb = vardb
+ self._cache_filename = os.path.join(vardb.settings['EROOT'],
+ portage.CACHE_PATH, "vdb_blockers.pickle")
+ self._cache_version = "1"
+ self._cache_data = None
+ self._modified = set()
+ self._load()
+
+ def _load(self):
+ try:
+ f = open(self._cache_filename, mode='rb')
+ mypickle = pickle.Unpickler(f)
+ try:
+ mypickle.find_global = None
+ except AttributeError:
+ # TODO: If py3k, override Unpickler.find_class().
+ pass
+ self._cache_data = mypickle.load()
+ f.close()
+ del f
+ except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError) as e:
+ if isinstance(e, pickle.UnpicklingError):
+ writemsg("!!! Error loading '%s': %s\n" % \
+ (self._cache_filename, str(e)), noiselevel=-1)
+ del e
+
+ cache_valid = self._cache_data and \
+ isinstance(self._cache_data, dict) and \
+ self._cache_data.get("version") == self._cache_version and \
+ isinstance(self._cache_data.get("blockers"), dict)
+ if cache_valid:
+ # Validate all the atoms and counters so that
+ # corruption is detected as soon as possible.
+ invalid_items = set()
+ for k, v in self._cache_data["blockers"].items():
+ if not isinstance(k, basestring):
+ invalid_items.add(k)
+ continue
+ try:
+ if portage.catpkgsplit(k) is None:
+ invalid_items.add(k)
+ continue
+ except portage.exception.InvalidData:
+ invalid_items.add(k)
+ continue
+ if not isinstance(v, tuple) or \
+ len(v) != 2:
+ invalid_items.add(k)
+ continue
+ counter, atoms = v
+ if not isinstance(counter, (int, long)):
+ invalid_items.add(k)
+ continue
+ if not isinstance(atoms, (list, tuple)):
+ invalid_items.add(k)
+ continue
+ invalid_atom = False
+ for atom in atoms:
+ if not isinstance(atom, basestring):
+ invalid_atom = True
+ break
+ if atom[:1] != "!" or \
+ not portage.isvalidatom(
+ atom, allow_blockers=True):
+ invalid_atom = True
+ break
+ if invalid_atom:
+ invalid_items.add(k)
+ continue
+
+ for k in invalid_items:
+ del self._cache_data["blockers"][k]
+ if not self._cache_data["blockers"]:
+ cache_valid = False
+
+ if not cache_valid:
+ self._cache_data = {"version":self._cache_version}
+ self._cache_data["blockers"] = {}
+ self._modified.clear()
+
+ def flush(self):
+ """If the current user has permission and the internal blocker cache
+ been updated, save it to disk and mark it unmodified. This is called
+ by emerge after it has proccessed blockers for all installed packages.
+ Currently, the cache is only written if the user has superuser
+ privileges (since that's required to obtain a lock), but all users
+ have read access and benefit from faster blocker lookups (as long as
+ the entire cache is still valid). The cache is stored as a pickled
+ dict object with the following format:
+
+ {
+ version : "1",
+ "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
+ }
+ """
+ if len(self._modified) >= self._cache_threshold and \
+ secpass >= 2:
+ try:
+ f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
+ pickle.dump(self._cache_data, f, protocol=2)
+ f.close()
+ portage.util.apply_secpass_permissions(
+ self._cache_filename, gid=portage.portage_gid, mode=0o644)
+ except (IOError, OSError) as e:
+ pass
+ self._modified.clear()
+
+ def __setitem__(self, cpv, blocker_data):
+ """
+ Update the cache and mark it as modified for a future call to
+ self.flush().
+
+ @param cpv: Package for which to cache blockers.
+ @type cpv: String
+ @param blocker_data: An object with counter and atoms attributes.
+ @type blocker_data: BlockerData
+ """
+ self._cache_data["blockers"][cpv] = \
+ (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
+ self._modified.add(cpv)
+
+ def __iter__(self):
+ if self._cache_data is None:
+ # triggered by python-trace
+ return iter([])
+ return iter(self._cache_data["blockers"])
+
+ def __len__(self):
+ """This needs to be implemented in order to avoid
+ infinite recursion in some cases."""
+ return len(self._cache_data["blockers"])
+
+ def __delitem__(self, cpv):
+ del self._cache_data["blockers"][cpv]
+
+ def __getitem__(self, cpv):
+ """
+ @rtype: BlockerData
+ @returns: An object with counter and atoms attributes.
+ """
+ return self.BlockerData(*self._cache_data["blockers"][cpv])
+
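+# Usage sketch (illustrative): the cache behaves like a mutable mapping
+# keyed by cpv, e.g.
+#
+#     cache = BlockerCache(vartree.root, vartree.dbapi)
+#     cache[cpv] = BlockerCache.BlockerData(counter, blocker_atoms)
+#     atoms = cache[cpv].atoms
+#     cache.flush()
+#
+# BlockerDB.findInstalledBlockers() below is the real consumer.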
diff --git a/portage_with_autodep/pym/_emerge/BlockerDB.py b/portage_with_autodep/pym/_emerge/BlockerDB.py
new file mode 100644
index 0000000..4819749
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/BlockerDB.py
@@ -0,0 +1,124 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+
+import portage
+from portage import os
+from portage import digraph
+from portage._sets.base import InternalPackageSet
+
+from _emerge.BlockerCache import BlockerCache
+from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
+
+if sys.hexversion >= 0x3000000:
+ long = int
+
+class BlockerDB(object):
+
+ def __init__(self, fake_vartree):
+ root_config = fake_vartree._root_config
+ self._root_config = root_config
+ self._vartree = root_config.trees["vartree"]
+ self._portdb = root_config.trees["porttree"].dbapi
+
+ self._dep_check_trees = None
+ self._fake_vartree = fake_vartree
+ self._dep_check_trees = {
+ self._vartree.root : {
+ "porttree" : fake_vartree,
+ "vartree" : fake_vartree,
+ }}
+
+ def findInstalledBlockers(self, new_pkg):
+ """
+ Search for installed run-time blockers in the root where
+ new_pkg is planned to be installed. This ignores build-time
+ blockers, since new_pkg is assumed to be built already.
+ """
+ blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
+ dep_keys = ["RDEPEND", "PDEPEND"]
+ settings = self._vartree.settings
+ stale_cache = set(blocker_cache)
+ fake_vartree = self._fake_vartree
+ dep_check_trees = self._dep_check_trees
+ vardb = fake_vartree.dbapi
+ installed_pkgs = list(vardb)
+
+ for inst_pkg in installed_pkgs:
+ stale_cache.discard(inst_pkg.cpv)
+ cached_blockers = blocker_cache.get(inst_pkg.cpv)
+ if cached_blockers is not None and \
+ cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
+ cached_blockers = None
+ if cached_blockers is not None:
+ blocker_atoms = cached_blockers.atoms
+ else:
+ # Use aux_get() to trigger FakeVartree global
+ # updates on *DEPEND when appropriate.
+ depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
+ success, atoms = portage.dep_check(depstr,
+ vardb, settings, myuse=inst_pkg.use.enabled,
+ trees=dep_check_trees, myroot=inst_pkg.root)
+ if not success:
+ pkg_location = os.path.join(inst_pkg.root,
+ portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
+ portage.writemsg("!!! %s/*DEPEND: %s\n" % \
+ (pkg_location, atoms), noiselevel=-1)
+ continue
+
+ blocker_atoms = [atom for atom in atoms \
+ if atom.startswith("!")]
+ blocker_atoms.sort()
+ counter = long(inst_pkg.metadata["COUNTER"])
+ blocker_cache[inst_pkg.cpv] = \
+ blocker_cache.BlockerData(counter, blocker_atoms)
+ for cpv in stale_cache:
+ del blocker_cache[cpv]
+ blocker_cache.flush()
+
+ blocker_parents = digraph()
+ blocker_atoms = []
+ for pkg in installed_pkgs:
+ for blocker_atom in blocker_cache[pkg.cpv].atoms:
+ blocker_atom = blocker_atom.lstrip("!")
+ blocker_atoms.append(blocker_atom)
+ blocker_parents.add(blocker_atom, pkg)
+
+ blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
+ blocking_pkgs = set()
+ for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
+ blocking_pkgs.update(blocker_parents.parent_nodes(atom))
+
+ # Check for blockers in the other direction.
+ depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
+ success, atoms = portage.dep_check(depstr,
+ vardb, settings, myuse=new_pkg.use.enabled,
+ trees=dep_check_trees, myroot=new_pkg.root)
+ if not success:
+ # We should never get this far with invalid deps.
+ show_invalid_depstring_notice(new_pkg, depstr, atoms)
+ assert False
+
+ blocker_atoms = [atom.lstrip("!") for atom in atoms \
+ if atom[:1] == "!"]
+ if blocker_atoms:
+ blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
+ for inst_pkg in installed_pkgs:
+ try:
+ next(blocker_atoms.iterAtomsForPackage(inst_pkg))
+ except (portage.exception.InvalidDependString, StopIteration):
+ continue
+ blocking_pkgs.add(inst_pkg)
+
+ return blocking_pkgs
+
+ def discardBlocker(self, pkg):
+ """Discard a package from the list of potential blockers.
+ This will match any package(s) with identical cpv or cp:slot."""
+ for cpv_match in self._fake_vartree.dbapi.match_pkgs("=%s" % (pkg.cpv,)):
+ if cpv_match.cp == pkg.cp:
+ self._fake_vartree.cpv_discard(cpv_match)
+ for slot_match in self._fake_vartree.dbapi.match_pkgs(pkg.slot_atom):
+ if slot_match.cp == pkg.cp:
+ self._fake_vartree.cpv_discard(slot_match)
diff --git a/portage_with_autodep/pym/_emerge/BlockerDepPriority.py b/portage_with_autodep/pym/_emerge/BlockerDepPriority.py
new file mode 100644
index 0000000..1004a37
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/BlockerDepPriority.py
@@ -0,0 +1,13 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.DepPriority import DepPriority
+class BlockerDepPriority(DepPriority):
+ __slots__ = ()
+ def __int__(self):
+ return 0
+
+ def __str__(self):
+ return 'blocker'
+
+BlockerDepPriority.instance = BlockerDepPriority()
diff --git a/portage_with_autodep/pym/_emerge/CompositeTask.py b/portage_with_autodep/pym/_emerge/CompositeTask.py
new file mode 100644
index 0000000..644a69b
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/CompositeTask.py
@@ -0,0 +1,157 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AsynchronousTask import AsynchronousTask
+from portage import os
+
+class CompositeTask(AsynchronousTask):
+
+ __slots__ = ("scheduler",) + ("_current_task",)
+
+ _TASK_QUEUED = -1
+
+ def isAlive(self):
+ return self._current_task is not None
+
+ def _cancel(self):
+ if self._current_task is not None:
+ if self._current_task is self._TASK_QUEUED:
+ self.returncode = 1
+ self._current_task = None
+ else:
+ self._current_task.cancel()
+
+ def _poll(self):
+ """
+ This does a loop calling self._current_task.poll()
+ repeatedly as long as the value of self._current_task
+ keeps changing. It calls poll() a maximum of one time
+ for a given self._current_task instance. This is useful
+ since calling poll() on a task can trigger advance to
+ the next task, which could eventually lead to the
+ returncode being set in cases when polling only a single
+ task would not have the same effect.
+ """
+
+ prev = None
+ while True:
+ task = self._current_task
+ if task is None or \
+ task is self._TASK_QUEUED or \
+ task is prev:
+ # don't poll the same task more than once
+ break
+ task.poll()
+ prev = task
+
+ return self.returncode
+
+ def _wait(self):
+
+ prev = None
+ while True:
+ task = self._current_task
+ if task is None:
+ # don't wait for the same task more than once
+ break
+ if task is self._TASK_QUEUED:
+ if self.cancelled:
+ self.returncode = 1
+ self._current_task = None
+ break
+ else:
+ self.scheduler.schedule(condition=self._task_queued_wait)
+ if self.returncode is not None:
+ break
+ elif self.cancelled:
+ self.returncode = 1
+ self._current_task = None
+ break
+ else:
+ # try this again with new _current_task value
+ continue
+ if task is prev:
+ if self.returncode is not None:
+ # This is expected if we're being
+ # called from the task's exit listener
+ # after it's been cancelled.
+ break
+ # Before the task.wait() method returned, an exit
+ # listener should have set self._current_task to either
+ # a different task or None. Something is wrong.
+ raise AssertionError("self._current_task has not " + \
+ "changed since calling wait", self, task)
+ task.wait()
+ prev = task
+
+ return self.returncode
+
+ def _assert_current(self, task):
+ """
+ Raises an AssertionError if the given task is not the
+ same one as self._current_task. This can be useful
+ for detecting bugs.
+ """
+ if task is not self._current_task:
+ raise AssertionError("Unrecognized task: %s" % (task,))
+
+ def _default_exit(self, task):
+ """
+ Calls _assert_current() on the given task and then sets the
+ composite returncode attribute if task.returncode != os.EX_OK.
+ If the task failed then self._current_task will be set to None.
+ Subclasses can use this as a generic task exit callback.
+
+ @rtype: int
+ @returns: The task.returncode attribute.
+ """
+ self._assert_current(task)
+ if task.returncode != os.EX_OK:
+ self.returncode = task.returncode
+ self._current_task = None
+ return task.returncode
+
+ def _final_exit(self, task):
+ """
+ Assumes that task is the final task of this composite task.
+ Calls _default_exit() and sets self.returncode to the task's
+ returncode and sets self._current_task to None.
+ """
+ self._default_exit(task)
+ self._current_task = None
+ self.returncode = task.returncode
+ return self.returncode
+
+ def _default_final_exit(self, task):
+ """
+ This calls _final_exit() and then wait().
+
+ Subclasses can use this as a generic final task exit callback.
+
+ """
+ self._final_exit(task)
+ return self.wait()
+
+ def _start_task(self, task, exit_handler):
+ """
+ Register exit handler for the given task, set it
+ as self._current_task, and call task.start().
+
+ Subclasses can use this as a generic way to start
+ a task.
+
+ """
+ task.addExitListener(exit_handler)
+ self._current_task = task
+ task.start()
+
+ def _task_queued(self, task):
+ task.addStartListener(self._task_queued_start_handler)
+ self._current_task = self._TASK_QUEUED
+
+ def _task_queued_start_handler(self, task):
+ self._current_task = task
+
+ def _task_queued_wait(self):
+ return self._current_task is not self._TASK_QUEUED or \
+ self.cancelled or self.returncode is not None
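+
+# Subclass sketch (illustrative): concrete tasks chain sub-tasks by
+# registering exit callbacks, as the classes above and below do:
+#
+#     class ExampleTask(CompositeTask):
+#         __slots__ = ()
+#
+#         def _start(self):
+#             self._start_task(FirstSubTask(scheduler=self.scheduler),
+#                 self._first_exit)
+#
+#         def _first_exit(self, first):
+#             if self._default_exit(first) != os.EX_OK:
+#                 self.wait()
+#                 return
+#             self._start_task(SecondSubTask(scheduler=self.scheduler),
+#                 self._default_final_exit)
+#
+# FirstSubTask and SecondSubTask are hypothetical AsynchronousTask
+# subclasses; Binpkg._fetcher_exit() above follows the same pattern.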
diff --git a/portage_with_autodep/pym/_emerge/DepPriority.py b/portage_with_autodep/pym/_emerge/DepPriority.py
new file mode 100644
index 0000000..3c2256a
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/DepPriority.py
@@ -0,0 +1,49 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AbstractDepPriority import AbstractDepPriority
+class DepPriority(AbstractDepPriority):
+
+ __slots__ = ("satisfied", "optional", "ignored")
+
+ def __int__(self):
+ """
+ Note: These priorities are only used for measuring hardness
+ in the circular dependency display via digraph.debug_print(),
+ and nothing more. For actual merge order calculations, the
+ measures defined by the DepPriorityNormalRange and
+ DepPrioritySatisfiedRange classes are used.
+
+ Attributes Hardness
+
+ buildtime 0
+ runtime -1
+ runtime_post -2
+ optional -3
+ (none of the above) -4
+
+ """
+
+ if self.optional:
+ return -3
+ if self.buildtime:
+ return 0
+ if self.runtime:
+ return -1
+ if self.runtime_post:
+ return -2
+ return -4
+
+ def __str__(self):
+ if self.ignored:
+ return "ignored"
+ if self.optional:
+ return "optional"
+ if self.buildtime:
+ return "buildtime"
+ if self.runtime:
+ return "runtime"
+ if self.runtime_post:
+ return "runtime_post"
+ return "soft"
+
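+# Examples (illustrative): int(DepPriority(buildtime=True)) == 0 and
+# str(DepPriority(optional=True)) == "optional"; optional is tested
+# first in __int__, so it wins when several flags are set.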
diff --git a/portage_with_autodep/pym/_emerge/DepPriorityNormalRange.py b/portage_with_autodep/pym/_emerge/DepPriorityNormalRange.py
new file mode 100644
index 0000000..8639554
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/DepPriorityNormalRange.py
@@ -0,0 +1,47 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.DepPriority import DepPriority
+class DepPriorityNormalRange(object):
+ """
+ DepPriority properties Index Category
+
+ buildtime HARD
+ runtime 3 MEDIUM
+ runtime_post 2 MEDIUM_SOFT
+ optional 1 SOFT
+ (none of the above) 0 NONE
+ """
+ MEDIUM = 3
+ MEDIUM_SOFT = 2
+ SOFT = 1
+ NONE = 0
+
+ @classmethod
+ def _ignore_optional(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ return bool(priority.optional)
+
+ @classmethod
+ def _ignore_runtime_post(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ return bool(priority.optional or priority.runtime_post)
+
+ @classmethod
+ def _ignore_runtime(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ return bool(priority.optional or not priority.buildtime)
+
+ ignore_medium = _ignore_runtime
+ ignore_medium_soft = _ignore_runtime_post
+ ignore_soft = _ignore_optional
+
+DepPriorityNormalRange.ignore_priority = (
+ None,
+ DepPriorityNormalRange._ignore_optional,
+ DepPriorityNormalRange._ignore_runtime_post,
+ DepPriorityNormalRange._ignore_runtime
+)
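+# Note: ignore_priority[i] masks every category whose index in the
+# table above is <= i, e.g. ignore_priority[2] treats optional and
+# runtime_post edges as ignorable while runtime and buildtime count.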
diff --git a/portage_with_autodep/pym/_emerge/DepPrioritySatisfiedRange.py b/portage_with_autodep/pym/_emerge/DepPrioritySatisfiedRange.py
new file mode 100644
index 0000000..edb29df
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/DepPrioritySatisfiedRange.py
@@ -0,0 +1,85 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.DepPriority import DepPriority
+class DepPrioritySatisfiedRange(object):
+ """
+ DepPriority Index Category
+
+ not satisfied and buildtime HARD
+ not satisfied and runtime 6 MEDIUM
+ not satisfied and runtime_post 5 MEDIUM_SOFT
+ satisfied and buildtime 4 SOFT
+ satisfied and runtime 3 SOFT
+ satisfied and runtime_post 2 SOFT
+ optional 1 SOFT
+ (none of the above) 0 NONE
+ """
+ MEDIUM = 6
+ MEDIUM_SOFT = 5
+ SOFT = 4
+ NONE = 0
+
+ @classmethod
+ def _ignore_optional(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ return bool(priority.optional)
+
+ @classmethod
+ def _ignore_satisfied_runtime_post(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ if priority.optional:
+ return True
+ if not priority.satisfied:
+ return False
+ return bool(priority.runtime_post)
+
+ @classmethod
+ def _ignore_satisfied_runtime(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ if priority.optional:
+ return True
+ if not priority.satisfied:
+ return False
+ return not priority.buildtime
+
+ @classmethod
+ def _ignore_satisfied_buildtime(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ return bool(priority.optional or \
+ priority.satisfied)
+
+ @classmethod
+ def _ignore_runtime_post(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ return bool(priority.optional or \
+ priority.satisfied or \
+ priority.runtime_post)
+
+ @classmethod
+ def _ignore_runtime(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ return bool(priority.satisfied or \
+ priority.optional or \
+ not priority.buildtime)
+
+ ignore_medium = _ignore_runtime
+ ignore_medium_soft = _ignore_runtime_post
+ ignore_soft = _ignore_satisfied_buildtime
+
+
+DepPrioritySatisfiedRange.ignore_priority = (
+ None,
+ DepPrioritySatisfiedRange._ignore_optional,
+ DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
+ DepPrioritySatisfiedRange._ignore_satisfied_runtime,
+ DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
+ DepPrioritySatisfiedRange._ignore_runtime_post,
+ DepPrioritySatisfiedRange._ignore_runtime
+)
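+# As with DepPriorityNormalRange, ignore_priority[i] masks categories
+# of index <= i; the extra steps let already-satisfied dependencies be
+# ignored progressively before unsatisfied runtime deps are.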
diff --git a/portage_with_autodep/pym/_emerge/Dependency.py b/portage_with_autodep/pym/_emerge/Dependency.py
new file mode 100644
index 0000000..0f746b6
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/Dependency.py
@@ -0,0 +1,20 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.DepPriority import DepPriority
+from _emerge.SlotObject import SlotObject
+class Dependency(SlotObject):
+ __slots__ = ("atom", "blocker", "child", "depth",
+ "parent", "onlydeps", "priority", "root",
+ "collapsed_parent", "collapsed_priority")
+ def __init__(self, **kwargs):
+ SlotObject.__init__(self, **kwargs)
+ if self.priority is None:
+ self.priority = DepPriority()
+ if self.depth is None:
+ self.depth = 0
+ if self.collapsed_parent is None:
+ self.collapsed_parent = self.parent
+ if self.collapsed_priority is None:
+ self.collapsed_priority = self.priority
+
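+# Example (illustrative): a runtime dependency edge might be built as
+# Dependency(atom=atom, parent=parent_pkg, root=parent_pkg.root,
+# priority=DepPriority(runtime=True)); depth and the collapsed_* fields
+# then take the defaults assigned in __init__ above.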
diff --git a/portage_with_autodep/pym/_emerge/DependencyArg.py b/portage_with_autodep/pym/_emerge/DependencyArg.py
new file mode 100644
index 0000000..861d837
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/DependencyArg.py
@@ -0,0 +1,33 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+
+from portage import _encodings, _unicode_encode, _unicode_decode
+
+class DependencyArg(object):
+ def __init__(self, arg=None, root_config=None):
+ self.arg = arg
+ self.root_config = root_config
+
+ def __eq__(self, other):
+ if self.__class__ is not other.__class__:
+ return False
+ return self.arg == other.arg and \
+ self.root_config.root == other.root_config.root
+
+ def __hash__(self):
+ return hash((self.arg, self.root_config.root))
+
+ def __str__(self):
+ # Force unicode format string for python-2.x safety,
+ # ensuring that self.arg.__unicode__() is used
+ # when necessary.
+ return _unicode_decode("%s") % (self.arg,)
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(), encoding=_encodings['content'])
diff --git a/portage_with_autodep/pym/_emerge/EbuildBinpkg.py b/portage_with_autodep/pym/_emerge/EbuildBinpkg.py
new file mode 100644
index 0000000..b7d43ba
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildBinpkg.py
@@ -0,0 +1,46 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.CompositeTask import CompositeTask
+from _emerge.EbuildPhase import EbuildPhase
+from portage import os
+
+class EbuildBinpkg(CompositeTask):
+ """
+ This assumes that src_install() has successfully completed.
+ """
+ __slots__ = ('pkg', 'settings') + \
+ ('_binpkg_tmpfile',)
+
+ def _start(self):
+ pkg = self.pkg
+ root_config = pkg.root_config
+ bintree = root_config.trees["bintree"]
+ bintree.prevent_collision(pkg.cpv)
+ binpkg_tmpfile = os.path.join(bintree.pkgdir,
+ pkg.cpv + ".tbz2." + str(os.getpid()))
+ bintree._ensure_dir(os.path.dirname(binpkg_tmpfile))
+
+ self._binpkg_tmpfile = binpkg_tmpfile
+ self.settings["PORTAGE_BINPKG_TMPFILE"] = self._binpkg_tmpfile
+
+ package_phase = EbuildPhase(background=self.background,
+ phase='package', scheduler=self.scheduler,
+ settings=self.settings)
+
+ self._start_task(package_phase, self._package_phase_exit)
+
+ def _package_phase_exit(self, package_phase):
+
+ self.settings.pop("PORTAGE_BINPKG_TMPFILE", None)
+ if self._default_exit(package_phase) != os.EX_OK:
+ self.wait()
+ return
+
+ pkg = self.pkg
+ bintree = pkg.root_config.trees["bintree"]
+ bintree.inject(pkg.cpv, filename=self._binpkg_tmpfile)
+
+ self._current_task = None
+ self.returncode = os.EX_OK
+ self.wait()
diff --git a/portage_with_autodep/pym/_emerge/EbuildBuild.py b/portage_with_autodep/pym/_emerge/EbuildBuild.py
new file mode 100644
index 0000000..1c423a3
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildBuild.py
@@ -0,0 +1,426 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.EbuildExecuter import EbuildExecuter
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.EbuildBinpkg import EbuildBinpkg
+from _emerge.EbuildFetcher import EbuildFetcher
+from _emerge.CompositeTask import CompositeTask
+from _emerge.EbuildMerge import EbuildMerge
+from _emerge.EbuildFetchonly import EbuildFetchonly
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.EventsAnalyser import EventsAnalyser, FilterProcGenerator
+from _emerge.EventsLogger import EventsLogger
+from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
+from portage.util import writemsg
+import portage
+from portage import os
+from portage.output import colorize
+from portage.package.ebuild.digestcheck import digestcheck
+from portage.package.ebuild.doebuild import _check_temp_dir
+from portage.package.ebuild._spawn_nofetch import spawn_nofetch
+
+class EbuildBuild(CompositeTask):
+
+ __slots__ = ("args_set", "config_pool", "find_blockers",
+ "ldpath_mtimes", "logger", "logserver", "opts", "pkg", "pkg_count",
+ "prefetcher", "settings", "world_atom") + \
+ ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
+
+ def _start(self):
+
+ pkg = self.pkg
+ settings = self.settings
+
+ rval = _check_temp_dir(settings)
+ if rval != os.EX_OK:
+ self.returncode = rval
+ self._current_task = None
+ self.wait()
+ return
+
+ root_config = pkg.root_config
+ tree = "porttree"
+ self._tree = tree
+ portdb = root_config.trees[tree].dbapi
+ settings.setcpv(pkg)
+ settings.configdict["pkg"]["EMERGE_FROM"] = "ebuild"
+ if self.opts.buildpkgonly:
+ settings.configdict["pkg"]["MERGE_TYPE"] = "buildonly"
+ else:
+ settings.configdict["pkg"]["MERGE_TYPE"] = "source"
+ ebuild_path = portdb.findname(pkg.cpv, myrepo=pkg.repo)
+ if ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s'" % pkg.cpv)
+ self._ebuild_path = ebuild_path
+ portage.doebuild_environment(ebuild_path, 'setup',
+ settings=self.settings, db=portdb)
+
+ # Check the manifest here since with --keep-going mode it's
+ # currently possible to get this far with a broken manifest.
+ if not self._check_manifest():
+ self.returncode = 1
+ self._current_task = None
+ self.wait()
+ return
+
+ prefetcher = self.prefetcher
+ if prefetcher is None:
+ pass
+ elif prefetcher.isAlive() and \
+ prefetcher.poll() is None:
+
+ waiting_msg = "Fetching files " + \
+ "in the background. " + \
+ "To view fetch progress, run `tail -f " + \
+ "/var/log/emerge-fetch.log` in another " + \
+ "terminal."
+ msg_prefix = colorize("GOOD", " * ")
+ from textwrap import wrap
+ waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
+ for line in wrap(waiting_msg, 65))
+ if not self.background:
+ writemsg(waiting_msg, noiselevel=-1)
+
+ self._current_task = prefetcher
+ prefetcher.addExitListener(self._prefetch_exit)
+ return
+
+ self._prefetch_exit(prefetcher)
+
+ def _check_manifest(self):
+ success = True
+
+ settings = self.settings
+ if 'strict' in settings.features:
+ settings['O'] = os.path.dirname(self._ebuild_path)
+ quiet_setting = settings.get('PORTAGE_QUIET')
+ settings['PORTAGE_QUIET'] = '1'
+ try:
+ success = digestcheck([], settings, strict=True)
+ finally:
+ if quiet_setting:
+ settings['PORTAGE_QUIET'] = quiet_setting
+ else:
+ del settings['PORTAGE_QUIET']
+
+ return success
+
+ def _prefetch_exit(self, prefetcher):
+
+ opts = self.opts
+ pkg = self.pkg
+ settings = self.settings
+
+ if opts.fetchonly:
+ if opts.pretend:
+ fetcher = EbuildFetchonly(
+ fetch_all=opts.fetch_all_uri,
+ pkg=pkg, pretend=opts.pretend,
+ settings=settings)
+ retval = fetcher.execute()
+ self.returncode = retval
+ self.wait()
+ return
+ else:
+ fetcher = EbuildFetcher(
+ config_pool=self.config_pool,
+ ebuild_path=self._ebuild_path,
+ fetchall=self.opts.fetch_all_uri,
+ fetchonly=self.opts.fetchonly,
+ background=False,
+ logfile=None,
+ pkg=self.pkg,
+ scheduler=self.scheduler)
+ self._start_task(fetcher, self._fetchonly_exit)
+ return
+
+ self._build_dir = EbuildBuildDir(
+ scheduler=self.scheduler, settings=settings)
+ self._build_dir.lock()
+
+ # Cleaning needs to happen before fetch, since the build dir
+ # is used for log handling.
+ msg = " === (%s of %s) Cleaning (%s::%s)" % \
+ (self.pkg_count.curval, self.pkg_count.maxval,
+ self.pkg.cpv, self._ebuild_path)
+ short_msg = "emerge: (%s of %s) %s Clean" % \
+ (self.pkg_count.curval, self.pkg_count.maxval, self.pkg.cpv)
+ self.logger.log(msg, short_msg=short_msg)
+
+ pre_clean_phase = EbuildPhase(background=self.background,
+ phase='clean', scheduler=self.scheduler, settings=self.settings)
+ self._start_task(pre_clean_phase, self._pre_clean_exit)
+
+ def _fetchonly_exit(self, fetcher):
+ self._final_exit(fetcher)
+ if self.returncode != os.EX_OK:
+ portdb = self.pkg.root_config.trees[self._tree].dbapi
+ spawn_nofetch(portdb, self._ebuild_path, settings=self.settings)
+ self.wait()
+
+ def _pre_clean_exit(self, pre_clean_phase):
+ if self._default_exit(pre_clean_phase) != os.EX_OK:
+ self._unlock_builddir()
+ self.wait()
+ return
+
+ # for log handling
+ portage.prepare_build_dirs(self.pkg.root, self.settings, 1)
+
+ fetcher = EbuildFetcher(config_pool=self.config_pool,
+ ebuild_path=self._ebuild_path,
+ fetchall=self.opts.fetch_all_uri,
+ fetchonly=self.opts.fetchonly,
+ background=self.background,
+ logfile=self.settings.get('PORTAGE_LOG_FILE'),
+ pkg=self.pkg, scheduler=self.scheduler)
+
+ try:
+ already_fetched = fetcher.already_fetched(self.settings)
+ except portage.exception.InvalidDependString as e:
+ msg_lines = []
+ msg = "Fetch failed for '%s' due to invalid SRC_URI: %s" % \
+ (self.pkg.cpv, e)
+ msg_lines.append(msg)
+ fetcher._eerror(msg_lines)
+ portage.elog.elog_process(self.pkg.cpv, self.settings)
+ self.returncode = 1
+ self._current_task = None
+ self._unlock_builddir()
+ self.wait()
+ return
+
+ if already_fetched:
+ # This case is optimized to skip the fetch queue.
+ fetcher = None
+ self._fetch_exit(fetcher)
+ return
+
+ # Allow the Scheduler's fetch queue to control the
+ # number of concurrent fetchers.
+ fetcher.addExitListener(self._fetch_exit)
+ self._task_queued(fetcher)
+ self.scheduler.fetch.schedule(fetcher)
+
+ def _fetch_exit(self, fetcher):
+
+ if fetcher is not None and \
+ self._default_exit(fetcher) != os.EX_OK:
+ self._fetch_failed()
+ return
+
+ # discard successful fetch log
+ self._build_dir.clean_log()
+ pkg = self.pkg
+ logger = self.logger
+ opts = self.opts
+ pkg_count = self.pkg_count
+ scheduler = self.scheduler
+ settings = self.settings
+ features = settings.features
+ ebuild_path = self._ebuild_path
+ system_set = pkg.root_config.sets["system"]
+
+ #buildsyspkg: Check if we need to _force_ binary package creation
+ self._issyspkg = "buildsyspkg" in features and \
+ system_set.findAtomForPackage(pkg) and \
+ not opts.buildpkg
+
+ if opts.buildpkg or self._issyspkg:
+
+ self._buildpkg = True
+
+ msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
+ short_msg = "emerge: (%s of %s) %s Compile" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv)
+ logger.log(msg, short_msg=short_msg)
+
+ else:
+ msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
+ short_msg = "emerge: (%s of %s) %s Compile" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv)
+ logger.log(msg, short_msg=short_msg)
+
+ build = EbuildExecuter(background=self.background, pkg=pkg,
+ scheduler=scheduler, settings=settings)
+
+ build.addStartListener(self._build_start)
+ build.addExitListener(self._build_stop)
+
+ self._start_task(build, self._build_exit)
+
+ def _build_start(self, phase):
+ if "depcheck" in self.settings["FEATURES"] or \
+ "depcheckstrict" in self.settings["FEATURES"]:
+ # Let's start a log-listening server.
+ temp_path = self.settings.get("T", self.settings["PORTAGE_TMPDIR"])
+
+ if "depcheckstrict" not in self.settings["FEATURES"]:
+ # use the default filter_proc
+ self.logserver = EventsLogger(socket_dir=temp_path)
+ else:
+ portage.util.writemsg("Getting list of allowed files... " + \
+ "This may take some time\n")
+ filter_gen = FilterProcGenerator(self.pkg.cpv, self.settings)
+ filter_proc = filter_gen.get_filter_proc()
+ self.logserver = EventsLogger(socket_dir=temp_path,
+ filter_proc=filter_proc)
+
+ self.logserver.start()
+
+ # Copy the socket path to the LOG_SOCKET environment variable.
+ env = self.settings.configdict["pkg"]
+ env['LOG_SOCKET'] = self.logserver.socket_name
+
+ def _build_stop(self, phase):
+ if "depcheck" in self.settings["FEATURES"] or \
+ "depcheckstrict" in self.settings["FEATURES"]:
+ # Delete LOG_SOCKET from the environment.
+ env = self.settings.configdict["pkg"]
+ if 'LOG_SOCKET' in env:
+ del env['LOG_SOCKET']
+
+ events = self.logserver.stop()
+ self.logserver = None
+ analyser = EventsAnalyser(self.pkg.cpv, events, self.settings)
+ analyser.display() # show the analysis
+
+ def _fetch_failed(self):
+ # We only call the pkg_nofetch phase if either RESTRICT=fetch
+ # is set or the package has explicitly overridden the default
+ # pkg_nofetch implementation. This allows specialized messages
+ # to be displayed for problematic packages even though they do
+ # not set RESTRICT=fetch (bug #336499).
+
+ if 'fetch' not in self.pkg.metadata.restrict and \
+ 'nofetch' not in self.pkg.metadata.defined_phases:
+ self._unlock_builddir()
+ self.wait()
+ return
+
+ self.returncode = None
+ nofetch_phase = EbuildPhase(background=self.background,
+ phase='nofetch', scheduler=self.scheduler, settings=self.settings)
+ self._start_task(nofetch_phase, self._nofetch_exit)
+
+ def _nofetch_exit(self, nofetch_phase):
+ self._final_exit(nofetch_phase)
+ self._unlock_builddir()
+ self.returncode = 1
+ self.wait()
+
+ def _unlock_builddir(self):
+ portage.elog.elog_process(self.pkg.cpv, self.settings)
+ self._build_dir.unlock()
+
+ def _build_exit(self, build):
+ if self._default_exit(build) != os.EX_OK:
+ self._unlock_builddir()
+ self.wait()
+ return
+
+ buildpkg = self._buildpkg
+
+ if not buildpkg:
+ self._final_exit(build)
+ self.wait()
+ return
+
+ if self._issyspkg:
+ msg = ">>> This is a system package, " + \
+ "let's pack a rescue tarball.\n"
+ self.scheduler.output(msg,
+ log_path=self.settings.get("PORTAGE_LOG_FILE"))
+
+ packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
+ scheduler=self.scheduler, settings=self.settings)
+
+ self._start_task(packager, self._buildpkg_exit)
+
+ def _buildpkg_exit(self, packager):
+ """
+ Release the build dir lock when there is a failure or
+ when in buildpkgonly mode. Otherwise, the lock will
+ be released when merge() is called.
+ """
+
+ if self._default_exit(packager) != os.EX_OK:
+ self._unlock_builddir()
+ self.wait()
+ return
+
+ if self.opts.buildpkgonly:
+ phase = 'success_hooks'
+ success_hooks = MiscFunctionsProcess(
+ background=self.background,
+ commands=[phase], phase=phase,
+ scheduler=self.scheduler, settings=self.settings)
+ self._start_task(success_hooks,
+ self._buildpkgonly_success_hook_exit)
+ return
+
+ # Continue holding the builddir lock until
+ # after the package has been installed.
+ self._current_task = None
+ self.returncode = packager.returncode
+ self.wait()
+
+ def _buildpkgonly_success_hook_exit(self, success_hooks):
+ self._default_exit(success_hooks)
+ self.returncode = None
+ # Need to call "clean" phase for buildpkgonly mode
+ portage.elog.elog_process(self.pkg.cpv, self.settings)
+ phase = 'clean'
+ clean_phase = EbuildPhase(background=self.background,
+ phase=phase, scheduler=self.scheduler, settings=self.settings)
+ self._start_task(clean_phase, self._clean_exit)
+
+ def _clean_exit(self, clean_phase):
+ if self._final_exit(clean_phase) != os.EX_OK or \
+ self.opts.buildpkgonly:
+ self._unlock_builddir()
+ self.wait()
+
+ def create_install_task(self):
+ """
+ Install the package and then clean up and release locks.
+ Only call this after the build has completed successfully
+ and neither fetchonly nor buildpkgonly mode are enabled.
+ """
+
+ ldpath_mtimes = self.ldpath_mtimes
+ logger = self.logger
+ pkg = self.pkg
+ pkg_count = self.pkg_count
+ settings = self.settings
+ world_atom = self.world_atom
+ ebuild_path = self._ebuild_path
+ tree = self._tree
+
+ task = EbuildMerge(find_blockers=self.find_blockers,
+ ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
+ pkg_count=pkg_count, pkg_path=ebuild_path,
+ scheduler=self.scheduler,
+ settings=settings, tree=tree, world_atom=world_atom)
+
+ msg = " === (%s of %s) Merging (%s::%s)" % \
+ (pkg_count.curval, pkg_count.maxval,
+ pkg.cpv, ebuild_path)
+ short_msg = "emerge: (%s of %s) %s Merge" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv)
+ logger.log(msg, short_msg=short_msg)
+
+ task.addExitListener(self._install_exit)
+ return task
+
+ def _install_exit(self, task):
+ self._unlock_builddir()
diff --git a/portage_with_autodep/pym/_emerge/EbuildBuildDir.py b/portage_with_autodep/pym/_emerge/EbuildBuildDir.py
new file mode 100644
index 0000000..ddc5fe0
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildBuildDir.py
@@ -0,0 +1,109 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AsynchronousLock import AsynchronousLock
+from _emerge.SlotObject import SlotObject
+import portage
+from portage import os
+from portage.exception import PortageException
+import errno
+
+class EbuildBuildDir(SlotObject):
+
+ __slots__ = ("scheduler", "settings",
+ "locked", "_catdir", "_lock_obj")
+
+ def __init__(self, **kwargs):
+ SlotObject.__init__(self, **kwargs)
+ self.locked = False
+
+ def lock(self):
+ """
+ This raises an AlreadyLocked exception if lock() is called
+ while a lock is already held. In order to avoid this, call
+ unlock() or check whether the "locked" attribute is True
+ or False before calling lock().
+ """
+ if self._lock_obj is not None:
+ raise self.AlreadyLocked((self._lock_obj,))
+
+ dir_path = self.settings.get('PORTAGE_BUILDDIR')
+ if not dir_path:
+ raise AssertionError('PORTAGE_BUILDDIR is unset')
+ catdir = os.path.dirname(dir_path)
+ self._catdir = catdir
+
+ try:
+ portage.util.ensure_dirs(os.path.dirname(catdir),
+ gid=portage.portage_gid,
+ mode=0o70, mask=0)
+ except PortageException:
+ if not os.path.isdir(os.path.dirname(catdir)):
+ raise
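+ # Hold a lock on the category directory while the build directory
+ # inside it is created and locked; unlock() takes the same category
+ # lock before rmdir'ing an empty category directory, so lock() and
+ # unlock() cannot race on catdir creation and removal.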
+ catdir_lock = AsynchronousLock(path=catdir, scheduler=self.scheduler)
+ catdir_lock.start()
+ catdir_lock.wait()
+ self._assert_lock(catdir_lock)
+
+ try:
+ try:
+ portage.util.ensure_dirs(catdir,
+ gid=portage.portage_gid,
+ mode=0o70, mask=0)
+ except PortageException:
+ if not os.path.isdir(catdir):
+ raise
+ builddir_lock = AsynchronousLock(path=dir_path,
+ scheduler=self.scheduler)
+ builddir_lock.start()
+ builddir_lock.wait()
+ self._assert_lock(builddir_lock)
+ self._lock_obj = builddir_lock
+ self.settings['PORTAGE_BUILDIR_LOCKED'] = '1'
+ finally:
+ self.locked = self._lock_obj is not None
+ catdir_lock.unlock()
+
+ def _assert_lock(self, async_lock):
+ if async_lock.returncode != os.EX_OK:
+ # TODO: create a better way to propagate this error to the caller
+ raise AssertionError("AsynchronousLock failed with returncode %s" \
+ % (async_lock.returncode,))
+
+ def clean_log(self):
+ """Discard existing log. The log will not be be discarded
+ in cases when it would not make sense, like when FEATURES=keepwork
+ is enabled."""
+ settings = self.settings
+ if 'keepwork' in settings.features:
+ return
+ log_file = settings.get('PORTAGE_LOG_FILE')
+ if log_file is not None and os.path.isfile(log_file):
+ try:
+ os.unlink(log_file)
+ except OSError:
+ pass
+
+ def unlock(self):
+ if self._lock_obj is None:
+ return
+
+ self._lock_obj.unlock()
+ self._lock_obj = None
+ self.locked = False
+ self.settings.pop('PORTAGE_BUILDIR_LOCKED', None)
+ catdir_lock = AsynchronousLock(path=self._catdir, scheduler=self.scheduler)
+ catdir_lock.start()
+ if catdir_lock.wait() == os.EX_OK:
+ try:
+ os.rmdir(self._catdir)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT,
+ errno.ENOTEMPTY, errno.EEXIST, errno.EPERM):
+ raise
+ finally:
+ catdir_lock.unlock()
+
+ class AlreadyLocked(portage.exception.PortageException):
+ pass
+
diff --git a/portage_with_autodep/pym/_emerge/EbuildExecuter.py b/portage_with_autodep/pym/_emerge/EbuildExecuter.py
new file mode 100644
index 0000000..f8febd4
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildExecuter.py
@@ -0,0 +1,99 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.TaskSequence import TaskSequence
+from _emerge.CompositeTask import CompositeTask
+import portage
+from portage import os
+from portage.eapi import eapi_has_src_prepare_and_src_configure, \
+ eapi_exports_replace_vars
+from portage.package.ebuild.doebuild import _prepare_fake_distdir
+
+class EbuildExecuter(CompositeTask):
+
+ __slots__ = ("pkg", "scheduler", "settings")
+
+ _phases = ("prepare", "configure", "compile", "test", "install")
+
+ _live_eclasses = frozenset([
+ "bzr",
+ "cvs",
+ "darcs",
+ "git",
+ "git-2",
+ "mercurial",
+ "subversion",
+ "tla",
+ ])
+
+ def _start(self):
+ pkg = self.pkg
+ scheduler = self.scheduler
+ settings = self.settings
+ cleanup = 0
+ portage.prepare_build_dirs(pkg.root, settings, cleanup)
+
+ portdb = pkg.root_config.trees['porttree'].dbapi
+ ebuild_path = settings['EBUILD']
+ alist = settings.configdict["pkg"].get("A", "").split()
+ _prepare_fake_distdir(settings, alist)
+
+ if eapi_exports_replace_vars(settings['EAPI']):
+ vardb = pkg.root_config.trees['vartree'].dbapi
+ settings["REPLACING_VERSIONS"] = " ".join(
+ set(portage.versions.cpv_getversion(match) \
+ for match in vardb.match(pkg.slot_atom) + \
+ vardb.match('='+pkg.cpv)))
+
+ setup_phase = EbuildPhase(background=self.background,
+ phase="setup", scheduler=scheduler,
+ settings=settings)
+
+ setup_phase.addExitListener(self._setup_exit)
+ self._task_queued(setup_phase)
+ self.scheduler.scheduleSetup(setup_phase)
+
+ def _setup_exit(self, setup_phase):
+
+ if self._default_exit(setup_phase) != os.EX_OK:
+ self.wait()
+ return
+
+ unpack_phase = EbuildPhase(background=self.background,
+ phase="unpack", scheduler=self.scheduler,
+ settings=self.settings)
+
+ if self._live_eclasses.intersection(self.pkg.inherited):
+ # Serialize $DISTDIR access for live ebuilds since
+ # otherwise they can interfere with each other.
+
+ unpack_phase.addExitListener(self._unpack_exit)
+ self._task_queued(unpack_phase)
+ self.scheduler.scheduleUnpack(unpack_phase)
+
+ else:
+ self._start_task(unpack_phase, self._unpack_exit)
+
+ def _unpack_exit(self, unpack_phase):
+
+ if self._default_exit(unpack_phase) != os.EX_OK:
+ self.wait()
+ return
+
+ ebuild_phases = TaskSequence(scheduler=self.scheduler)
+
+ pkg = self.pkg
+ phases = self._phases
+ eapi = pkg.metadata["EAPI"]
+ if not eapi_has_src_prepare_and_src_configure(eapi):
+ # skip src_prepare and src_configure
+ phases = phases[2:]
+
+ for phase in phases:
+ ebuild_phases.add(EbuildPhase(background=self.background,
+ phase=phase, scheduler=self.scheduler,
+ settings=self.settings))
+
+ self._start_task(ebuild_phases, self._default_final_exit)
+
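+# A minimal sketch (not part of the original module) of the EAPI-based phase
+# trimming performed in _unpack_exit() above: EAPIs without src_prepare and
+# src_configure simply skip the first two entries of the phase tuple.
+def _phase_trim_sketch(eapi):
+    phases = ("prepare", "configure", "compile", "test", "install")
+    if not eapi_has_src_prepare_and_src_configure(eapi):
+        phases = phases[2:]  # old EAPIs start at src_compile
+    return phases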
diff --git a/portage_with_autodep/pym/_emerge/EbuildFetcher.py b/portage_with_autodep/pym/_emerge/EbuildFetcher.py
new file mode 100644
index 0000000..feb68d0
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildFetcher.py
@@ -0,0 +1,302 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import traceback
+
+from _emerge.SpawnProcess import SpawnProcess
+import copy
+import io
+import signal
+import sys
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage import _unicode_decode
+from portage.elog.messages import eerror
+from portage.package.ebuild.fetch import _check_distfile, fetch
+from portage.util._pty import _create_pty_or_pipe
+
+class EbuildFetcher(SpawnProcess):
+
+ __slots__ = ("config_pool", "ebuild_path", "fetchonly", "fetchall",
+ "pkg", "prefetch") + \
+ ("_digests", "_settings", "_uri_map")
+
+ def already_fetched(self, settings):
+ """
+ Returns True if all files already exist locally and have correct
+ digests, otherwise returns False. When returning True, appropriate
+ digest checking messages are produced for display and/or logging.
+ When returning False, no messages are produced, since we assume
+ that a fetcher process will later be executed in order to produce
+ such messages. This will raise InvalidDependString if SRC_URI is
+ invalid.
+ """
+
+ uri_map = self._get_uri_map()
+ if not uri_map:
+ return True
+
+ digests = self._get_digests()
+ distdir = settings["DISTDIR"]
+ allow_missing = "allow-missing-manifests" in settings.features
+
+ for filename in uri_map:
+ # Use stat rather than lstat since fetch() creates
+ # symlinks when PORTAGE_RO_DISTDIRS is used.
+ try:
+ st = os.stat(os.path.join(distdir, filename))
+ except OSError:
+ return False
+ if st.st_size == 0:
+ return False
+ expected_size = digests.get(filename, {}).get('size')
+ if expected_size is None:
+ continue
+ if st.st_size != expected_size:
+ return False
+
+ stdout_orig = sys.stdout
+ stderr_orig = sys.stderr
+ global_havecolor = portage.output.havecolor
+ out = io.StringIO()
+ eout = portage.output.EOutput()
+ eout.quiet = settings.get("PORTAGE_QUIET") == "1"
+ success = True
+ try:
+ sys.stdout = out
+ sys.stderr = out
+ if portage.output.havecolor:
+ portage.output.havecolor = not self.background
+
+ for filename in uri_map:
+ mydigests = digests.get(filename)
+ if mydigests is None:
+ if not allow_missing:
+ success = False
+ break
+ continue
+ ok, st = _check_distfile(os.path.join(distdir, filename),
+ mydigests, eout, show_errors=False)
+ if not ok:
+ success = False
+ break
+ except portage.exception.FileNotFound:
+ # A file disappeared unexpectedly.
+ return False
+ finally:
+ sys.stdout = stdout_orig
+ sys.stderr = stderr_orig
+ portage.output.havecolor = global_havecolor
+
+ if success:
+ # Only display messages on success. On failure, no messages are
+ # produced, since we assume that a fetcher process will later be
+ # executed in order to produce such messages.
+ msg = out.getvalue()
+ if msg:
+ self.scheduler.output(msg, log_path=self.logfile)
+
+ return success
+
+ def _start(self):
+
+ root_config = self.pkg.root_config
+ portdb = root_config.trees["porttree"].dbapi
+ ebuild_path = self._get_ebuild_path()
+
+ try:
+ uri_map = self._get_uri_map()
+ except portage.exception.InvalidDependString as e:
+ msg_lines = []
+ msg = "Fetch failed for '%s' due to invalid SRC_URI: %s" % \
+ (self.pkg.cpv, e)
+ msg_lines.append(msg)
+ self._eerror(msg_lines)
+ self._set_returncode((self.pid, 1 << 8))
+ self.wait()
+ return
+
+ if not uri_map:
+ # Nothing to fetch.
+ self._set_returncode((self.pid, os.EX_OK << 8))
+ self.wait()
+ return
+
+ settings = self.config_pool.allocate()
+ settings.setcpv(self.pkg)
+ portage.doebuild_environment(ebuild_path, 'fetch',
+ settings=settings, db=portdb)
+
+ if self.prefetch and \
+ self._prefetch_size_ok(uri_map, settings, ebuild_path):
+ self.config_pool.deallocate(settings)
+ self._set_returncode((self.pid, os.EX_OK << 8))
+ self.wait()
+ return
+
+ nocolor = settings.get("NOCOLOR")
+
+ if self.prefetch:
+ settings["PORTAGE_PARALLEL_FETCHONLY"] = "1"
+
+ if self.background:
+ nocolor = "true"
+
+ if nocolor is not None:
+ settings["NOCOLOR"] = nocolor
+
+ self._settings = settings
+ SpawnProcess._start(self)
+
+ # Free settings now since it's no longer needed in
+ # this process (the subprocess has a private copy).
+ self.config_pool.deallocate(settings)
+ settings = None
+ self._settings = None
+
+ def _spawn(self, args, fd_pipes=None, **kwargs):
+ """
+ Fork a subprocess, apply local settings, and call fetch().
+ """
+
+ pid = os.fork()
+ if pid != 0:
+ portage.process.spawned_pids.append(pid)
+ return [pid]
+
+ portage.process._setup_pipes(fd_pipes)
+
+ # Use default signal handlers in order to avoid problems
+ # killing subprocesses as reported in bug #353239.
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+ # Force consistent color output, in case we are capturing fetch
+ # output through a normal pipe due to unavailability of ptys.
+ portage.output.havecolor = self._settings.get('NOCOLOR') \
+ not in ('yes', 'true')
+
+ rval = 1
+ allow_missing = 'allow-missing-manifests' in self._settings.features
+ try:
+ if fetch(self._uri_map, self._settings, fetchonly=self.fetchonly,
+ digests=copy.deepcopy(self._get_digests()),
+ allow_missing_digests=allow_missing):
+ rval = os.EX_OK
+ except SystemExit:
+ raise
+ except:
+ traceback.print_exc()
+ finally:
+ # Call os._exit() from finally block, in order to suppress any
+ # finally blocks from earlier in the call stack. See bug #345289.
+ os._exit(rval)
+
+ def _get_ebuild_path(self):
+ if self.ebuild_path is not None:
+ return self.ebuild_path
+ portdb = self.pkg.root_config.trees["porttree"].dbapi
+ self.ebuild_path = portdb.findname(self.pkg.cpv, myrepo=self.pkg.repo)
+ if self.ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s'" % self.pkg.cpv)
+ return self.ebuild_path
+
+ def _get_digests(self):
+ if self._digests is not None:
+ return self._digests
+ self._digests = portage.Manifest(os.path.dirname(
+ self._get_ebuild_path()), None).getTypeDigests("DIST")
+ return self._digests
+
+ def _get_uri_map(self):
+ """
+ This can raise InvalidDependString from portdbapi.getFetchMap().
+ """
+ if self._uri_map is not None:
+ return self._uri_map
+ pkgdir = os.path.dirname(self._get_ebuild_path())
+ mytree = os.path.dirname(os.path.dirname(pkgdir))
+ use = None
+ if not self.fetchall:
+ use = self.pkg.use.enabled
+ portdb = self.pkg.root_config.trees["porttree"].dbapi
+ self._uri_map = portdb.getFetchMap(self.pkg.cpv,
+ useflags=use, mytree=mytree)
+ return self._uri_map
+
+ def _prefetch_size_ok(self, uri_map, settings, ebuild_path):
+ distdir = settings["DISTDIR"]
+
+ sizes = {}
+ for filename in uri_map:
+ # Use stat rather than lstat since portage.fetch() creates
+ # symlinks when PORTAGE_RO_DISTDIRS is used.
+ try:
+ st = os.stat(os.path.join(distdir, filename))
+ except OSError:
+ return False
+ if st.st_size == 0:
+ return False
+ sizes[filename] = st.st_size
+
+ digests = self._get_digests()
+ for filename, actual_size in sizes.items():
+ size = digests.get(filename, {}).get('size')
+ if size is None:
+ continue
+ if size != actual_size:
+ return False
+
+ # All files are present and sizes are ok. In this case the normal
+ # fetch code will be skipped, so we need to generate equivalent
+ # output here.
+ if self.logfile is not None:
+ f = io.open(_unicode_encode(self.logfile,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='a', encoding=_encodings['content'],
+ errors='backslashreplace')
+ for filename in uri_map:
+ f.write(_unicode_decode((' * %s size ;-) ...' % \
+ filename).ljust(73) + '[ ok ]\n'))
+ f.close()
+
+ return True
+
+ def _pipe(self, fd_pipes):
+ """When appropriate, use a pty so that fetcher progress bars,
+ like wget has, will work properly."""
+ if self.background or not sys.stdout.isatty():
+ # When the output only goes to a log file,
+ # there's no point in creating a pty.
+ return os.pipe()
+ stdout_pipe = None
+ if not self.background:
+ stdout_pipe = fd_pipes.get(1)
+ got_pty, master_fd, slave_fd = \
+ _create_pty_or_pipe(copy_term_size=stdout_pipe)
+ return (master_fd, slave_fd)
+
+ def _eerror(self, lines):
+ out = io.StringIO()
+ for line in lines:
+ eerror(line, phase="unpack", key=self.pkg.cpv, out=out)
+ msg = out.getvalue()
+ if msg:
+ self.scheduler.output(msg, log_path=self.logfile)
+
+ def _set_returncode(self, wait_retval):
+ SpawnProcess._set_returncode(self, wait_retval)
+ # Collect elog messages that might have been
+ # created by the pkg_nofetch phase.
+ # Skip elog messages for prefetch, in order to avoid duplicates.
+ if not self.prefetch and self.returncode != os.EX_OK:
+ msg_lines = []
+ msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
+ if self.logfile is not None:
+ msg += ", Log file:"
+ msg_lines.append(msg)
+ if self.logfile is not None:
+ msg_lines.append(" '%s'" % (self.logfile,))
+ self._eerror(msg_lines)
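+
+# A minimal sketch (not part of the original module) of the size pre-check
+# shared by already_fetched() and _prefetch_size_ok() above. `distdir`,
+# `uri_map` and `digests` are assumed inputs; digests maps each distfile
+# name to a dict that may carry a 'size' entry from the Manifest.
+def _size_precheck_sketch(distdir, uri_map, digests):
+    for filename in uri_map:
+        try:
+            st = os.stat(os.path.join(distdir, filename))
+        except OSError:
+            return False  # missing file, so a real fetch is needed
+        if st.st_size == 0:
+            return False  # zero-length files are treated as absent
+        expected = digests.get(filename, {}).get('size')
+        if expected is not None and st.st_size != expected:
+            return False  # size mismatch, so let fetch() re-verify
+    return True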
diff --git a/portage_with_autodep/pym/_emerge/EbuildFetchonly.py b/portage_with_autodep/pym/_emerge/EbuildFetchonly.py
new file mode 100644
index 0000000..b898971
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildFetchonly.py
@@ -0,0 +1,32 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.SlotObject import SlotObject
+import portage
+from portage import os
+from portage.elog.messages import eerror
+
+class EbuildFetchonly(SlotObject):
+
+ __slots__ = ("fetch_all", "pkg", "pretend", "settings")
+
+ def execute(self):
+ settings = self.settings
+ pkg = self.pkg
+ portdb = pkg.root_config.trees["porttree"].dbapi
+ ebuild_path = portdb.findname(pkg.cpv, myrepo=pkg.repo)
+ if ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s'" % pkg.cpv)
+ settings.setcpv(pkg)
+ debug = settings.get("PORTAGE_DEBUG") == "1"
+
+ rval = portage.doebuild(ebuild_path, "fetch",
+ settings["ROOT"], settings, debug=debug,
+ listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
+ mydbapi=portdb, tree="porttree")
+
+ if rval != os.EX_OK:
+ msg = "Fetch failed for '%s'" % (pkg.cpv,)
+ eerror(msg, phase="unpack", key=pkg.cpv)
+
+ return rval
diff --git a/portage_with_autodep/pym/_emerge/EbuildIpcDaemon.py b/portage_with_autodep/pym/_emerge/EbuildIpcDaemon.py
new file mode 100644
index 0000000..5dabe34
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildIpcDaemon.py
@@ -0,0 +1,108 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import logging
+import pickle
+from portage import os
+from portage.localization import _
+from portage.util import writemsg_level
+from _emerge.FifoIpcDaemon import FifoIpcDaemon
+from _emerge.PollConstants import PollConstants
+
+class EbuildIpcDaemon(FifoIpcDaemon):
+ """
+ This class serves as an IPC daemon, which ebuild processes can use
+ to communicate with portage's main python process.
+
+ Here are a few possible uses:
+
+ 1) Robust subshell/subprocess die support. This allows the ebuild
+ environment to reliably die without having to rely on signal IPC.
+
+ 2) Delegation of portageq calls to the main python process, eliminating
+ performance and userpriv permission issues.
+
+ 3) Reliable ebuild termination in cases when the ebuild has accidentally
+ left orphan processes running in the background (as in bug #278895).
+
+ 4) Detect cases in which bash has exited unexpectedly (as in bug #190128).
+ """
+
+ __slots__ = ('commands',)
+
+ def _input_handler(self, fd, event):
+ # Read the whole pickle in a single atomic read() call.
+ data = None
+ if event & PollConstants.POLLIN:
+ # For maximum portability, use os.read() here since
+ # array.fromfile() and file.read() are both known to
+ # erroneously return an empty string from this
+ # non-blocking fifo stream on FreeBSD (bug #337465).
+ try:
+ data = os.read(fd, self._bufsize)
+ except OSError as e:
+ if e.errno != errno.EAGAIN:
+ raise
+ # Assume that another event will be generated
+ # if there's any relevant data.
+
+ if data:
+
+ try:
+ obj = pickle.loads(data)
+ except SystemExit:
+ raise
+ except Exception:
+ # The pickle module can raise practically
+ # any exception when given corrupt data.
+ pass
+ else:
+
+ self._reopen_input()
+
+ cmd_key = obj[0]
+ cmd_handler = self.commands[cmd_key]
+ reply = cmd_handler(obj)
+ try:
+ self._send_reply(reply)
+ except OSError as e:
+ if e.errno == errno.ENXIO:
+ # This happens if the client side has been killed.
+ pass
+ else:
+ raise
+
+ # Allow the command to execute hooks after its reply
+ # has been sent. This hook is used by the 'exit'
+ # command to kill the ebuild process. For some
+ # reason, the ebuild-ipc helper hangs up the
+ # ebuild process if it is waiting for a reply
+ # when we try to kill the ebuild process.
+ reply_hook = getattr(cmd_handler,
+ 'reply_hook', None)
+ if reply_hook is not None:
+ reply_hook()
+
+ def _send_reply(self, reply):
+ # File streams are in unbuffered mode since we do atomic
+ # read and write of whole pickles. Use non-blocking mode so
+ # we don't hang if the client is killed before we can send
+ # the reply. We rely on the client opening the other side
+ # of this fifo before it sends its request, since otherwise
+ # we'd have a race condition with this open call raising
+ # ENXIO if the client hasn't opened the fifo yet.
+ try:
+ output_fd = os.open(self.output_fifo,
+ os.O_WRONLY | os.O_NONBLOCK)
+ try:
+ os.write(output_fd, pickle.dumps(reply))
+ finally:
+ os.close(output_fd)
+ except OSError as e:
+ # This probably means that the client has been killed,
+ # which causes open to fail with ENXIO.
+ writemsg_level(
+ "!!! EbuildIpcDaemon %s: %s\n" % \
+ (_('failed to send reply'), e),
+ level=logging.ERROR, noiselevel=-1)
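+
+# A minimal sketch (not part of the original module) of the client side of
+# the protocol described above. The fifo paths are assumed arguments; the
+# real client is the ebuild-ipc helper. The reply fifo is opened before the
+# request is sent, since _send_reply() above opens its end with O_NONBLOCK
+# and would fail with ENXIO if no reader existed yet.
+def _ipc_client_sketch(input_fifo, output_fifo, cmd):
+    import select
+    reply_fd = os.open(output_fifo, os.O_RDONLY | os.O_NONBLOCK)
+    try:
+        request_fd = os.open(input_fifo, os.O_WRONLY)
+        try:
+            os.write(request_fd, pickle.dumps(cmd))  # one whole pickle per write
+        finally:
+            os.close(request_fd)
+        select.select([reply_fd], [], [])  # wait for the daemon's reply
+        return pickle.loads(os.read(reply_fd, 4096))  # one whole pickle per read
+    finally:
+        os.close(reply_fd)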
diff --git a/portage_with_autodep/pym/_emerge/EbuildMerge.py b/portage_with_autodep/pym/_emerge/EbuildMerge.py
new file mode 100644
index 0000000..9c35988
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildMerge.py
@@ -0,0 +1,56 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.CompositeTask import CompositeTask
+from portage import os
+from portage.dbapi._MergeProcess import MergeProcess
+
+class EbuildMerge(CompositeTask):
+
+ __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
+ "pkg", "pkg_count", "pkg_path", "pretend",
+ "settings", "tree", "world_atom")
+
+ def _start(self):
+ root_config = self.pkg.root_config
+ settings = self.settings
+ mycat = settings["CATEGORY"]
+ mypkg = settings["PF"]
+ pkgloc = settings["D"]
+ infloc = os.path.join(settings["PORTAGE_BUILDDIR"], "build-info")
+ myebuild = settings["EBUILD"]
+ mydbapi = root_config.trees[self.tree].dbapi
+ vartree = root_config.trees["vartree"]
+ background = (settings.get('PORTAGE_BACKGROUND') == '1')
+ logfile = settings.get('PORTAGE_LOG_FILE')
+
+ merge_task = MergeProcess(
+ mycat=mycat, mypkg=mypkg, settings=settings,
+ treetype=self.tree, vartree=vartree, scheduler=self.scheduler,
+ background=background, blockers=self.find_blockers, pkgloc=pkgloc,
+ infloc=infloc, myebuild=myebuild, mydbapi=mydbapi,
+ prev_mtimes=self.ldpath_mtimes, logfile=logfile)
+
+ self._start_task(merge_task, self._merge_exit)
+
+ def _merge_exit(self, merge_task):
+ if self._final_exit(merge_task) != os.EX_OK:
+ self.wait()
+ return
+
+ pkg = self.pkg
+ self.world_atom(pkg)
+ pkg_count = self.pkg_count
+ pkg_path = self.pkg_path
+ logger = self.logger
+ if "noclean" not in self.settings.features:
+ short_msg = "emerge: (%s of %s) %s Clean Post" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv)
+ logger.log((" === (%s of %s) " + \
+ "Post-Build Cleaning (%s::%s)") % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
+ short_msg=short_msg)
+ logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
+
+ self.wait()
diff --git a/portage_with_autodep/pym/_emerge/EbuildMetadataPhase.py b/portage_with_autodep/pym/_emerge/EbuildMetadataPhase.py
new file mode 100644
index 0000000..e53298b
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildMetadataPhase.py
@@ -0,0 +1,133 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.SubProcess import SubProcess
+from _emerge.PollConstants import PollConstants
+import sys
+from portage.cache.mappings import slot_dict_class
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+import fcntl
+import io
+
+class EbuildMetadataPhase(SubProcess):
+
+ """
+ Asynchronous interface for the ebuild "depend" phase which is
+ used to extract metadata from the ebuild.
+ """
+
+ __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
+ "ebuild_mtime", "metadata", "portdb", "repo_path", "settings") + \
+ ("_raw_metadata",)
+
+ _file_names = ("ebuild",)
+ _files_dict = slot_dict_class(_file_names, prefix="")
+ _metadata_fd = 9
+
+ def _start(self):
+ settings = self.settings
+ settings.setcpv(self.cpv)
+ ebuild_path = self.ebuild_path
+
+ eapi = None
+ if 'parse-eapi-ebuild-head' in settings.features:
+ eapi = portage._parse_eapi_ebuild_head(
+ io.open(_unicode_encode(ebuild_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace'))
+
+ if eapi is not None:
+ if not portage.eapi_is_supported(eapi):
+ self.metadata_callback(self.cpv, self.ebuild_path,
+ self.repo_path, {'EAPI' : eapi}, self.ebuild_mtime)
+ self._set_returncode((self.pid, os.EX_OK << 8))
+ self.wait()
+ return
+
+ settings.configdict['pkg']['EAPI'] = eapi
+
+ debug = settings.get("PORTAGE_DEBUG") == "1"
+ master_fd = None
+ slave_fd = None
+ fd_pipes = None
+ if self.fd_pipes is not None:
+ fd_pipes = self.fd_pipes.copy()
+ else:
+ fd_pipes = {}
+
+ fd_pipes.setdefault(0, sys.stdin.fileno())
+ fd_pipes.setdefault(1, sys.stdout.fileno())
+ fd_pipes.setdefault(2, sys.stderr.fileno())
+
+ # flush any pending output
+ for fd in fd_pipes.values():
+ if fd == sys.stdout.fileno():
+ sys.stdout.flush()
+ if fd == sys.stderr.fileno():
+ sys.stderr.flush()
+
+ fd_pipes_orig = fd_pipes.copy()
+ self._files = self._files_dict()
+ files = self._files
+
+ master_fd, slave_fd = os.pipe()
+ fcntl.fcntl(master_fd, fcntl.F_SETFL,
+ fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+
+ fd_pipes[self._metadata_fd] = slave_fd
+
+ self._raw_metadata = []
+ files.ebuild = os.fdopen(master_fd, 'rb', 0)
+ self._reg_id = self.scheduler.register(files.ebuild.fileno(),
+ self._registered_events, self._output_handler)
+ self._registered = True
+
+ retval = portage.doebuild(ebuild_path, "depend",
+ settings["ROOT"], settings, debug,
+ mydbapi=self.portdb, tree="porttree",
+ fd_pipes=fd_pipes, returnpid=True)
+
+ os.close(slave_fd)
+
+ if isinstance(retval, int):
+ # doebuild failed before spawning
+ self._unregister()
+ self._set_returncode((self.pid, retval << 8))
+ self.wait()
+ return
+
+ self.pid = retval[0]
+ portage.process.spawned_pids.remove(self.pid)
+
+ def _output_handler(self, fd, event):
+
+ if event & PollConstants.POLLIN:
+ self._raw_metadata.append(self._files.ebuild.read())
+ if not self._raw_metadata[-1]:
+ self._unregister()
+ self.wait()
+
+ self._unregister_if_appropriate(event)
+
+ def _set_returncode(self, wait_retval):
+ SubProcess._set_returncode(self, wait_retval)
+ if self.returncode == os.EX_OK:
+ metadata_lines = ''.join(_unicode_decode(chunk,
+ encoding=_encodings['repo.content'], errors='replace')
+ for chunk in self._raw_metadata).splitlines()
+ if len(portage.auxdbkeys) != len(metadata_lines):
+ # Don't trust bash's returncode if the
+ # number of lines is incorrect.
+ self.returncode = 1
+ else:
+ metadata = zip(portage.auxdbkeys, metadata_lines)
+ self.metadata = self.metadata_callback(self.cpv,
+ self.ebuild_path, self.repo_path, metadata,
+ self.ebuild_mtime)
+
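+# A minimal sketch (not part of the original module) of the data shape that
+# _set_returncode() above consumes: the "depend" phase writes one line per
+# metadata key on fd 9 in portage.auxdbkeys order, so a line-count mismatch
+# means the output is truncated or corrupt regardless of bash's exit code.
+def _parse_depend_output_sketch(raw_chunks):
+    lines = ''.join(raw_chunks).splitlines()
+    if len(portage.auxdbkeys) != len(lines):
+        return None  # distrust bash's returncode in this case
+    return dict(zip(portage.auxdbkeys, lines))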
diff --git a/portage_with_autodep/pym/_emerge/EbuildPhase.py b/portage_with_autodep/pym/_emerge/EbuildPhase.py
new file mode 100644
index 0000000..82c165d
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildPhase.py
@@ -0,0 +1,350 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import gzip
+import io
+import sys
+import tempfile
+
+from _emerge.AsynchronousLock import AsynchronousLock
+from _emerge.BinpkgEnvExtractor import BinpkgEnvExtractor
+from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
+from _emerge.EbuildProcess import EbuildProcess
+from _emerge.CompositeTask import CompositeTask
+from portage.util import writemsg
+from portage.xml.metadata import MetaDataXML
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.elog:messages@elog_messages',
+ 'portage.package.ebuild.doebuild:_check_build_log,' + \
+ '_post_phase_cmds,_post_phase_userpriv_perms,' + \
+ '_post_src_install_chost_fix,' + \
+ '_post_src_install_soname_symlinks,' + \
+ '_post_src_install_uid_fix,_postinst_bsdflags,' + \
+ '_preinst_bsdflags'
+)
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+
+class EbuildPhase(CompositeTask):
+
+ __slots__ = ("actionmap", "phase", "settings") + \
+ ("_ebuild_lock",)
+
+ # FEATURES displayed prior to setup phase
+ _features_display = ("ccache", "depcheck", "depcheckstrict" "distcc",
+ "distcc-pump", "fakeroot",
+ "installsources", "keeptemp", "keepwork", "nostrip",
+ "preserve-libs", "sandbox", "selinux", "sesandbox",
+ "splitdebug", "suidctl", "test", "userpriv",
+ "usersandbox")
+
+ # Locked phases
+ _locked_phases = ("setup", "preinst", "postinst", "prerm", "postrm")
+
+ def _start(self):
+
+ need_builddir = self.phase not in EbuildProcess._phases_without_builddir
+
+ if need_builddir:
+ phase_completed_file = os.path.join(
+ self.settings['PORTAGE_BUILDDIR'],
+ ".%sed" % self.phase.rstrip('e'))
+ if not os.path.exists(phase_completed_file):
+ # If the phase is really going to run then we want
+ # to eliminate any stale elog messages that may
+ # exist from a previous run.
+ try:
+ os.unlink(os.path.join(self.settings['T'],
+ 'logging', self.phase))
+ except OSError:
+ pass
+
+ if self.phase in ('nofetch', 'pretend', 'setup'):
+
+ use = self.settings.get('PORTAGE_BUILT_USE')
+ if use is None:
+ use = self.settings['PORTAGE_USE']
+
+ maint_str = ""
+ upstr_str = ""
+ metadata_xml_path = os.path.join(os.path.dirname(self.settings['EBUILD']), "metadata.xml")
+ if os.path.isfile(metadata_xml_path):
+ herds_path = os.path.join(self.settings['PORTDIR'],
+ 'metadata/herds.xml')
+ try:
+ metadata_xml = MetaDataXML(metadata_xml_path, herds_path)
+ maint_str = metadata_xml.format_maintainer_string()
+ upstr_str = metadata_xml.format_upstream_string()
+ except SyntaxError:
+ maint_str = "<invalid metadata.xml>"
+
+ msg = []
+ msg.append("Package: %s" % self.settings.mycpv)
+ if self.settings.get('PORTAGE_REPO_NAME'):
+ msg.append("Repository: %s" % self.settings['PORTAGE_REPO_NAME'])
+ if maint_str:
+ msg.append("Maintainer: %s" % maint_str)
+ if upstr_str:
+ msg.append("Upstream: %s" % upstr_str)
+
+ msg.append("USE: %s" % use)
+ relevant_features = []
+ enabled_features = self.settings.features
+ for x in self._features_display:
+ if x in enabled_features:
+ relevant_features.append(x)
+ if relevant_features:
+ msg.append("FEATURES: %s" % " ".join(relevant_features))
+
+ # Force background=True for this header since it's intended
+ # for the log and it doesn't necessarily need to be visible
+ # elsewhere.
+ self._elog('einfo', msg, background=True)
+
+ if self.phase == 'package':
+ if 'PORTAGE_BINPKG_TMPFILE' not in self.settings:
+ self.settings['PORTAGE_BINPKG_TMPFILE'] = \
+ os.path.join(self.settings['PKGDIR'],
+ self.settings['CATEGORY'], self.settings['PF']) + '.tbz2'
+
+ if self.phase in ("pretend", "prerm"):
+ env_extractor = BinpkgEnvExtractor(background=self.background,
+ scheduler=self.scheduler, settings=self.settings)
+ if env_extractor.saved_env_exists():
+ self._start_task(env_extractor, self._env_extractor_exit)
+ return
+ # If the environment.bz2 doesn't exist, then ebuild.sh will
+ # source the ebuild as a fallback.
+
+ self._start_lock()
+
+ def _env_extractor_exit(self, env_extractor):
+ if self._default_exit(env_extractor) != os.EX_OK:
+ self.wait()
+ return
+
+ self._start_lock()
+
+ def _start_lock(self):
+ if (self.phase in self._locked_phases and
+ "ebuild-locks" in self.settings.features):
+ eroot = self.settings["EROOT"]
+ lock_path = os.path.join(eroot, portage.VDB_PATH + "-ebuild")
+ if os.access(os.path.dirname(lock_path), os.W_OK):
+ self._ebuild_lock = AsynchronousLock(path=lock_path,
+ scheduler=self.scheduler)
+ self._start_task(self._ebuild_lock, self._lock_exit)
+ return
+
+ self._start_ebuild()
+
+ def _lock_exit(self, ebuild_lock):
+ if self._default_exit(ebuild_lock) != os.EX_OK:
+ self.wait()
+ return
+ self._start_ebuild()
+
+ def _start_ebuild(self):
+
+ # Don't open the log file during the clean phase since the
+ # open file can result in an nfs lock on $T/build.log which
+ # prevents the clean phase from removing $T.
+ logfile = None
+ if self.phase not in ("clean", "cleanrm") and \
+ self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+ logfile = self.settings.get("PORTAGE_LOG_FILE")
+
+ fd_pipes = None
+ if not self.background and self.phase == 'nofetch':
+ # All the pkg_nofetch output goes to stderr since
+ # it's considered to be an error message.
+ fd_pipes = {1 : sys.stderr.fileno()}
+
+ ebuild_process = EbuildProcess(actionmap=self.actionmap,
+ background=self.background, fd_pipes=fd_pipes, logfile=logfile,
+ phase=self.phase, scheduler=self.scheduler,
+ settings=self.settings)
+
+ self._start_task(ebuild_process, self._ebuild_exit)
+
+ def _ebuild_exit(self, ebuild_process):
+
+ if self._ebuild_lock is not None:
+ self._ebuild_lock.unlock()
+ self._ebuild_lock = None
+
+ fail = False
+ if self._default_exit(ebuild_process) != os.EX_OK:
+ if self.phase == "test" and \
+ "test-fail-continue" in self.settings.features:
+ pass
+ else:
+ fail = True
+
+ if not fail:
+ self.returncode = None
+
+ logfile = None
+ if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+ logfile = self.settings.get("PORTAGE_LOG_FILE")
+
+ if self.phase == "install":
+ out = io.StringIO()
+ _check_build_log(self.settings, out=out)
+ msg = out.getvalue()
+ self.scheduler.output(msg, log_path=logfile)
+
+ if fail:
+ self._die_hooks()
+ return
+
+ settings = self.settings
+ _post_phase_userpriv_perms(settings)
+
+ if self.phase == "install":
+ out = io.StringIO()
+ _post_src_install_chost_fix(settings)
+ _post_src_install_uid_fix(settings, out)
+ msg = out.getvalue()
+ if msg:
+ self.scheduler.output(msg, log_path=logfile)
+ elif self.phase == "preinst":
+ _preinst_bsdflags(settings)
+ elif self.phase == "postinst":
+ _postinst_bsdflags(settings)
+
+ post_phase_cmds = _post_phase_cmds.get(self.phase)
+ if post_phase_cmds is not None:
+ if logfile is not None and self.phase in ("install",):
+ # Log to a temporary file, since the code we are running
+ # reads PORTAGE_LOG_FILE for QA checks, and we want to
+ # avoid annoying "gzip: unexpected end of file" messages
+ # when FEATURES=compress-build-logs is enabled.
+ fd, logfile = tempfile.mkstemp()
+ os.close(fd)
+ post_phase = MiscFunctionsProcess(background=self.background,
+ commands=post_phase_cmds, logfile=logfile, phase=self.phase,
+ scheduler=self.scheduler, settings=settings)
+ self._start_task(post_phase, self._post_phase_exit)
+ return
+
+ # this point is not reachable if there was a failure and
+ # we returned for die_hooks above, so returncode must
+ # indicate success (especially if ebuild_process.returncode
+ # is unsuccessful and test-fail-continue came into play)
+ self.returncode = os.EX_OK
+ self._current_task = None
+ self.wait()
+
+ def _post_phase_exit(self, post_phase):
+
+ self._assert_current(post_phase)
+
+ log_path = None
+ if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+ log_path = self.settings.get("PORTAGE_LOG_FILE")
+
+ if post_phase.logfile is not None and \
+ post_phase.logfile != log_path:
+ # We were logging to a temp file (see above), so append
+ # temp file to main log and remove temp file.
+ self._append_temp_log(post_phase.logfile, log_path)
+
+ if self._final_exit(post_phase) != os.EX_OK:
+ writemsg("!!! post %s failed; exiting.\n" % self.phase,
+ noiselevel=-1)
+ self._die_hooks()
+ return
+
+ if self.phase == "install":
+ out = io.StringIO()
+ _post_src_install_soname_symlinks(self.settings, out)
+ msg = out.getvalue()
+ if msg:
+ self.scheduler.output(msg, log_path=log_path)
+
+ self._current_task = None
+ self.wait()
+ return
+
+ def _append_temp_log(self, temp_log, log_path):
+
+ temp_file = open(_unicode_encode(temp_log,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+
+ log_file = self._open_log(log_path)
+
+ for line in temp_file:
+ log_file.write(line)
+
+ temp_file.close()
+ log_file.close()
+ os.unlink(temp_log)
+
+ def _open_log(self, log_path):
+
+ f = open(_unicode_encode(log_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='ab')
+
+ if log_path.endswith('.gz'):
+ f = gzip.GzipFile(filename='', mode='ab', fileobj=f)
+
+ return f
+
+ def _die_hooks(self):
+ self.returncode = None
+ phase = 'die_hooks'
+ die_hooks = MiscFunctionsProcess(background=self.background,
+ commands=[phase], phase=phase,
+ scheduler=self.scheduler, settings=self.settings)
+ self._start_task(die_hooks, self._die_hooks_exit)
+
+ def _die_hooks_exit(self, die_hooks):
+ if self.phase != 'clean' and \
+ 'noclean' not in self.settings.features and \
+ 'fail-clean' in self.settings.features:
+ self._default_exit(die_hooks)
+ self._fail_clean()
+ return
+ self._final_exit(die_hooks)
+ self.returncode = 1
+ self.wait()
+
+ def _fail_clean(self):
+ self.returncode = None
+ portage.elog.elog_process(self.settings.mycpv, self.settings)
+ phase = "clean"
+ clean_phase = EbuildPhase(background=self.background,
+ phase=phase, scheduler=self.scheduler, settings=self.settings)
+ self._start_task(clean_phase, self._fail_clean_exit)
+ return
+
+ def _fail_clean_exit(self, clean_phase):
+ self._final_exit(clean_phase)
+ self.returncode = 1
+ self.wait()
+
+ def _elog(self, elog_funcname, lines, background=None):
+ if background is None:
+ background = self.background
+ out = io.StringIO()
+ phase = self.phase
+ elog_func = getattr(elog_messages, elog_funcname)
+ global_havecolor = portage.output.havecolor
+ try:
+ portage.output.havecolor = \
+ self.settings.get('NOCOLOR', 'false').lower() in ('no', 'false')
+ for line in lines:
+ elog_func(line, phase=phase, key=self.settings.mycpv, out=out)
+ finally:
+ portage.output.havecolor = global_havecolor
+ msg = out.getvalue()
+ if msg:
+ log_path = None
+ if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+ log_path = self.settings.get("PORTAGE_LOG_FILE")
+ self.scheduler.output(msg, log_path=log_path,
+ background=background)
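+
+# A minimal sketch (not part of the original module) of the stamp-file
+# naming rule used in _start() above: stripping any trailing "e" before
+# appending "ed" turns each phase name into a past-tense marker file
+# created inside PORTAGE_BUILDDIR.
+def _phase_stamp_sketch(phase):
+    return ".%sed" % phase.rstrip('e')
+
+# _phase_stamp_sketch("unpack")    -> ".unpacked"
+# _phase_stamp_sketch("configure") -> ".configured"
+# _phase_stamp_sketch("compile")   -> ".compiled"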
diff --git a/portage_with_autodep/pym/_emerge/EbuildProcess.py b/portage_with_autodep/pym/_emerge/EbuildProcess.py
new file mode 100644
index 0000000..ce97aff
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildProcess.py
@@ -0,0 +1,21 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AbstractEbuildProcess import AbstractEbuildProcess
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.package.ebuild.doebuild:_doebuild_spawn,_spawn_actionmap'
+)
+
+class EbuildProcess(AbstractEbuildProcess):
+
+ __slots__ = ('actionmap',)
+
+ def _spawn(self, args, **kwargs):
+
+ actionmap = self.actionmap
+ if actionmap is None:
+ actionmap = _spawn_actionmap(self.settings)
+
+ return _doebuild_spawn(self.phase, self.settings,
+ actionmap=actionmap, **kwargs)
diff --git a/portage_with_autodep/pym/_emerge/EbuildSpawnProcess.py b/portage_with_autodep/pym/_emerge/EbuildSpawnProcess.py
new file mode 100644
index 0000000..e1f682a
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildSpawnProcess.py
@@ -0,0 +1,16 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AbstractEbuildProcess import AbstractEbuildProcess
+
+class EbuildSpawnProcess(AbstractEbuildProcess):
+ """
+ Used by doebuild.spawn() to manage the spawned process.
+ """
+ _spawn_kwarg_names = AbstractEbuildProcess._spawn_kwarg_names + \
+ ('fakeroot_state',)
+
+ __slots__ = ('fakeroot_state', 'spawn_func')
+
+ def _spawn(self, args, **kwargs):
+ return self.spawn_func(args, env=self.settings.environ(), **kwargs)
diff --git a/portage_with_autodep/pym/_emerge/EventsAnalyser.py b/portage_with_autodep/pym/_emerge/EventsAnalyser.py
new file mode 100644
index 0000000..65ece7b
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EventsAnalyser.py
@@ -0,0 +1,511 @@
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage.dbapi._expand_new_virt import expand_new_virt
+from portage import os
+
+import subprocess
+import re
+
+class PortageUtils:
+ """ class for accessing the portage api """
+ def __init__(self, settings):
+ """ test """
+ self.settings=settings
+ self.vartree=portage.vartree(settings=settings)
+ self.vardbapi=portage.vardbapi(settings=settings, vartree=self.vartree)
+ self.portdbapi=portage.portdbapi(mysettings=settings)
+ self.metadata_keys = [k for k in portage.auxdbkeys if not k.startswith("UNUSED_")]
+ self.use=self.settings["USE"]
+
+ def get_best_visible_pkg(self,pkg):
+ """
+ Gets the best candidate for installation. Returns an empty string if none is found
+
+ :param pkg: package name
+
+ """
+ try:
+ return self.portdbapi.xmatch("bestmatch-visible", pkg)
+ except Exception:
+ return ''
+
+ # non-recursive dependency getter
+ def get_dep(self,pkg,dep_type=["RDEPEND","DEPEND"]):
+ """
+ Gets current dependencies of a package. Looks in portage db
+
+ :param pkg: name of package
+ :param dep_type: type of dependencies to recurse. Can be ["DEPEND"] or
+ ["RDEPEND", "DEPEND"]
+ :returns: **set** of package names
+ """
+ ret=set()
+
+ pkg = self.get_best_visible_pkg(pkg)
+ if not pkg:
+ return ret
+
+ # we found the best visible match in the common tree
+
+
+ metadata = dict(zip(self.metadata_keys,
+ self.portdbapi.aux_get(pkg, self.metadata_keys)))
+ dep_str = " ".join(metadata[k] for k in dep_type)
+
+ # the IUSE defaults are very important for us
+ iuse_defaults=[
+ u[1:] for u in metadata.get("IUSE",'').split() if u.startswith("+")]
+
+ use=self.use.split()
+
+ for u in iuse_defaults:
+ if u not in use:
+ use.append(u)
+
+ success, atoms = portage.dep_check(dep_str, None, self.settings,
+ myuse=use, myroot=self.settings["ROOT"],
+ trees={self.settings["ROOT"]:{"vartree":self.vartree, "porttree": self.vartree}})
+ if not success:
+ return ret
+
+ for atom in atoms:
+ atomname = self.vartree.dep_bestmatch(atom)
+
+ if not atomname:
+ continue
+
+ for unvirt_pkg in expand_new_virt(self.vardbapi,'='+atomname):
+ for pkg in self.vartree.dep_match(unvirt_pkg):
+ ret.add(pkg)
+
+ return ret
+
+ # recursive dependency getter
+ def get_deps(self,pkg,dep_type=["RDEPEND","DEPEND"]):
+ """
+ Gets current dependencies of a package at any depth.
+ All dependencies **must** be installed.
+
+ :param pkg: name of package
+ :param dep_type: type of dependencies to recurse. Can be ["DEPEND"] or
+ ["RDEPEND", "DEPEND"]
+ :returns: **set** of package names
+ """
+ ret=set()
+
+
+ # get porttree dependencies on the first package
+
+ pkg = self.portdbapi.xmatch("bestmatch-visible", pkg)
+ if not pkg:
+ return ret
+
+ known_packages=set()
+ unknown_packages=self.get_dep(pkg,dep_type)
+ ret=ret.union(unknown_packages)
+
+ while unknown_packages:
+ p=unknown_packages.pop()
+ if p in known_packages:
+ continue
+ known_packages.add(p)
+
+ metadata = dict(zip(self.metadata_keys, self.vardbapi.aux_get(p, self.metadata_keys)))
+
+ dep_str = " ".join(metadata[k] for k in dep_type)
+
+ # the IUSE defaults are very important for us
+ iuse_defaults=[
+ u[1:] for u in metadata.get("IUSE",'').split() if u.startswith("+")]
+
+ use=self.use.split()
+
+ for u in iuse_defaults:
+ if u not in use:
+ use.append(u)
+
+ success, atoms = portage.dep_check(dep_str, None, self.settings,
+ myuse=use, myroot=self.settings["ROOT"],
+ trees={self.settings["ROOT"]:{"vartree":self.vartree,"porttree": self.vartree}})
+
+ if not success:
+ continue
+
+ for atom in atoms:
+ atomname = self.vartree.dep_bestmatch(atom)
+ if not atomname:
+ continue
+
+ for unvirt_pkg in expand_new_virt(self.vardbapi,'='+atomname):
+ for pkg in self.vartree.dep_match(unvirt_pkg):
+ ret.add(pkg)
+ unknown_packages.add(pkg)
+ return ret
+
+ def get_deps_for_package_building(self, pkg):
+ """
+ Returns the buildtime dependencies of the given package together with
+ all runtime dependencies of those buildtime dependencies
+ """
+ buildtime_deps=self.get_dep(pkg, ["DEPEND"])
+ runtime_deps=set()
+ for dep in buildtime_deps:
+ runtime_deps=runtime_deps.union(self.get_deps(dep,["RDEPEND"]))
+
+ ret=buildtime_deps.union(runtime_deps)
+ return ret
+
+ def get_system_packages_list(self):
+ """
+ Returns all packages from the system set. They are always implicit dependencies.
+
+ :returns: **list** of package names
+ """
+ ret=[]
+ for atom in self.settings.packages:
+ for pre_pkg in self.vartree.dep_match(atom):
+ for unvirt_pkg in expand_new_virt(self.vardbapi,'='+pre_pkg):
+ for pkg in self.vartree.dep_match(unvirt_pkg):
+ ret.append(pkg)
+ return ret
+
+
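+# A minimal usage sketch (not part of the original module); the package atom
+# is a hypothetical example. get_deps_for_package_building() unions the
+# direct DEPEND atoms of a package with the full RDEPEND closure of each of
+# them, which is the set of packages a build is allowed to touch.
+def _portage_utils_usage_sketch(settings):
+    utils = PortageUtils(settings=settings)
+    direct_build_deps = utils.get_dep("app-editors/vim", ["DEPEND"])
+    allowed_packages = utils.get_deps_for_package_building("app-editors/vim")
+    return direct_build_deps, allowed_packages
+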
+class GentoolkitUtils:
+ """
+ Interface to the qfile and qlist utilities, which are much faster than
+ the portage internals.
+ """
+
+ @staticmethod
+ def getpackagesbyfiles(files):
+ """
+ :param files: list of filenames
+ :returns: **dictionary** file->package; if a file doesn't belong to any
+ package it is not returned as a key of this dictionary
+ """
+ ret={}
+ listtocheck=[]
+ for f in files:
+ if os.path.isdir(f):
+ ret[f]="directory"
+ else:
+ listtocheck.append(f)
+
+ try:
+ proc=subprocess.Popen(['qfile']+['--nocolor','--exact','','--from','-'],
+ stdin=subprocess.PIPE, stdout=subprocess.PIPE,stderr=subprocess.PIPE,
+ bufsize=4096)
+
+ out,err=proc.communicate("\n".join(listtocheck).encode("utf8"))
+
+ lines=out.decode("utf8").split("\n")
+ #print lines
+ line_re=re.compile(r"^([^ ]+)\s+\(([^)]+)\)$")
+ for line in lines:
+ if len(line)==0:
+ continue
+ match=line_re.match(line)
+ if match:
+ ret[match.group(2)]=match.group(1)
+ else:
+ portage.util.writemsg("Util qfile returned unparsable string: %s\n" % line)
+
+ except OSError as e:
+ portage.util.writemsg("Error while launching qfile: %s\n" % e)
+
+
+ return ret
+
+ @staticmethod
+ def getfilesbypackages(packagenames):
+ """
+ :param packagenames: list of package names
+ :returns: **list** of files belonging to the given packages
+ """
+ ret=[]
+ try:
+ proc=subprocess.Popen(['qlist']+['--nocolor',"--obj"]+packagenames,
+ stdout=subprocess.PIPE,stderr=subprocess.PIPE,
+ bufsize=4096)
+
+ out,err=proc.communicate()
+
+ ret=out.decode("utf8").split("\n")
+ if ret==['']:
+ ret=[]
+ except OSError as e:
+ portage.util.writemsg("Error while launching qfile: %s\n" % e)
+
+ return ret
+
+ @staticmethod
+ def get_all_packages_files():
+ """
+ Memory-hungry operation
+
+ :returns: **set** of all files that belong to any installed package
+ """
+ ret=[]
+ try:
+ proc=subprocess.Popen(['qlist']+['--all',"--obj"],
+ stdout=subprocess.PIPE,stderr=subprocess.PIPE,
+ bufsize=4096)
+
+ out,err=proc.communicate()
+
+ ret=out.decode("utf8").split("\n")
+ except OSError as e:
+ portage.util.writemsg("Error while launching qfile: %s\n" % e)
+
+ return set(ret)
+
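+# A minimal sketch (not part of the original module) of the qfile output
+# format parsed in getpackagesbyfiles() above: each line looks like
+# "<category/package> (<file>)", so group 1 is the owning package and
+# group 2 is the file that was queried.
+def _qfile_parse_sketch(line):
+    match = re.match(r"^([^ ]+)\s+\(([^)]+)\)$", line)
+    if match is None:
+        return None
+    return match.group(2), match.group(1)  # (file, owning package)
+
+# _qfile_parse_sketch("app-shells/bash (/bin/bash)")
+# -> ("/bin/bash", "app-shells/bash")
+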
+class FilterProcGenerator:
+ def __init__(self, pkgname, settings):
+ portageutils=PortageUtils(settings=settings)
+
+ deps_all=portageutils.get_deps_for_package_building(pkgname)
+ deps_portage=portageutils.get_dep('portage',["RDEPEND"])
+
+ system_packages=portageutils.get_system_packages_list()
+
+ allfiles=GentoolkitUtils.get_all_packages_files()
+ portage.util.writemsg("All files list recieved, waiting for " \
+ "a list of allowed files\n")
+
+
+ allowedpkgs=system_packages+list(deps_portage)+list(deps_all)
+
+ allowedfiles=GentoolkitUtils.getfilesbypackages(allowedpkgs)
+ #for pkg in allowedpkgs:
+ # allowedfiles+=GentoolkitUtils.getfilesbypackage(pkg)
+
+ #import pdb; pdb.set_trace()
+
+ # manually add all python interpreters to this list
+ allowedfiles+=GentoolkitUtils.getfilesbypackages(['python'])
+ allowedfiles=set(allowedfiles)
+
+ deniedfiles=allfiles-allowedfiles
+
+ def filter_proc(eventname,filename,stage):
+ if filename in deniedfiles:
+ return False
+ return True
+
+ self.filter_proc=filter_proc
+ def get_filter_proc(self):
+ return self.filter_proc
+
+class EventsAnalyser:
+ def __init__(self, pkgname, events, settings):
+ self.pkgname=pkgname
+ self.events=events
+ self.settings=settings
+ self.portageutils=PortageUtils(settings=settings)
+
+ self.deps_all=self.portageutils.get_deps_for_package_building(pkgname)
+ self.deps_direct=self.portageutils.get_dep(pkgname,["DEPEND"])
+ self.deps_portage=self.portageutils.get_dep('portage',["RDEPEND"])
+
+ self.system_packages=self.portageutils.get_system_packages_list()
+ # All analyse work is here
+
+ # get unique filenames
+ filenames=set()
+ for stage in events:
+ succ_events=set(events[stage][0])
+ fail_events=set(events[stage][1])
+ filenames=filenames.union(succ_events)
+ filenames=filenames.union(fail_events)
+ filenames=list(filenames)
+
+ file_to_package=GentoolkitUtils.getpackagesbyfiles(filenames)
+ # This part is hard to read: it converts one complex struct
+ # (returned by getfsevents) into another complex struct that is
+ # better suited for generating output.
+ #
+ # The old struct is also used during output.
+
+ packagesinfo={}
+
+ for stage in sorted(events):
+ succ_events=events[stage][0]
+ fail_events=events[stage][1]
+
+ for filename in succ_events:
+ if filename in file_to_package:
+ package=file_to_package[filename]
+ else:
+ package="unknown"
+
+ if not package in packagesinfo:
+ packagesinfo[package]={}
+ stageinfo=packagesinfo[package]
+ if not stage in stageinfo:
+ stageinfo[stage]={}
+
+ filesinfo=stageinfo[stage]
+ if not filename in filesinfo:
+ filesinfo[filename]={"found":[],"notfound":[]}
+ filesinfo[filename]["found"]=succ_events[filename]
+
+ for filename in fail_events:
+ if filename in file_to_package:
+ package=file_to_package[filename]
+ else:
+ package="unknown"
+ if not package in packagesinfo:
+ packagesinfo[package]={}
+ stageinfo=packagesinfo[package]
+ if not stage in stageinfo:
+ stageinfo[stage]={}
+
+ filesinfo=stageinfo[stage]
+ if not filename in filesinfo:
+ filesinfo[filename]={"found":[],"notfound":[]}
+ filesinfo[filename]["notfound"]=fail_events[filename]
+ self.packagesinfo=packagesinfo
+
+ def display(self):
+ portage.util.writemsg(
+ portage.output.colorize(
+ "WARN", "\nFile access report for %s:\n" % self.pkgname))
+
+ stagesorder={"clean":1,"setup":2,"unpack":3,"prepare":4,"configure":5,"compile":6,"test":7,
+ "install":8,"preinst":9,"postinst":10,"prerm":11,"postrm":12,"unknown":13}
+ packagesinfo=self.packagesinfo
+ # print information grouped by package
+ for package in sorted(packagesinfo):
+ # not showing special directory package
+ if package=="directory":
+ continue
+
+ if package=="unknown":
+ continue
+
+
+ is_pkg_in_dep=package in self.deps_all
+ is_pkg_in_portage_dep=package in self.deps_portage
+ is_pkg_in_system=package in self.system_packages
+ is_pkg_python="dev-lang/python" in package
+
+ stages=[]
+ for stage in sorted(packagesinfo[package].keys(), key=stagesorder.get):
+ if stage!="unknown":
+ stages.append(stage)
+
+ if len(stages)==0:
+ continue
+
+ filenames={}
+ for stage in stages:
+ for filename in packagesinfo[package][stage]:
+ if len(packagesinfo[package][stage][filename]["found"])!=0:
+ was_readed,was_writed=packagesinfo[package][stage][filename]["found"]
+ if not filename in filenames:
+ filenames[filename]=['ok',was_readed,was_writed]
+ else:
+ status, old_was_readed, old_was_writed=filenames[filename]
+ filenames[filename]=[
+ 'ok',old_was_readed | was_readed, old_was_writed | was_writed
+ ]
+ if len(packagesinfo[package][stage][filename]["notfound"])!=0:
+ was_notfound,was_blocked=packagesinfo[package][stage][filename]["notfound"]
+ if not filename in filenames:
+ filenames[filename]=['err',was_notfound,was_blocked]
+ else:
+ status, old_was_notfound, old_was_blocked=filenames[filename]
+ filenames[filename]=[
+ 'err',old_was_notfound | was_notfound, old_was_blocked | was_blocked
+ ]
+
+
+ if is_pkg_in_dep:
+ portage.util.writemsg("[OK]")
+ elif is_pkg_in_system:
+ portage.util.writemsg("[SYSTEM]")
+ elif is_pkg_in_portage_dep:
+ portage.util.writemsg("[PORTAGE DEP]")
+ elif is_pkg_python:
+ portage.util.writemsg("[INTERPRETER]")
+ elif not self.is_package_useful(package,stages,filenames.keys()):
+ portage.util.writemsg("[LIKELY OK]")
+ else:
+ portage.util.writemsg(portage.output.colorize("BAD", "[NOT IN DEPS]"))
+ # show information about accessed files
+
+ portage.util.writemsg(" %-40s: %s\n" % (package,stages))
+
+ # this is here for readability
+ action={
+ ('ok',False,False):"accessed",
+ ('ok',True,False):"readed",
+ ('ok',False,True):"writed",
+ ('ok',True,True):"readed and writed",
+ ('err',False,False):"other error",
+ ('err',True,False):"not found",
+ ('err',False,True):"blocked",
+ ('err',True,True):"not found and blocked"
+ }
+
+ filescounter=0
+
+ for filename in filenames:
+ event_info=tuple(filenames[filename])
+ portage.util.writemsg(" %-56s %-21s\n" % (filename,action[event_info]))
+ filescounter+=1
+ if filescounter>=10:
+ portage.util.writemsg(" ... and %d more ...\n" % (len(filenames)-10))
+ break
+ # ... and one more check. Making sure that direct build time
+ # dependencies were accessed
+ #import pdb; pdb.set_trace()
+ not_accessed_deps=set(self.deps_direct)-set(self.packagesinfo.keys())
+ if not_accessed_deps:
+ portage.util.writemsg(portage.output.colorize("WARN", "!!! "))
+ portage.util.writemsg("Warning! Some build time dependencies " + \
+ "of packages were not accessed: " + \
+ " ".join(not_accessed_deps) + "\n")
+
+ def is_package_useful(self,pkg,stages,files):
+ """ some basic heuristics here to cut part of packages """
+
+ excluded_paths=set(
+ ['/etc/sandbox.d/']
+ )
+
+ excluded_packages=set(
+ # autodep shows these two packages every time
+ ['net-zope/zope-fixers', 'net-zope/zope-interface']
+ )
+
+
+ def is_pkg_excluded(p):
+ for pkg in excluded_packages:
+ if p.startswith(pkg): # if package is excluded
+ return True
+ return False
+
+
+ def is_file_excluded(f):
+ for path in excluded_paths:
+ if f.startswith(path): # if path is excluded
+ return True
+ return False
+
+
+ if is_pkg_excluded(pkg):
+ return False
+
+ for f in files:
+ if is_file_excluded(f):
+ continue
+
+ # test 1: package is not useful if all its files are *.desktop, *.xml, *.m4 or *.pc
+ if not (f.endswith(".desktop") or f.endswith(".xml") or f.endswith(".m4") or f.endswith(".pc")):
+ break
+ else:
+ return False # we get here only if the loop ended without a break
+
+ return True
+
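+# A minimal sketch (not part of the original module) of the flag merging
+# performed in display() above: the per-stage (read, write) booleans for a
+# file are OR-ed together, so the report shows whether the file was ever
+# read or written during any phase of the build.
+def _merge_access_flags_sketch(per_stage_flags):
+    was_read = was_written = False
+    for read_flag, write_flag in per_stage_flags:
+        was_read |= read_flag
+        was_written |= write_flag
+    return was_read, was_written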
+ \ No newline at end of file
diff --git a/portage_with_autodep/pym/_emerge/EventsLogger.py b/portage_with_autodep/pym/_emerge/EventsLogger.py
new file mode 100644
index 0000000..68b3c67
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EventsLogger.py
@@ -0,0 +1,180 @@
+# Distributed under the terms of the GNU General Public License v2
+
+import io
+import sys
+import stat
+import socket
+import select
+import tempfile
+
+import threading
+
+from portage import os
+
+class EventsLogger(threading.Thread):
+ def default_filter(eventname, filename, stage):
+ return True
+
+ def __init__(self, socket_dir="/tmp/", filter_proc=default_filter):
+ threading.Thread.__init__(self) # init the Thread
+
+ self.alive=False
+
+ self.main_thread=threading.currentThread()
+
+ self.socket_dir=socket_dir
+ self.filter_proc=filter_proc
+
+ self.socket_name=None
+ self.socket_logger=None
+
+ self.events={}
+
+ try:
+ socket_dir_name = tempfile.mkdtemp(dir=self.socket_dir,
+ prefix="log_socket_")
+
+ socket_name = os.path.join(socket_dir_name, 'socket')
+
+ except OSError as e:
+ return
+
+ self.socket_name=socket_name
+
+ #print(self.socket_name)
+
+ try:
+ socket_logger=socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)
+ socket_logger.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+
+ socket_logger.bind(self.socket_name)
+ socket_logger.listen(64)
+
+ except socket.error as e:
+ return
+
+ self.socket_logger=socket_logger
+
+ try:
+ # Allow connecting to socket for anyone
+ os.chmod(socket_dir_name,
+ stat.S_IRUSR|stat.S_IWUSR|stat.S_IXUSR|
+ stat.S_IROTH|stat.S_IWOTH|stat.S_IXOTH)
+ os.chmod(socket_name,
+ stat.S_IRUSR|stat.S_IWUSR|stat.S_IXUSR|
+ stat.S_IROTH|stat.S_IWOTH|stat.S_IXOTH)
+ except OSError as e:
+ return
+
+ def run(self):
+ """ Starts the log server """
+
+ self.alive=True
+ self.listen_thread=threading.currentThread()
+ clients={}
+
+ epoll=select.epoll()
+ epoll.register(self.socket_logger.fileno(), select.EPOLLIN)
+
+ while self.alive:
+ try:
+ sock_events = epoll.poll(3)
+
+ for fileno, sock_event in sock_events:
+ if fileno == self.socket_logger.fileno():
+ ret = self.socket_logger.accept()
+ if ret is None:
+ pass
+ else:
+ (client,addr)=ret
+ epoll.register(client.fileno(), select.EPOLLIN)
+ clients[client.fileno()]=client
+ elif sock_event & select.EPOLLIN:
+ s=clients[fileno]
+ record=s.recv(8192)
+
+ if not record: # if connection was closed
+ epoll.unregister(fileno)
+ clients[fileno].close()
+ del clients[fileno]
+ continue
+
+ #import pdb; pdb.set_trace()
+ try:
+ message=record.decode("utf8").split("\0")
+ except UnicodeDecodeError:
+ print("Bad message %s" % record)
+ continue
+
+ # continue
+
+ #print(message)
+
+ try:
+ if message[4]=="ASKING":
+ if self.filter_proc(message[1],message[2],message[3]):
+ s.sendall(b"ALLOW\0")
+ else:
+ # TODO: log through portage infrastructure
+ #print("Blocking an access to %s" % message[2])
+ s.sendall(b"DENY\0")
+ else:
+ eventname,filename,stage,result=message[1:5]
+
+ if not stage in self.events:
+ self.events[stage]=[{},{}]
+
+ hashofsucesses=self.events[stage][0]
+ hashoffailures=self.events[stage][1]
+
+ if result=="DENIED":
+ print("Blocking an access to %s" % filename)
+
+ if result=="OK":
+ if not filename in hashofsucesses:
+ hashofsucesses[filename]=[False,False]
+
+ readed_or_writed=hashofsucesses[filename]
+
+ if eventname=="read":
+ readed_or_writed[0]=True
+ elif eventname=="write":
+ readed_or_writed[1]=True
+
+ elif result[0:3]=="ERR" or result=="DENIED":
+ if not filename in hashoffailures:
+ hashoffailures[filename]=[False,False]
+ notfound_or_blocked=hashoffailures[filename]
+
+ if result=="ERR/2":
+ notfound_or_blocked[0]=True
+ elif result=="DENIED":
+ notfound_or_blocked[1]=True
+
+ else:
+ print("Error in logger module<->analyser protocol")
+
+ except IndexError:
+ print("IndexError while parsing %s" % record)
+ except IOError as e:
+ if e.errno!=4: # handling "Interrupted system call" errors
+ raise
+
+ # exit if the main thread no longer exists
+ if not self.main_thread.is_alive():
+ break
+ epoll.unregister(self.socket_logger.fileno())
+ epoll.close()
+ self.socket_logger.close()
+
+ def stop(self):
+ """ Stops the log server. Returns all events """
+
+ self.alive=False
+
+ # Block the main thread until the listener thread exits
+ self.listen_thread.join()
+
+ # We assume portage clears the tmp folder, so we don't delete the socket file
+ # We assume that no new socket data will arrive after this moment
+ return self.events
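+
+# A minimal sketch (not part of the original module) of the NUL-separated
+# wire format handled in run() above. The first field is ignored by the
+# analyser; fields 1-4 are event name, file name, stage and result. A
+# result of "ASKING" requests permission and is answered with b"ALLOW\0"
+# or b"DENY\0"; completed events carry "OK", "ERR/<n>" or "DENIED" instead.
+def _decode_event_sketch(record):
+    fields = record.decode("utf8").split("\0")
+    eventname, filename, stage, result = fields[1:5]
+    return eventname, filename, stage, result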
diff --git a/portage_with_autodep/pym/_emerge/FakeVartree.py b/portage_with_autodep/pym/_emerge/FakeVartree.py
new file mode 100644
index 0000000..a11966f
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/FakeVartree.py
@@ -0,0 +1,265 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+
+import portage
+from portage import os
+from _emerge.Package import Package
+from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
+from portage.const import VDB_PATH
+from portage.dbapi.vartree import vartree
+from portage.repository.config import _gen_valid_repo
+from portage.update import grab_updates, parse_updates, update_dbentries
+
+if sys.hexversion >= 0x3000000:
+ long = int
+
+class FakeVardbapi(PackageVirtualDbapi):
+ """
+ Implements the vardbapi.getpath() method which is used in error handling
+ code for the Package class and vartree.get_provide().
+ """
+ def getpath(self, cpv, filename=None):
+ path = os.path.join(self.settings['EROOT'], VDB_PATH, cpv)
+ if filename is not None:
+ path = os.path.join(path, filename)
+ return path
+
+class FakeVartree(vartree):
+ """This is implements an in-memory copy of a vartree instance that provides
+ all the interfaces required for use by the depgraph. The vardb is locked
+ during the constructor call just long enough to read a copy of the
+ installed package information. This allows the depgraph to do it's
+ dependency calculations without holding a lock on the vardb. It also
+ allows things like vardb global updates to be done in memory so that the
+ user doesn't necessarily need write access to the vardb in cases where
+ global updates are necessary (updates are performed when necessary if there
+ is not a matching ebuild in the tree). Instances of this class are not
+ populated until the sync() method is called."""
+ def __init__(self, root_config, pkg_cache=None, pkg_root_config=None):
+ self._root_config = root_config
+ if pkg_root_config is None:
+ pkg_root_config = self._root_config
+ self._pkg_root_config = pkg_root_config
+ if pkg_cache is None:
+ pkg_cache = {}
+ real_vartree = root_config.trees["vartree"]
+ self._real_vardb = real_vartree.dbapi
+ portdb = root_config.trees["porttree"].dbapi
+ self.root = real_vartree.root
+ self.settings = real_vartree.settings
+ mykeys = list(real_vartree.dbapi._aux_cache_keys)
+ if "_mtime_" not in mykeys:
+ mykeys.append("_mtime_")
+ self._db_keys = mykeys
+ self._pkg_cache = pkg_cache
+ self.dbapi = FakeVardbapi(real_vartree.settings)
+
+ # Initialize variables needed for lazy cache pulls of the live ebuild
+ # metadata. This ensures that the vardb lock is released ASAP, without
+ # being delayed in case cache generation is triggered.
+ self._aux_get = self.dbapi.aux_get
+ self.dbapi.aux_get = self._aux_get_wrapper
+ self._match = self.dbapi.match
+ self.dbapi.match = self._match_wrapper
+ self._aux_get_history = set()
+ self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
+ self._portdb = portdb
+ self._global_updates = None
+
+ def _match_wrapper(self, cpv, use_cache=1):
+ """
+ Make sure the metadata in Package instances gets updated for any
+ cpv that is returned from a match() call, since the metadata can
+ be accessed directly from the Package instance instead of via
+ aux_get().
+ """
+ matches = self._match(cpv, use_cache=use_cache)
+ for cpv in matches:
+ if cpv in self._aux_get_history:
+ continue
+ self._aux_get_wrapper(cpv, [])
+ return matches
+
+ def _aux_get_wrapper(self, pkg, wants, myrepo=None):
+ if pkg in self._aux_get_history:
+ return self._aux_get(pkg, wants)
+ self._aux_get_history.add(pkg)
+ # We need to check the EAPI, and this also raises
+ # a KeyError to the caller if appropriate.
+ installed_eapi, repo = self._aux_get(pkg, ["EAPI", "repository"])
+ try:
+ # Use the live ebuild metadata if possible.
+ repo = _gen_valid_repo(repo)
+ live_metadata = dict(zip(self._portdb_keys,
+ self._portdb.aux_get(pkg, self._portdb_keys, myrepo=repo)))
+ # Use the metadata from the installed instance if the EAPI
+ # of either instance is unsupported, since if the installed
+ # instance has an unsupported or corrupt EAPI then we don't
+ # want to attempt to do complex operations such as execute
+ # pkg_config, pkg_prerm or pkg_postrm phases. If both EAPIs
+ # are supported then go ahead and use the live_metadata, in
+ # order to respect dep updates without revision bump or EAPI
+ # bump, as in bug #368725.
+ if not (portage.eapi_is_supported(live_metadata["EAPI"]) and \
+ portage.eapi_is_supported(installed_eapi)):
+ raise KeyError(pkg)
+ self.dbapi.aux_update(pkg, live_metadata)
+ except (KeyError, portage.exception.PortageException):
+ if self._global_updates is None:
+ self._global_updates = \
+ grab_global_updates(self._portdb)
+ perform_global_updates(
+ pkg, self.dbapi, self._global_updates)
+ return self._aux_get(pkg, wants)
+
+ def cpv_discard(self, pkg):
+ """
+ Discard a package from the fake vardb if it exists.
+ """
+ old_pkg = self.dbapi.get(pkg)
+ if old_pkg is not None:
+ self.dbapi.cpv_remove(old_pkg)
+ self._pkg_cache.pop(old_pkg, None)
+ self._aux_get_history.discard(old_pkg.cpv)
+
+ def sync(self, acquire_lock=1):
+ """
+ Call this method to synchronize state with the real vardb
+ after one or more packages may have been installed or
+ uninstalled.
+ """
+ locked = False
+ try:
+ if acquire_lock and os.access(self._real_vardb._dbroot, os.W_OK):
+ self._real_vardb.lock()
+ locked = True
+ self._sync()
+ finally:
+ if locked:
+ self._real_vardb.unlock()
+
+ # Populate the old-style virtuals using the cached values.
+ # Skip the aux_get wrapper here, to avoid unwanted
+ # cache generation.
+ try:
+ self.dbapi.aux_get = self._aux_get
+ self.settings._populate_treeVirtuals_if_needed(self)
+ finally:
+ self.dbapi.aux_get = self._aux_get_wrapper
+
+ def _sync(self):
+
+ real_vardb = self._root_config.trees["vartree"].dbapi
+ current_cpv_set = frozenset(real_vardb.cpv_all())
+ pkg_vardb = self.dbapi
+ pkg_cache = self._pkg_cache
+ aux_get_history = self._aux_get_history
+
+ # Remove any packages that have been uninstalled.
+ for pkg in list(pkg_vardb):
+ if pkg.cpv not in current_cpv_set:
+ self.cpv_discard(pkg)
+
+ # Validate counters and timestamps.
+ slot_counters = {}
+ root_config = self._pkg_root_config
+ validation_keys = ["COUNTER", "_mtime_"]
+ for cpv in current_cpv_set:
+
+ pkg_hash_key = Package._gen_hash_key(cpv=cpv,
+ installed=True, root_config=root_config,
+ type_name="installed")
+ pkg = pkg_vardb.get(pkg_hash_key)
+ if pkg is not None:
+ counter, mtime = real_vardb.aux_get(cpv, validation_keys)
+ try:
+ counter = long(counter)
+ except ValueError:
+ counter = 0
+
+ if counter != pkg.counter or \
+ mtime != pkg.mtime:
+ self.cpv_discard(pkg)
+ pkg = None
+
+ if pkg is None:
+ pkg = self._pkg(cpv)
+
+ other_counter = slot_counters.get(pkg.slot_atom)
+ if other_counter is not None:
+ if other_counter > pkg.counter:
+ continue
+
+ slot_counters[pkg.slot_atom] = pkg.counter
+ pkg_vardb.cpv_inject(pkg)
+
+ real_vardb.flush_cache()
+
+ def _pkg(self, cpv):
+ """
+ The RootConfig instance that will become the Package.root_config
+ attribute can be overridden by the FakeVartree pkg_root_config
+		constructor argument, since we want to be consistent with the
+ depgraph._pkg() method which uses a specially optimized
+ RootConfig that has a FakeVartree instead of a real vartree.
+ """
+ pkg = Package(cpv=cpv, built=True, installed=True,
+ metadata=zip(self._db_keys,
+ self._real_vardb.aux_get(cpv, self._db_keys)),
+ root_config=self._pkg_root_config,
+ type_name="installed")
+
+ try:
+ mycounter = long(pkg.metadata["COUNTER"])
+ except ValueError:
+ mycounter = 0
+ pkg.metadata["COUNTER"] = str(mycounter)
+
+ self._pkg_cache[pkg] = pkg
+ return pkg
+
+def grab_global_updates(portdb):
+ retupdates = {}
+
+ for repo_name in portdb.getRepositories():
+ repo = portdb.getRepositoryPath(repo_name)
+ updpath = os.path.join(repo, "profiles", "updates")
+ if not os.path.isdir(updpath):
+ continue
+
+ try:
+ rawupdates = grab_updates(updpath)
+ except portage.exception.DirectoryNotFound:
+ rawupdates = []
+ upd_commands = []
+ for mykey, mystat, mycontent in rawupdates:
+ commands, errors = parse_updates(mycontent)
+ upd_commands.extend(commands)
+ retupdates[repo_name] = upd_commands
+
+ master_repo = portdb.getRepositoryName(portdb.porttree_root)
+ if master_repo in retupdates:
+ retupdates['DEFAULT'] = retupdates[master_repo]
+
+ return retupdates
+
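+# Sketch of the 'myupdates' mapping consumed below, as produced by
+# grab_global_updates() above (contents are illustrative):
+#
+#   {'gentoo':  [['move', Atom('net-misc/foo'), Atom('net-misc/bar')], ...],
+#    'DEFAULT': [...]}  # mirrors the master repo's command list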
+def perform_global_updates(mycpv, mydb, myupdates):
+ aux_keys = ["DEPEND", "RDEPEND", "PDEPEND", 'repository']
+ aux_dict = dict(zip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
+ repository = aux_dict.pop('repository')
+ try:
+ mycommands = myupdates[repository]
+ except KeyError:
+ try:
+ mycommands = myupdates['DEFAULT']
+ except KeyError:
+ return
+
+ if not mycommands:
+ return
+
+ updates = update_dbentries(mycommands, aux_dict)
+ if updates:
+ mydb.aux_update(mycpv, updates)
diff --git a/portage_with_autodep/pym/_emerge/FifoIpcDaemon.py b/portage_with_autodep/pym/_emerge/FifoIpcDaemon.py
new file mode 100644
index 0000000..a716dac
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/FifoIpcDaemon.py
@@ -0,0 +1,81 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from _emerge.AbstractPollTask import AbstractPollTask
+from portage.cache.mappings import slot_dict_class
+
+class FifoIpcDaemon(AbstractPollTask):
+
+ __slots__ = ("input_fifo", "output_fifo",) + \
+ ("_files", "_reg_id",)
+
+ _file_names = ("pipe_in",)
+ _files_dict = slot_dict_class(_file_names, prefix="")
+
+ def _start(self):
+ self._files = self._files_dict()
+ input_fd = os.open(self.input_fifo, os.O_RDONLY|os.O_NONBLOCK)
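+		# POSIX: opening the read end of a FIFO with O_NONBLOCK succeeds
+		# immediately even if no writer is connected yet, so _start()
+		# never blocks here.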
+
+ # File streams are in unbuffered mode since we do atomic
+ # read and write of whole pickles.
+ self._files.pipe_in = os.fdopen(input_fd, 'rb', 0)
+
+ self._reg_id = self.scheduler.register(
+ self._files.pipe_in.fileno(),
+ self._registered_events, self._input_handler)
+
+ self._registered = True
+
+ def _reopen_input(self):
+ """
+ Re-open the input stream, in order to suppress
+ POLLHUP events (bug #339976).
+ """
+ self._files.pipe_in.close()
+ input_fd = os.open(self.input_fifo, os.O_RDONLY|os.O_NONBLOCK)
+ self._files.pipe_in = os.fdopen(input_fd, 'rb', 0)
+ self.scheduler.unregister(self._reg_id)
+ self._reg_id = self.scheduler.register(
+ self._files.pipe_in.fileno(),
+ self._registered_events, self._input_handler)
+
+ def isAlive(self):
+ return self._registered
+
+ def _cancel(self):
+ if self.returncode is None:
+ self.returncode = 1
+ self._unregister()
+
+ def _wait(self):
+ if self.returncode is not None:
+ return self.returncode
+
+ if self._registered:
+ self.scheduler.schedule(self._reg_id)
+ self._unregister()
+
+ if self.returncode is None:
+ self.returncode = os.EX_OK
+
+ return self.returncode
+
+ def _input_handler(self, fd, event):
+ raise NotImplementedError(self)
+
+ def _unregister(self):
+ """
+ Unregister from the scheduler and close open files.
+ """
+
+ self._registered = False
+
+ if self._reg_id is not None:
+ self.scheduler.unregister(self._reg_id)
+ self._reg_id = None
+
+ if self._files is not None:
+ for f in self._files.values():
+ f.close()
+ self._files = None
diff --git a/portage_with_autodep/pym/_emerge/JobStatusDisplay.py b/portage_with_autodep/pym/_emerge/JobStatusDisplay.py
new file mode 100644
index 0000000..1949232
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/JobStatusDisplay.py
@@ -0,0 +1,292 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import formatter
+import io
+import sys
+import time
+
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.output import xtermTitle
+
+from _emerge.getloadavg import getloadavg
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+class JobStatusDisplay(object):
+
+ _bound_properties = ("curval", "failed", "running")
+
+ # Don't update the display unless at least this much
+ # time has passed, in units of seconds.
+ _min_display_latency = 2
+
+ _default_term_codes = {
+ 'cr' : '\r',
+ 'el' : '\x1b[K',
+ 'nel' : '\n',
+ }
+
+ _termcap_name_map = {
+ 'carriage_return' : 'cr',
+ 'clr_eol' : 'el',
+ 'newline' : 'nel',
+ }
+
+ def __init__(self, quiet=False, xterm_titles=True):
+ object.__setattr__(self, "quiet", quiet)
+ object.__setattr__(self, "xterm_titles", xterm_titles)
+ object.__setattr__(self, "maxval", 0)
+ object.__setattr__(self, "merges", 0)
+ object.__setattr__(self, "_changed", False)
+ object.__setattr__(self, "_displayed", False)
+ object.__setattr__(self, "_last_display_time", 0)
+
+ self.reset()
+
+ isatty = os.environ.get('TERM') != 'dumb' and \
+ hasattr(self.out, 'isatty') and \
+ self.out.isatty()
+ object.__setattr__(self, "_isatty", isatty)
+ if not isatty or not self._init_term():
+ term_codes = {}
+ for k, capname in self._termcap_name_map.items():
+ term_codes[k] = self._default_term_codes[capname]
+ object.__setattr__(self, "_term_codes", term_codes)
+ encoding = sys.getdefaultencoding()
+ for k, v in self._term_codes.items():
+ if not isinstance(v, basestring):
+ self._term_codes[k] = v.decode(encoding, 'replace')
+
+ if self._isatty:
+ width = portage.output.get_term_size()[1]
+ else:
+ width = 80
+ self._set_width(width)
+
+ def _set_width(self, width):
+ if width == getattr(self, 'width', None):
+ return
+ if width <= 0 or width > 80:
+ width = 80
+ object.__setattr__(self, "width", width)
+ object.__setattr__(self, "_jobs_column_width", width - 32)
+
+ @property
+ def out(self):
+ """Use a lazy reference to sys.stdout, in case the API consumer has
+ temporarily overridden stdout."""
+ return sys.stdout
+
+ def _write(self, s):
+ # avoid potential UnicodeEncodeError
+ s = _unicode_encode(s,
+ encoding=_encodings['stdio'], errors='backslashreplace')
+ out = self.out
+ if sys.hexversion >= 0x3000000:
+ out = out.buffer
+ out.write(s)
+ out.flush()
+
+ def _init_term(self):
+ """
+ Initialize term control codes.
+ @rtype: bool
+ @returns: True if term codes were successfully initialized,
+ False otherwise.
+ """
+
+ term_type = os.environ.get("TERM", "").strip()
+ if not term_type:
+ return False
+ tigetstr = None
+
+ try:
+ import curses
+ try:
+ curses.setupterm(term_type, self.out.fileno())
+ tigetstr = curses.tigetstr
+ except curses.error:
+ pass
+ except ImportError:
+ pass
+
+ if tigetstr is None:
+ return False
+
+ term_codes = {}
+ for k, capname in self._termcap_name_map.items():
+ code = tigetstr(capname)
+ if code is None:
+ code = self._default_term_codes[capname]
+ term_codes[k] = code
+ object.__setattr__(self, "_term_codes", term_codes)
+ return True
+
+ def _format_msg(self, msg):
+ return ">>> %s" % msg
+
+ def _erase(self):
+ self._write(
+ self._term_codes['carriage_return'] + \
+ self._term_codes['clr_eol'])
+ self._displayed = False
+
+ def _display(self, line):
+ self._write(line)
+ self._displayed = True
+
+ def _update(self, msg):
+
+ if not self._isatty:
+ self._write(self._format_msg(msg) + self._term_codes['newline'])
+ self._displayed = True
+ return
+
+ if self._displayed:
+ self._erase()
+
+ self._display(self._format_msg(msg))
+
+ def displayMessage(self, msg):
+
+ was_displayed = self._displayed
+
+ if self._isatty and self._displayed:
+ self._erase()
+
+ self._write(self._format_msg(msg) + self._term_codes['newline'])
+ self._displayed = False
+
+ if was_displayed:
+ self._changed = True
+ self.display()
+
+ def reset(self):
+ self.maxval = 0
+ self.merges = 0
+ for name in self._bound_properties:
+ object.__setattr__(self, name, 0)
+
+ if self._displayed:
+ self._write(self._term_codes['newline'])
+ self._displayed = False
+
+ def __setattr__(self, name, value):
+ old_value = getattr(self, name)
+ if value == old_value:
+ return
+ object.__setattr__(self, name, value)
+ if name in self._bound_properties:
+ self._property_change(name, old_value, value)
+
+ def _property_change(self, name, old_value, new_value):
+ self._changed = True
+ self.display()
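+	# Together with __setattr__ above, this means plain attribute
+	# assignments such as "display.running += 1" automatically refresh
+	# the status line; callers don't need to invoke display() themselves.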
+
+ def _load_avg_str(self):
+ try:
+ avg = getloadavg()
+ except OSError:
+ return 'unknown'
+
+ max_avg = max(avg)
+
+ if max_avg < 10:
+ digits = 2
+ elif max_avg < 100:
+ digits = 1
+ else:
+ digits = 0
+
+ return ", ".join(("%%.%df" % digits ) % x for x in avg)
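+	# e.g. avg == (0.5, 1.2, 12.0) yields "0.5, 1.2, 12.0" (one decimal
+	# digit, because max(avg) is between 10 and 100)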
+
+ def display(self):
+ """
+ Display status on stdout, but only if something has
+ changed since the last call.
+ """
+
+ if self.quiet:
+ return
+
+ current_time = time.time()
+ time_delta = current_time - self._last_display_time
+ if self._displayed and \
+ not self._changed:
+ if not self._isatty:
+ return
+ if time_delta < self._min_display_latency:
+ return
+
+ self._last_display_time = current_time
+ self._changed = False
+ self._display_status()
+
+ def _display_status(self):
+ # Don't use len(self._completed_tasks) here since that also
+ # can include uninstall tasks.
+ curval_str = str(self.curval)
+ maxval_str = str(self.maxval)
+ running_str = str(self.running)
+ failed_str = str(self.failed)
+ load_avg_str = self._load_avg_str()
+
+ color_output = io.StringIO()
+ plain_output = io.StringIO()
+ style_file = portage.output.ConsoleStyleFile(color_output)
+ style_file.write_listener = plain_output
+ style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
+ style_writer.style_listener = style_file.new_styles
+ f = formatter.AbstractFormatter(style_writer)
+
+ number_style = "INFORM"
+ f.add_literal_data(_unicode_decode("Jobs: "))
+ f.push_style(number_style)
+ f.add_literal_data(_unicode_decode(curval_str))
+ f.pop_style()
+ f.add_literal_data(_unicode_decode(" of "))
+ f.push_style(number_style)
+ f.add_literal_data(_unicode_decode(maxval_str))
+ f.pop_style()
+ f.add_literal_data(_unicode_decode(" complete"))
+
+ if self.running:
+ f.add_literal_data(_unicode_decode(", "))
+ f.push_style(number_style)
+ f.add_literal_data(_unicode_decode(running_str))
+ f.pop_style()
+ f.add_literal_data(_unicode_decode(" running"))
+
+ if self.failed:
+ f.add_literal_data(_unicode_decode(", "))
+ f.push_style(number_style)
+ f.add_literal_data(_unicode_decode(failed_str))
+ f.pop_style()
+ f.add_literal_data(_unicode_decode(" failed"))
+
+ padding = self._jobs_column_width - len(plain_output.getvalue())
+ if padding > 0:
+ f.add_literal_data(padding * _unicode_decode(" "))
+
+ f.add_literal_data(_unicode_decode("Load avg: "))
+ f.add_literal_data(_unicode_decode(load_avg_str))
+
+ # Truncate to fit width, to avoid making the terminal scroll if the
+ # line overflows (happens when the load average is large).
+ plain_output = plain_output.getvalue()
+ if self._isatty and len(plain_output) > self.width:
+ # Use plain_output here since it's easier to truncate
+ # properly than the color output which contains console
+ # color codes.
+ self._update(plain_output[:self.width])
+ else:
+ self._update(color_output.getvalue())
+
+ if self.xterm_titles:
+ xtermTitle(" ".join(plain_output.split()))
diff --git a/portage_with_autodep/pym/_emerge/MergeListItem.py b/portage_with_autodep/pym/_emerge/MergeListItem.py
new file mode 100644
index 0000000..2176bf6
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/MergeListItem.py
@@ -0,0 +1,135 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.output import colorize
+
+from _emerge.AsynchronousTask import AsynchronousTask
+from _emerge.Binpkg import Binpkg
+from _emerge.CompositeTask import CompositeTask
+from _emerge.EbuildBuild import EbuildBuild
+from _emerge.PackageUninstall import PackageUninstall
+
+class MergeListItem(CompositeTask):
+
+ """
+ TODO: For parallel scheduling, everything here needs asynchronous
+ execution support (start, poll, and wait methods).
+ """
+
+ __slots__ = ("args_set",
+ "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
+ "find_blockers", "logger", "mtimedb", "pkg",
+ "pkg_count", "pkg_to_replace", "prefetcher",
+ "settings", "statusMessage", "world_atom") + \
+ ("_install_task",)
+
+ def _start(self):
+
+ pkg = self.pkg
+ build_opts = self.build_opts
+
+ if pkg.installed:
+ # uninstall, executed by self.merge()
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ args_set = self.args_set
+ find_blockers = self.find_blockers
+ logger = self.logger
+ mtimedb = self.mtimedb
+ pkg_count = self.pkg_count
+ scheduler = self.scheduler
+ settings = self.settings
+ world_atom = self.world_atom
+ ldpath_mtimes = mtimedb["ldpath"]
+
+ action_desc = "Emerging"
+ preposition = "for"
+ if pkg.type_name == "binary":
+ action_desc += " binary"
+
+ if build_opts.fetchonly:
+ action_desc = "Fetching"
+
+ msg = "%s (%s of %s) %s" % \
+ (action_desc,
+ colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
+ colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
+ colorize("GOOD", pkg.cpv))
+
+ portdb = pkg.root_config.trees["porttree"].dbapi
+ portdir_repo_name = portdb.getRepositoryName(portdb.porttree_root)
+ if portdir_repo_name:
+ pkg_repo_name = pkg.repo
+ if pkg_repo_name != portdir_repo_name:
+ if pkg_repo_name == pkg.UNKNOWN_REPO:
+ pkg_repo_name = "unknown repo"
+ msg += " from %s" % pkg_repo_name
+
+ if pkg.root != "/":
+ msg += " %s %s" % (preposition, pkg.root)
+
+ if not build_opts.pretend:
+ self.statusMessage(msg)
+ logger.log(" >>> emerge (%s of %s) %s to %s" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
+
+ if pkg.type_name == "ebuild":
+
+ build = EbuildBuild(args_set=args_set,
+ background=self.background,
+ config_pool=self.config_pool,
+ find_blockers=find_blockers,
+ ldpath_mtimes=ldpath_mtimes, logger=logger,
+ opts=build_opts, pkg=pkg, pkg_count=pkg_count,
+ prefetcher=self.prefetcher, scheduler=scheduler,
+ settings=settings, world_atom=world_atom)
+
+ self._install_task = build
+ self._start_task(build, self._default_final_exit)
+ return
+
+ elif pkg.type_name == "binary":
+
+ binpkg = Binpkg(background=self.background,
+ find_blockers=find_blockers,
+ ldpath_mtimes=ldpath_mtimes, logger=logger,
+ opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
+ prefetcher=self.prefetcher, settings=settings,
+ scheduler=scheduler, world_atom=world_atom)
+
+ self._install_task = binpkg
+ self._start_task(binpkg, self._default_final_exit)
+ return
+
+ def create_install_task(self):
+
+ pkg = self.pkg
+ build_opts = self.build_opts
+ mtimedb = self.mtimedb
+ scheduler = self.scheduler
+ settings = self.settings
+ world_atom = self.world_atom
+ ldpath_mtimes = mtimedb["ldpath"]
+
+ if pkg.installed:
+ if not (build_opts.buildpkgonly or \
+ build_opts.fetchonly or build_opts.pretend):
+
+ task = PackageUninstall(background=self.background,
+ ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
+ pkg=pkg, scheduler=scheduler, settings=settings,
+ world_atom=world_atom)
+
+ else:
+ task = AsynchronousTask()
+
+ elif build_opts.fetchonly or \
+ build_opts.buildpkgonly:
+ task = AsynchronousTask()
+ else:
+ task = self._install_task.create_install_task()
+
+ return task
diff --git a/portage_with_autodep/pym/_emerge/MetadataRegen.py b/portage_with_autodep/pym/_emerge/MetadataRegen.py
new file mode 100644
index 0000000..8103175
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/MetadataRegen.py
@@ -0,0 +1,184 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
+from _emerge.PollScheduler import PollScheduler
+
+class MetadataRegen(PollScheduler):
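+	"""
+	Regenerates ebuild metadata cache entries, spawning one
+	EbuildMetadataPhase per cpv whose cache entry is stale and running
+	up to max_jobs of them concurrently (summary comment; see
+	_iter_metadata_processes() and _schedule_tasks() below).
+	"""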
+
+ def __init__(self, portdb, cp_iter=None, consumer=None,
+ max_jobs=None, max_load=None):
+ PollScheduler.__init__(self)
+ self._portdb = portdb
+ self._global_cleanse = False
+ if cp_iter is None:
+ cp_iter = self._iter_every_cp()
+ # We can globally cleanse stale cache only if we
+ # iterate over every single cp.
+ self._global_cleanse = True
+ self._cp_iter = cp_iter
+ self._consumer = consumer
+
+ if max_jobs is None:
+ max_jobs = 1
+
+ self._max_jobs = max_jobs
+ self._max_load = max_load
+
+ self._valid_pkgs = set()
+ self._cp_set = set()
+ self._process_iter = self._iter_metadata_processes()
+ self.returncode = os.EX_OK
+ self._error_count = 0
+ self._running_tasks = set()
+
+ def _terminate_tasks(self):
+ while self._running_tasks:
+ self._running_tasks.pop().cancel()
+
+ def _iter_every_cp(self):
+ portage.writemsg_stdout("Listing available packages...\n")
+ every_cp = self._portdb.cp_all()
+ portage.writemsg_stdout("Regenerating cache entries...\n")
+ every_cp.sort(reverse=True)
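+		# reverse-sorted so that pop() yields packages in ascending order
+		# without the O(n) cost of pop(0)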
+ try:
+ while not self._terminated_tasks:
+ yield every_cp.pop()
+ except IndexError:
+ pass
+
+ def _iter_metadata_processes(self):
+ portdb = self._portdb
+ valid_pkgs = self._valid_pkgs
+ cp_set = self._cp_set
+ consumer = self._consumer
+
+ for cp in self._cp_iter:
+ if self._terminated_tasks:
+ break
+ cp_set.add(cp)
+ portage.writemsg_stdout("Processing %s\n" % cp)
+ cpv_list = portdb.cp_list(cp)
+ for cpv in cpv_list:
+ if self._terminated_tasks:
+ break
+ valid_pkgs.add(cpv)
+ ebuild_path, repo_path = portdb.findname2(cpv)
+ if ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s'" % cpv)
+ metadata, st, emtime = portdb._pull_valid_cache(
+ cpv, ebuild_path, repo_path)
+ if metadata is not None:
+ if consumer is not None:
+ consumer(cpv, ebuild_path,
+ repo_path, metadata)
+ continue
+
+ yield EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
+ ebuild_mtime=emtime,
+ metadata_callback=portdb._metadata_callback,
+ portdb=portdb, repo_path=repo_path,
+ settings=portdb.doebuild_settings)
+
+ def run(self):
+
+ portdb = self._portdb
+ from portage.cache.cache_errors import CacheError
+ dead_nodes = {}
+
+ while self._schedule():
+ self._poll_loop()
+
+ while self._jobs:
+ self._poll_loop()
+
+ if self._terminated_tasks:
+ self.returncode = 1
+ return
+
+ if self._global_cleanse:
+ for mytree in portdb.porttrees:
+ try:
+ dead_nodes[mytree] = set(portdb.auxdb[mytree])
+ except CacheError as e:
+ portage.writemsg("Error listing cache entries for " + \
+ "'%s': %s, continuing...\n" % (mytree, e),
+ noiselevel=-1)
+ del e
+ dead_nodes = None
+ break
+ else:
+ cp_set = self._cp_set
+ cpv_getkey = portage.cpv_getkey
+ for mytree in portdb.porttrees:
+ try:
+ dead_nodes[mytree] = set(cpv for cpv in \
+ portdb.auxdb[mytree] \
+ if cpv_getkey(cpv) in cp_set)
+ except CacheError as e:
+ portage.writemsg("Error listing cache entries for " + \
+ "'%s': %s, continuing...\n" % (mytree, e),
+ noiselevel=-1)
+ del e
+ dead_nodes = None
+ break
+
+ if dead_nodes:
+ for y in self._valid_pkgs:
+ for mytree in portdb.porttrees:
+ if portdb.findname2(y, mytree=mytree)[0]:
+ dead_nodes[mytree].discard(y)
+
+ for mytree, nodes in dead_nodes.items():
+ auxdb = portdb.auxdb[mytree]
+ for y in nodes:
+ try:
+ del auxdb[y]
+ except (KeyError, CacheError):
+ pass
+
+ def _schedule_tasks(self):
+ """
+ @rtype: bool
+ @returns: True if there may be remaining tasks to schedule,
+ False otherwise.
+ """
+ if self._terminated_tasks:
+ return False
+
+ while self._can_add_job():
+ try:
+ metadata_process = next(self._process_iter)
+ except StopIteration:
+ return False
+
+ self._jobs += 1
+ self._running_tasks.add(metadata_process)
+ metadata_process.scheduler = self.sched_iface
+ metadata_process.addExitListener(self._metadata_exit)
+ metadata_process.start()
+ return True
+
+ def _metadata_exit(self, metadata_process):
+ self._jobs -= 1
+ self._running_tasks.discard(metadata_process)
+ if metadata_process.returncode != os.EX_OK:
+ self.returncode = 1
+ self._error_count += 1
+ self._valid_pkgs.discard(metadata_process.cpv)
+ if not self._terminated_tasks:
+ portage.writemsg("Error processing %s, continuing...\n" % \
+ (metadata_process.cpv,), noiselevel=-1)
+
+ if self._consumer is not None:
+ # On failure, still notify the consumer (in this case the metadata
+ # argument is None).
+ self._consumer(metadata_process.cpv,
+ metadata_process.ebuild_path,
+ metadata_process.repo_path,
+ metadata_process.metadata)
+
+ self._schedule()
+
diff --git a/portage_with_autodep/pym/_emerge/MiscFunctionsProcess.py b/portage_with_autodep/pym/_emerge/MiscFunctionsProcess.py
new file mode 100644
index 0000000..ce0ab14
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/MiscFunctionsProcess.py
@@ -0,0 +1,33 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AbstractEbuildProcess import AbstractEbuildProcess
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.package.ebuild.doebuild:spawn'
+)
+from portage import os
+
+class MiscFunctionsProcess(AbstractEbuildProcess):
+ """
+ Spawns misc-functions.sh with an existing ebuild environment.
+ """
+
+ __slots__ = ('commands',)
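+	# 'commands' holds the names of shell functions from misc-functions.sh
+	# to execute, e.g. ['die_hooks'] (illustrative example).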
+
+ def _start(self):
+ settings = self.settings
+ portage_bin_path = settings["PORTAGE_BIN_PATH"]
+ misc_sh_binary = os.path.join(portage_bin_path,
+ os.path.basename(portage.const.MISC_SH_BINARY))
+
+ self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
+ if self.logfile is None and \
+ self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+ self.logfile = settings.get("PORTAGE_LOG_FILE")
+
+ AbstractEbuildProcess._start(self)
+
+ def _spawn(self, args, **kwargs):
+ self.settings.pop("EBUILD_PHASE", None)
+ return spawn(" ".join(args), self.settings, **kwargs)
diff --git a/portage_with_autodep/pym/_emerge/Package.py b/portage_with_autodep/pym/_emerge/Package.py
new file mode 100644
index 0000000..20c72b4
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/Package.py
@@ -0,0 +1,700 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+from itertools import chain
+import portage
+from portage import _encodings, _unicode_decode, _unicode_encode
+from portage.cache.mappings import slot_dict_class
+from portage.const import EBUILD_PHASES
+from portage.dep import Atom, check_required_use, use_reduce, \
+ paren_enclose, _slot_re, _slot_separator, _repo_separator
+from portage.eapi import eapi_has_iuse_defaults, eapi_has_required_use
+from portage.exception import InvalidDependString
+from portage.repository.config import _gen_valid_repo
+from _emerge.Task import Task
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+ long = int
+
+class Package(Task):
+
+ __hash__ = Task.__hash__
+ __slots__ = ("built", "cpv", "depth",
+ "installed", "metadata", "onlydeps", "operation",
+ "root_config", "type_name",
+ "category", "counter", "cp", "cpv_split",
+ "inherited", "invalid", "iuse", "masks", "mtime",
+ "pf", "pv_split", "root", "slot", "slot_atom", "visible",) + \
+ ("_raw_metadata", "_use",)
+
+ metadata_keys = [
+ "BUILD_TIME", "CHOST", "COUNTER", "DEPEND", "EAPI",
+ "INHERITED", "IUSE", "KEYWORDS",
+ "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
+ "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE",
+ "_mtime_", "DEFINED_PHASES", "REQUIRED_USE"]
+
+ _dep_keys = ('DEPEND', 'PDEPEND', 'RDEPEND',)
+ _use_conditional_misc_keys = ('LICENSE', 'PROPERTIES', 'RESTRICT')
+ UNKNOWN_REPO = "__unknown__"
+
+ def __init__(self, **kwargs):
+ Task.__init__(self, **kwargs)
+		# The SlotObject constructor assigns self.root_config from keyword
+		# args; it is an instance of the _emerge.RootConfig.RootConfig class.
+ self.root = self.root_config.root
+ self._raw_metadata = _PackageMetadataWrapperBase(self.metadata)
+ self.metadata = _PackageMetadataWrapper(self, self._raw_metadata)
+ if not self.built:
+ self.metadata['CHOST'] = self.root_config.settings.get('CHOST', '')
+ self.cp = portage.cpv_getkey(self.cpv)
+ slot = self.slot
+ if _slot_re.match(slot) is None:
+ self._invalid_metadata('SLOT.invalid',
+ "SLOT: invalid value: '%s'" % slot)
+ # Avoid an InvalidAtom exception when creating slot_atom.
+ # This package instance will be masked due to empty SLOT.
+ slot = '0'
+ if (self.iuse.enabled or self.iuse.disabled) and \
+ not eapi_has_iuse_defaults(self.metadata["EAPI"]):
+ if not self.installed:
+ self._invalid_metadata('EAPI.incompatible',
+ "IUSE contains defaults, but EAPI doesn't allow them")
+ self.slot_atom = portage.dep.Atom("%s%s%s" % (self.cp, _slot_separator, slot))
+ self.category, self.pf = portage.catsplit(self.cpv)
+ self.cpv_split = portage.catpkgsplit(self.cpv)
+ self.pv_split = self.cpv_split[1:]
+ if self.inherited is None:
+ self.inherited = frozenset()
+ repo = _gen_valid_repo(self.metadata.get('repository', ''))
+ if not repo:
+ repo = self.UNKNOWN_REPO
+ self.metadata['repository'] = repo
+
+ self._validate_deps()
+ self.masks = self._masks()
+ self.visible = self._visible(self.masks)
+ if self.operation is None:
+ if self.onlydeps or self.installed:
+ self.operation = "nomerge"
+ else:
+ self.operation = "merge"
+
+ self._hash_key = Package._gen_hash_key(cpv=self.cpv,
+ installed=self.installed, onlydeps=self.onlydeps,
+ operation=self.operation, repo_name=repo,
+ root_config=self.root_config,
+ type_name=self.type_name)
+ self._hash_value = hash(self._hash_key)
+
+ @classmethod
+ def _gen_hash_key(cls, cpv=None, installed=None, onlydeps=None,
+ operation=None, repo_name=None, root_config=None,
+ type_name=None, **kwargs):
+
+ if operation is None:
+ if installed or onlydeps:
+ operation = "nomerge"
+ else:
+ operation = "merge"
+
+ root = None
+ if root_config is not None:
+ root = root_config.root
+ else:
+ raise TypeError("root_config argument is required")
+
+ if type_name is None:
+ raise TypeError("type_name argument is required")
+ elif type_name == "ebuild":
+ if repo_name is None:
+ raise AssertionError(
+ "Package._gen_hash_key() " + \
+ "called without 'repo_name' argument")
+ repo_key = repo_name
+ else:
+ # For installed (and binary) packages we don't care for the repo
+ # when it comes to hashing, because there can only be one cpv.
+ # So overwrite the repo_key with type_name.
+ repo_key = type_name
+
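+		# The resulting key looks like, e.g. (illustrative values):
+		#   ('ebuild', '/', 'sys-apps/sed-4.2', 'merge', 'gentoo')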
+ return (type_name, root, cpv, operation, repo_key)
+
+ def _validate_deps(self):
+ """
+ Validate deps. This does not trigger USE calculation since that
+ is expensive for ebuilds and therefore we want to avoid doing
+		is expensive for ebuilds and therefore we want to avoid doing
+		it unnecessarily (like for masked packages).
+ eapi = self.metadata['EAPI']
+ dep_eapi = eapi
+ dep_valid_flag = self.iuse.is_valid_flag
+ if self.installed:
+ # Ignore EAPI.incompatible and conditionals missing
+ # from IUSE for installed packages since these issues
+ # aren't relevant now (re-evaluate when new EAPIs are
+ # deployed).
+ dep_eapi = None
+ dep_valid_flag = None
+
+ for k in self._dep_keys:
+ v = self.metadata.get(k)
+ if not v:
+ continue
+ try:
+ use_reduce(v, eapi=dep_eapi, matchall=True,
+ is_valid_flag=dep_valid_flag, token_class=Atom)
+ except InvalidDependString as e:
+ self._metadata_exception(k, e)
+
+ k = 'PROVIDE'
+ v = self.metadata.get(k)
+ if v:
+ try:
+ use_reduce(v, eapi=dep_eapi, matchall=True,
+ is_valid_flag=dep_valid_flag, token_class=Atom)
+ except InvalidDependString as e:
+ self._invalid_metadata("PROVIDE.syntax",
+ _unicode_decode("%s: %s") % (k, e))
+
+ for k in self._use_conditional_misc_keys:
+ v = self.metadata.get(k)
+ if not v:
+ continue
+ try:
+ use_reduce(v, eapi=dep_eapi, matchall=True,
+ is_valid_flag=dep_valid_flag)
+ except InvalidDependString as e:
+ self._metadata_exception(k, e)
+
+ k = 'REQUIRED_USE'
+ v = self.metadata.get(k)
+ if v:
+ if not eapi_has_required_use(eapi):
+ self._invalid_metadata('EAPI.incompatible',
+ "REQUIRED_USE set, but EAPI='%s' doesn't allow it" % eapi)
+ else:
+ try:
+ check_required_use(v, (),
+ self.iuse.is_valid_flag)
+ except InvalidDependString as e:
+ # Force unicode format string for python-2.x safety,
+ # ensuring that PortageException.__unicode__() is used
+ # when necessary.
+ self._invalid_metadata(k + ".syntax",
+ _unicode_decode("%s: %s") % (k, e))
+
+ k = 'SRC_URI'
+ v = self.metadata.get(k)
+ if v:
+ try:
+ use_reduce(v, is_src_uri=True, eapi=eapi, matchall=True,
+ is_valid_flag=self.iuse.is_valid_flag)
+ except InvalidDependString as e:
+ if not self.installed:
+ self._metadata_exception(k, e)
+
+ def copy(self):
+ return Package(built=self.built, cpv=self.cpv, depth=self.depth,
+ installed=self.installed, metadata=self._raw_metadata,
+ onlydeps=self.onlydeps, operation=self.operation,
+ root_config=self.root_config, type_name=self.type_name)
+
+ def _masks(self):
+ masks = {}
+ settings = self.root_config.settings
+
+ if self.invalid is not None:
+ masks['invalid'] = self.invalid
+
+ if not settings._accept_chost(self.cpv, self.metadata):
+ masks['CHOST'] = self.metadata['CHOST']
+
+ eapi = self.metadata["EAPI"]
+ if not portage.eapi_is_supported(eapi):
+ masks['EAPI.unsupported'] = eapi
+ if portage._eapi_is_deprecated(eapi):
+ masks['EAPI.deprecated'] = eapi
+
+ missing_keywords = settings._getMissingKeywords(
+ self.cpv, self.metadata)
+ if missing_keywords:
+ masks['KEYWORDS'] = missing_keywords
+
+ try:
+ missing_properties = settings._getMissingProperties(
+ self.cpv, self.metadata)
+ if missing_properties:
+ masks['PROPERTIES'] = missing_properties
+ except InvalidDependString:
+ # already recorded as 'invalid'
+ pass
+
+ mask_atom = settings._getMaskAtom(self.cpv, self.metadata)
+ if mask_atom is not None:
+ masks['package.mask'] = mask_atom
+
+ system_mask = settings._getProfileMaskAtom(
+ self.cpv, self.metadata)
+ if system_mask is not None:
+ masks['profile.system'] = system_mask
+
+ try:
+ missing_licenses = settings._getMissingLicenses(
+ self.cpv, self.metadata)
+ if missing_licenses:
+ masks['LICENSE'] = missing_licenses
+ except InvalidDependString:
+ # already recorded as 'invalid'
+ pass
+
+ if not masks:
+ masks = None
+
+ return masks
+
+ def _visible(self, masks):
+
+ if masks is not None:
+
+ if 'EAPI.unsupported' in masks:
+ return False
+
+ if 'invalid' in masks:
+ return False
+
+ if not self.installed and ( \
+ 'CHOST' in masks or \
+ 'EAPI.deprecated' in masks or \
+ 'KEYWORDS' in masks or \
+ 'PROPERTIES' in masks):
+ return False
+
+ if 'package.mask' in masks or \
+ 'profile.system' in masks or \
+ 'LICENSE' in masks:
+ return False
+
+ return True
+
+ def get_keyword_mask(self):
+ """returns None, 'missing', or 'unstable'."""
+
+ missing = self.root_config.settings._getRawMissingKeywords(
+ self.cpv, self.metadata)
+
+ if not missing:
+ return None
+
+ if '**' in missing:
+ return 'missing'
+
+ global_accept_keywords = frozenset(
+ self.root_config.settings.get("ACCEPT_KEYWORDS", "").split())
+
+ for keyword in missing:
+ if keyword.lstrip("~") in global_accept_keywords:
+ return 'unstable'
+
+ return 'missing'
+
+ def isHardMasked(self):
+		"""Returns True if the cpv is matched by an atom from the expanded
+		pmaskdict[cp] list (i.e. it is hard masked), False otherwise."""
+ pmask = self.root_config.settings._getRawMaskAtom(
+ self.cpv, self.metadata)
+ return pmask is not None
+
+ def _metadata_exception(self, k, e):
+
+ # For unicode safety with python-2.x we need to avoid
+ # using the string format operator with a non-unicode
+ # format string, since that will result in the
+ # PortageException.__str__() method being invoked,
+ # followed by unsafe decoding that may result in a
+ # UnicodeDecodeError. Therefore, use _unicode_decode()
+ # to ensure that format strings are unicode, so that
+ # PortageException.__unicode__() is used when necessary
+ # in python-2.x.
+ if not self.installed:
+ categorized_error = False
+ if e.errors:
+ for error in e.errors:
+ if getattr(error, 'category', None) is None:
+ continue
+ categorized_error = True
+ self._invalid_metadata(error.category,
+ _unicode_decode("%s: %s") % (k, error))
+
+ if not categorized_error:
+ self._invalid_metadata(k + ".syntax",
+ _unicode_decode("%s: %s") % (k, e))
+ else:
+ # For installed packages, show the path of the file
+ # containing the invalid metadata, since the user may
+ # want to fix the deps by hand.
+ vardb = self.root_config.trees['vartree'].dbapi
+ path = vardb.getpath(self.cpv, filename=k)
+ self._invalid_metadata(k + ".syntax",
+ _unicode_decode("%s: %s in '%s'") % (k, e, path))
+
+ def _invalid_metadata(self, msg_type, msg):
+ if self.invalid is None:
+ self.invalid = {}
+ msgs = self.invalid.get(msg_type)
+ if msgs is None:
+ msgs = []
+ self.invalid[msg_type] = msgs
+ msgs.append(msg)
+
+ def __str__(self):
+ if self.operation == "merge":
+ if self.type_name == "binary":
+ cpv_color = "PKG_BINARY_MERGE"
+ else:
+ cpv_color = "PKG_MERGE"
+ elif self.operation == "uninstall":
+ cpv_color = "PKG_UNINSTALL"
+ else:
+ cpv_color = "PKG_NOMERGE"
+
+		s = "(%s, %s" \
+			% (portage.output.colorize(cpv_color, self.cpv + _repo_separator + self.repo), self.type_name)
+
+ if self.type_name == "installed":
+ if self.root != "/":
+ s += " in '%s'" % self.root
+ if self.operation == "uninstall":
+ s += " scheduled for uninstall"
+ else:
+ if self.operation == "merge":
+ s += " scheduled for merge"
+ if self.root != "/":
+ s += " to '%s'" % self.root
+ s += ")"
+ return s
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(),
+ encoding=_encodings['content'])
+
+ class _use_class(object):
+
+ __slots__ = ("enabled", "_expand", "_expand_hidden",
+ "_force", "_pkg", "_mask")
+
+ # Share identical frozenset instances when available.
+ _frozensets = {}
+
+ def __init__(self, pkg, use_str):
+ self._pkg = pkg
+ self._expand = None
+ self._expand_hidden = None
+ self._force = None
+ self._mask = None
+ self.enabled = frozenset(use_str.split())
+ if pkg.built:
+ # Use IUSE to validate USE settings for built packages,
+ # in case the package manager that built this package
+ # failed to do that for some reason (or in case of
+ # data corruption).
+ missing_iuse = pkg.iuse.get_missing_iuse(self.enabled)
+ if missing_iuse:
+ self.enabled = self.enabled.difference(missing_iuse)
+
+ def _init_force_mask(self):
+ pkgsettings = self._pkg._get_pkgsettings()
+ frozensets = self._frozensets
+ s = frozenset(
+ pkgsettings.get("USE_EXPAND", "").lower().split())
+ self._expand = frozensets.setdefault(s, s)
+ s = frozenset(
+ pkgsettings.get("USE_EXPAND_HIDDEN", "").lower().split())
+ self._expand_hidden = frozensets.setdefault(s, s)
+ s = pkgsettings.useforce
+ self._force = frozensets.setdefault(s, s)
+ s = pkgsettings.usemask
+ self._mask = frozensets.setdefault(s, s)
+
+ @property
+ def expand(self):
+ if self._expand is None:
+ self._init_force_mask()
+ return self._expand
+
+ @property
+ def expand_hidden(self):
+ if self._expand_hidden is None:
+ self._init_force_mask()
+ return self._expand_hidden
+
+ @property
+ def force(self):
+ if self._force is None:
+ self._init_force_mask()
+ return self._force
+
+ @property
+ def mask(self):
+ if self._mask is None:
+ self._init_force_mask()
+ return self._mask
+
+ @property
+ def repo(self):
+ return self.metadata['repository']
+
+ @property
+ def repo_priority(self):
+ repo_info = self.root_config.settings.repositories.prepos.get(self.repo)
+ if repo_info is None:
+ return None
+ return repo_info.priority
+
+ @property
+ def use(self):
+ if self._use is None:
+ self.metadata._init_use()
+ return self._use
+
+ def _get_pkgsettings(self):
+ pkgsettings = self.root_config.trees[
+ 'porttree'].dbapi.doebuild_settings
+ pkgsettings.setcpv(self)
+ return pkgsettings
+
+ class _iuse(object):
+
+ __slots__ = ("__weakref__", "all", "enabled", "disabled",
+ "tokens") + ("_iuse_implicit_match",)
+
+ def __init__(self, tokens, iuse_implicit_match):
+ self.tokens = tuple(tokens)
+ self._iuse_implicit_match = iuse_implicit_match
+ enabled = []
+ disabled = []
+ other = []
+ for x in tokens:
+ prefix = x[:1]
+ if prefix == "+":
+ enabled.append(x[1:])
+ elif prefix == "-":
+ disabled.append(x[1:])
+ else:
+ other.append(x)
+ self.enabled = frozenset(enabled)
+ self.disabled = frozenset(disabled)
+ self.all = frozenset(chain(enabled, disabled, other))
+
+ def is_valid_flag(self, flags):
+ """
+ @returns: True if all flags are valid USE values which may
+ be specified in USE dependencies, False otherwise.
+ """
+ if isinstance(flags, basestring):
+ flags = [flags]
+
+ for flag in flags:
+ if not flag in self.all and \
+ not self._iuse_implicit_match(flag):
+ return False
+ return True
+
+ def get_missing_iuse(self, flags):
+ """
+ @returns: A list of flags missing from IUSE.
+ """
+ if isinstance(flags, basestring):
+ flags = [flags]
+ missing_iuse = []
+ for flag in flags:
+ if not flag in self.all and \
+ not self._iuse_implicit_match(flag):
+ missing_iuse.append(flag)
+ return missing_iuse
+
+ def __len__(self):
+ return 4
+
+ def __iter__(self):
+ """
+ This is used to generate mtimedb resume mergelist entries, so we
+ limit it to 4 items for backward compatibility.
+ """
+ return iter(self._hash_key[:4])
+
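+	# Note: the rich comparisons below only order packages sharing the
+	# same cp (category/package name); comparisons across different cp
+	# values always return False, so this is not a total ordering.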
+ def __lt__(self, other):
+ if other.cp != self.cp:
+ return False
+ if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
+ return True
+ return False
+
+ def __le__(self, other):
+ if other.cp != self.cp:
+ return False
+ if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
+ return True
+ return False
+
+ def __gt__(self, other):
+ if other.cp != self.cp:
+ return False
+ if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
+ return True
+ return False
+
+ def __ge__(self, other):
+ if other.cp != self.cp:
+ return False
+ if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
+ return True
+ return False
+
+_all_metadata_keys = set(x for x in portage.auxdbkeys \
+ if not x.startswith("UNUSED_"))
+_all_metadata_keys.update(Package.metadata_keys)
+_all_metadata_keys = frozenset(_all_metadata_keys)
+
+_PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
+
+class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
+ """
+ Detect metadata updates and synchronize Package attributes.
+ """
+
+ __slots__ = ("_pkg",)
+ _wrapped_keys = frozenset(
+ ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
+ _use_conditional_keys = frozenset(
+ ['LICENSE', 'PROPERTIES', 'PROVIDE', 'RESTRICT',])
+
+ def __init__(self, pkg, metadata):
+ _PackageMetadataWrapperBase.__init__(self)
+ self._pkg = pkg
+ if not pkg.built:
+ # USE is lazy, but we want it to show up in self.keys().
+ _PackageMetadataWrapperBase.__setitem__(self, 'USE', '')
+
+ self.update(metadata)
+
+ def _init_use(self):
+ if self._pkg.built:
+ use_str = self['USE']
+ self._pkg._use = self._pkg._use_class(
+ self._pkg, use_str)
+ else:
+ try:
+ use_str = _PackageMetadataWrapperBase.__getitem__(self, 'USE')
+ except KeyError:
+ use_str = None
+ calculated_use = False
+ if not use_str:
+ use_str = self._pkg._get_pkgsettings()["PORTAGE_USE"]
+ calculated_use = True
+ _PackageMetadataWrapperBase.__setitem__(self, 'USE', use_str)
+ self._pkg._use = self._pkg._use_class(
+ self._pkg, use_str)
+ # Initialize these now, since USE access has just triggered
+ # setcpv, and we want to cache the result of the force/mask
+ # calculations that were done.
+ if calculated_use:
+ self._pkg._use._init_force_mask()
+
+ return use_str
+
+ def __getitem__(self, k):
+ v = _PackageMetadataWrapperBase.__getitem__(self, k)
+ if k in self._use_conditional_keys:
+ if self._pkg.root_config.settings.local_config and '?' in v:
+ try:
+ v = paren_enclose(use_reduce(v, uselist=self._pkg.use.enabled, \
+ is_valid_flag=self._pkg.iuse.is_valid_flag))
+ except InvalidDependString:
+ # This error should already have been registered via
+ # self._pkg._invalid_metadata().
+ pass
+ else:
+ self[k] = v
+
+ elif k == 'USE' and not self._pkg.built:
+ if not v:
+ # This is lazy because it's expensive.
+ v = self._init_use()
+
+ return v
+
+ def __setitem__(self, k, v):
+ _PackageMetadataWrapperBase.__setitem__(self, k, v)
+ if k in self._wrapped_keys:
+ getattr(self, "_set_" + k.lower())(k, v)
+
+ def _set_inherited(self, k, v):
+ if isinstance(v, basestring):
+ v = frozenset(v.split())
+ self._pkg.inherited = v
+
+ def _set_iuse(self, k, v):
+ self._pkg.iuse = self._pkg._iuse(
+ v.split(), self._pkg.root_config.settings._iuse_implicit_match)
+
+ def _set_slot(self, k, v):
+ self._pkg.slot = v
+
+ def _set_counter(self, k, v):
+ if isinstance(v, basestring):
+ try:
+ v = long(v.strip())
+ except ValueError:
+ v = 0
+ self._pkg.counter = v
+
+ def _set_use(self, k, v):
+ # Force regeneration of _use attribute
+ self._pkg._use = None
+ # Use raw metadata to restore USE conditional values
+ # to unevaluated state
+ raw_metadata = self._pkg._raw_metadata
+ for x in self._use_conditional_keys:
+ try:
+ self[x] = raw_metadata[x]
+ except KeyError:
+ pass
+
+ def _set__mtime_(self, k, v):
+ if isinstance(v, basestring):
+ try:
+ v = long(v.strip())
+ except ValueError:
+ v = 0
+ self._pkg.mtime = v
+
+ @property
+ def properties(self):
+ return self['PROPERTIES'].split()
+
+ @property
+ def restrict(self):
+ return self['RESTRICT'].split()
+
+ @property
+ def defined_phases(self):
+ """
+ Returns tokens from DEFINED_PHASES metadata if it is defined,
+ otherwise returns a tuple containing all possible phases. This
+ makes it easy to do containment checks to see if it's safe to
+ skip execution of a given phase.
+ """
+ s = self['DEFINED_PHASES']
+ if s:
+ return s.split()
+ return EBUILD_PHASES
diff --git a/portage_with_autodep/pym/_emerge/PackageArg.py b/portage_with_autodep/pym/_emerge/PackageArg.py
new file mode 100644
index 0000000..ebfe4b2
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/PackageArg.py
@@ -0,0 +1,19 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.DependencyArg import DependencyArg
+from _emerge.Package import Package
+import portage
+from portage._sets.base import InternalPackageSet
+from portage.dep import _repo_separator
+
+class PackageArg(DependencyArg):
+ def __init__(self, package=None, **kwargs):
+ DependencyArg.__init__(self, **kwargs)
+ self.package = package
+ atom = "=" + package.cpv
+ if package.repo != Package.UNKNOWN_REPO:
+ atom += _repo_separator + package.repo
+ self.atom = portage.dep.Atom(atom, allow_repo=True)
+ self.pset = InternalPackageSet(initial_atoms=(self.atom,),
+ allow_repo=True)
diff --git a/portage_with_autodep/pym/_emerge/PackageMerge.py b/portage_with_autodep/pym/_emerge/PackageMerge.py
new file mode 100644
index 0000000..f8fa04a
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/PackageMerge.py
@@ -0,0 +1,40 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.CompositeTask import CompositeTask
+from portage.output import colorize
+
+class PackageMerge(CompositeTask):
+ __slots__ = ("merge",)
+
+ def _start(self):
+
+ self.scheduler = self.merge.scheduler
+ pkg = self.merge.pkg
+ pkg_count = self.merge.pkg_count
+
+ if pkg.installed:
+ action_desc = "Uninstalling"
+ preposition = "from"
+ counter_str = ""
+ else:
+ action_desc = "Installing"
+ preposition = "to"
+ counter_str = "(%s of %s) " % \
+ (colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
+ colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)))
+
+ msg = "%s %s%s" % \
+ (action_desc,
+ counter_str,
+ colorize("GOOD", pkg.cpv))
+
+ if pkg.root != "/":
+ msg += " %s %s" % (preposition, pkg.root)
+
+ if not self.merge.build_opts.fetchonly and \
+ not self.merge.build_opts.pretend and \
+ not self.merge.build_opts.buildpkgonly:
+ self.merge.statusMessage(msg)
+
+ task = self.merge.create_install_task()
+ self._start_task(task, self._default_final_exit)
diff --git a/portage_with_autodep/pym/_emerge/PackageUninstall.py b/portage_with_autodep/pym/_emerge/PackageUninstall.py
new file mode 100644
index 0000000..eb6a947
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/PackageUninstall.py
@@ -0,0 +1,110 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+import portage
+from portage import os
+from portage.dbapi._MergeProcess import MergeProcess
+from portage.exception import UnsupportedAPIException
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.emergelog import emergelog
+from _emerge.CompositeTask import CompositeTask
+from _emerge.unmerge import _unmerge_display
+
+class PackageUninstall(CompositeTask):
+ """
+ Uninstall a package asynchronously in a subprocess. When
+ both parallel-install and ebuild-locks FEATURES are enabled,
+ it is essential for the ebuild-locks code to execute in a
+ subprocess, since the portage.locks module does not behave
+ as desired if we try to lock the same file multiple times
+ concurrently from the same process for ebuild-locks phases
+ such as pkg_setup, pkg_prerm, and pkg_postrm.
+ """
+
+ __slots__ = ("world_atom", "ldpath_mtimes", "opts",
+ "pkg", "settings", "_builddir_lock")
+
+ def _start(self):
+
+ vardb = self.pkg.root_config.trees["vartree"].dbapi
+ dbdir = vardb.getpath(self.pkg.cpv)
+ if not os.path.exists(dbdir):
+ # Apparently the package got uninstalled
+ # already, so we can safely return early.
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ self.settings.setcpv(self.pkg)
+ cat, pf = portage.catsplit(self.pkg.cpv)
+ myebuildpath = os.path.join(dbdir, pf + ".ebuild")
+
+ try:
+ portage.doebuild_environment(myebuildpath, "prerm",
+ settings=self.settings, db=vardb)
+ except UnsupportedAPIException:
+ # This is safe to ignore since this function is
+ # guaranteed to set PORTAGE_BUILDDIR even though
+ # it raises UnsupportedAPIException. The error
+ # will be logged when it prevents the pkg_prerm
+ # and pkg_postrm phases from executing.
+ pass
+
+ self._builddir_lock = EbuildBuildDir(
+ scheduler=self.scheduler, settings=self.settings)
+ self._builddir_lock.lock()
+
+ portage.prepare_build_dirs(
+ settings=self.settings, cleanup=True)
+
+ # Output only gets logged if it comes after prepare_build_dirs()
+ # which initializes PORTAGE_LOG_FILE.
+ retval, pkgmap = _unmerge_display(self.pkg.root_config,
+ self.opts, "unmerge", [self.pkg.cpv], clean_delay=0,
+ writemsg_level=self._writemsg_level)
+
+ if retval != os.EX_OK:
+ self._builddir_lock.unlock()
+ self.returncode = retval
+ self.wait()
+ return
+
+ self._writemsg_level(">>> Unmerging %s...\n" % (self.pkg.cpv,),
+ noiselevel=-1)
+ self._emergelog("=== Unmerging... (%s)" % (self.pkg.cpv,))
+
+ unmerge_task = MergeProcess(
+ mycat=cat, mypkg=pf, settings=self.settings,
+ treetype="vartree", vartree=self.pkg.root_config.trees["vartree"],
+ scheduler=self.scheduler, background=self.background,
+ mydbapi=self.pkg.root_config.trees["vartree"].dbapi,
+ prev_mtimes=self.ldpath_mtimes,
+ logfile=self.settings.get("PORTAGE_LOG_FILE"), unmerge=True)
+
+ self._start_task(unmerge_task, self._unmerge_exit)
+
+ def _unmerge_exit(self, unmerge_task):
+ if self._final_exit(unmerge_task) != os.EX_OK:
+ self._emergelog(" !!! unmerge FAILURE: %s" % (self.pkg.cpv,))
+ else:
+ self._emergelog(" >>> unmerge success: %s" % (self.pkg.cpv,))
+ self.world_atom(self.pkg)
+ self._builddir_lock.unlock()
+ self.wait()
+
+ def _emergelog(self, msg):
+ emergelog("notitles" not in self.settings.features, msg)
+
+ def _writemsg_level(self, msg, level=0, noiselevel=0):
+
+ log_path = self.settings.get("PORTAGE_LOG_FILE")
+ background = self.background
+
+ if log_path is None:
+ if not (background and level < logging.WARNING):
+ portage.util.writemsg_level(msg,
+ level=level, noiselevel=noiselevel)
+ else:
+ self.scheduler.output(msg, log_path=log_path,
+ level=level, noiselevel=noiselevel)
diff --git a/portage_with_autodep/pym/_emerge/PackageVirtualDbapi.py b/portage_with_autodep/pym/_emerge/PackageVirtualDbapi.py
new file mode 100644
index 0000000..a692bb6
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/PackageVirtualDbapi.py
@@ -0,0 +1,145 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+from portage.dbapi import dbapi
+
+class PackageVirtualDbapi(dbapi):
+ """
+ A dbapi-like interface class that represents the state of the installed
+ package database as new packages are installed, replacing any packages
+ that previously existed in the same slot. The main difference between
+ this class and fakedbapi is that this one uses Package instances
+ internally (passed in via cpv_inject() and cpv_remove() calls).
+ """
+ def __init__(self, settings):
+ dbapi.__init__(self)
+ self.settings = settings
+ self._match_cache = {}
+ self._cp_map = {}
+ self._cpv_map = {}
+
+ def clear(self):
+ """
+ Remove all packages.
+ """
+ if self._cpv_map:
+ self._clear_cache()
+ self._cp_map.clear()
+ self._cpv_map.clear()
+
+ def copy(self):
+ obj = PackageVirtualDbapi(self.settings)
+ obj._match_cache = self._match_cache.copy()
+ obj._cp_map = self._cp_map.copy()
+ for k, v in obj._cp_map.items():
+ obj._cp_map[k] = v[:]
+ obj._cpv_map = self._cpv_map.copy()
+ return obj
+
+ def __bool__(self):
+ return bool(self._cpv_map)
+
+ if sys.hexversion < 0x3000000:
+ __nonzero__ = __bool__
+
+ def __iter__(self):
+ return iter(self._cpv_map.values())
+
+ def __contains__(self, item):
+ existing = self._cpv_map.get(item.cpv)
+ if existing is not None and \
+ existing == item:
+ return True
+ return False
+
+ def get(self, item, default=None):
+ cpv = getattr(item, "cpv", None)
+ if cpv is None:
+ if len(item) != 5:
+ return default
+ type_name, root, cpv, operation, repo_key = item
+
+ existing = self._cpv_map.get(cpv)
+ if existing is not None and \
+ existing == item:
+ return existing
+ return default
+
+ def match_pkgs(self, atom):
+ return [self._cpv_map[cpv] for cpv in self.match(atom)]
+
+ def _clear_cache(self):
+ if self._categories is not None:
+ self._categories = None
+ if self._match_cache:
+ self._match_cache = {}
+
+ def match(self, origdep, use_cache=1):
+ result = self._match_cache.get(origdep)
+ if result is not None:
+ return result[:]
+ result = dbapi.match(self, origdep, use_cache=use_cache)
+ self._match_cache[origdep] = result
+ return result[:]
+
+ def cpv_exists(self, cpv, myrepo=None):
+ return cpv in self._cpv_map
+
+ def cp_list(self, mycp, use_cache=1):
+ cachelist = self._match_cache.get(mycp)
+ # cp_list() doesn't expand old-style virtuals
+ if cachelist and cachelist[0].startswith(mycp):
+ return cachelist[:]
+ cpv_list = self._cp_map.get(mycp)
+ if cpv_list is None:
+ cpv_list = []
+ else:
+ cpv_list = [pkg.cpv for pkg in cpv_list]
+ self._cpv_sort_ascending(cpv_list)
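+		# Cache the result unless it is an empty list for an old-style
+		# virtual, since such a virtual may still expand to providers later.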
+ if not (not cpv_list and mycp.startswith("virtual/")):
+ self._match_cache[mycp] = cpv_list
+ return cpv_list[:]
+
+ def cp_all(self):
+ return list(self._cp_map)
+
+ def cpv_all(self):
+ return list(self._cpv_map)
+
+ def cpv_inject(self, pkg):
+ cp_list = self._cp_map.get(pkg.cp)
+ if cp_list is None:
+ cp_list = []
+ self._cp_map[pkg.cp] = cp_list
+ e_pkg = self._cpv_map.get(pkg.cpv)
+ if e_pkg is not None:
+ if e_pkg == pkg:
+ return
+ self.cpv_remove(e_pkg)
+ for e_pkg in cp_list:
+ if e_pkg.slot_atom == pkg.slot_atom:
+ if e_pkg == pkg:
+ return
+ self.cpv_remove(e_pkg)
+ break
+ cp_list.append(pkg)
+ self._cpv_map[pkg.cpv] = pkg
+ self._clear_cache()
+
+ def cpv_remove(self, pkg):
+ old_pkg = self._cpv_map.get(pkg.cpv)
+ if old_pkg != pkg:
+ raise KeyError(pkg)
+ self._cp_map[pkg.cp].remove(pkg)
+ del self._cpv_map[pkg.cpv]
+ self._clear_cache()
+
+ def aux_get(self, cpv, wants, myrepo=None):
+ metadata = self._cpv_map[cpv].metadata
+ return [metadata.get(x, "") for x in wants]
+
+ def aux_update(self, cpv, values):
+ self._cpv_map[cpv].metadata.update(values)
+ self._clear_cache()
+
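
For illustration, a minimal standalone sketch of the slot-replacement rule that cpv_inject() implements above: injecting a package evicts any package already occupying the same slot. FakePkg is a hypothetical stand-in for _emerge.Package, carrying only the attributes the rule needs.

    from collections import namedtuple

    # Hypothetical stand-in for _emerge.Package (assumption, not portage API).
    FakePkg = namedtuple("FakePkg", ("cpv", "cp", "slot_atom"))

    cp_map = {}   # cp -> list of installed packages
    cpv_map = {}  # cpv -> package

    def inject(pkg):
        cp_list = cp_map.setdefault(pkg.cp, [])
        for existing in cp_list:
            if existing.slot_atom == pkg.slot_atom:
                # Same slot: the new package replaces the old one.
                cp_list.remove(existing)
                del cpv_map[existing.cpv]
                break
        cp_list.append(pkg)
        cpv_map[pkg.cpv] = pkg

    inject(FakePkg("sys-apps/foo-1.0", "sys-apps/foo", "sys-apps/foo:0"))
    inject(FakePkg("sys-apps/foo-1.1", "sys-apps/foo", "sys-apps/foo:0"))
    print(sorted(cpv_map))  # ['sys-apps/foo-1.1']: same slot was replaced
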
diff --git a/portage_with_autodep/pym/_emerge/PipeReader.py b/portage_with_autodep/pym/_emerge/PipeReader.py
new file mode 100644
index 0000000..375c98f
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/PipeReader.py
@@ -0,0 +1,96 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from _emerge.AbstractPollTask import AbstractPollTask
+from _emerge.PollConstants import PollConstants
+import fcntl
+import array
+
+class PipeReader(AbstractPollTask):
+
+ """
+ Reads output from one or more files and saves it in memory,
+ for retrieval via the getvalue() method. This is driven by
+ the scheduler's poll() loop, so it runs entirely within the
+ current process.
+ """
+
+ __slots__ = ("input_files",) + \
+ ("_read_data", "_reg_ids")
+
+ def _start(self):
+ self._reg_ids = set()
+ self._read_data = []
+ for k, f in self.input_files.items():
+ fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
+ fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
+ self._reg_ids.add(self.scheduler.register(f.fileno(),
+ self._registered_events, self._output_handler))
+ self._registered = True
+
+ def isAlive(self):
+ return self._registered
+
+ def _cancel(self):
+ if self.returncode is None:
+ self.returncode = 1
+
+ def _wait(self):
+ if self.returncode is not None:
+ return self.returncode
+
+ if self._registered:
+ self.scheduler.schedule(self._reg_ids)
+ self._unregister()
+
+ self.returncode = os.EX_OK
+ return self.returncode
+
+ def getvalue(self):
+ """Retrieve the entire contents"""
+ return b''.join(self._read_data)
+
+ def close(self):
+ """Free the memory buffer."""
+ self._read_data = None
+
+ def _output_handler(self, fd, event):
+
+ if event & PollConstants.POLLIN:
+
+ for f in self.input_files.values():
+ if fd == f.fileno():
+ break
+
+ buf = array.array('B')
+ try:
+ buf.fromfile(f, self._bufsize)
+ except (EOFError, IOError):
+ pass
+
+ if buf:
+ self._read_data.append(buf.tostring())
+ else:
+ self._unregister()
+ self.wait()
+
+ self._unregister_if_appropriate(event)
+
+ def _unregister(self):
+ """
+ Unregister from the scheduler and close open files.
+ """
+
+ self._registered = False
+
+ if self._reg_ids is not None:
+ for reg_id in self._reg_ids:
+ self.scheduler.unregister(reg_id)
+ self._reg_ids = None
+
+ if self.input_files is not None:
+ for f in self.input_files.values():
+ f.close()
+ self.input_files = None
+
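
The non-blocking read pattern used by _start() and _output_handler() above can be exercised standalone. A minimal sketch (plain os.read() in place of array.fromfile(), same O_NONBLOCK setup):

    import fcntl
    import os
    import select

    r, w = os.pipe()
    flags = fcntl.fcntl(r, fcntl.F_GETFL)
    fcntl.fcntl(r, fcntl.F_SETFL, flags | os.O_NONBLOCK)

    os.write(w, b"hello")
    os.close(w)  # EOF for the reader

    poller = select.poll()
    poller.register(r, select.POLLIN)
    chunks = []
    while True:
        poller.poll()            # wait for POLLIN or POLLHUP
        data = os.read(r, 4096)  # never blocks after O_NONBLOCK
        if not data:             # empty read means EOF
            break
        chunks.append(data)
    os.close(r)
    print(b"".join(chunks))      # b'hello'
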
diff --git a/portage_with_autodep/pym/_emerge/PollConstants.py b/portage_with_autodep/pym/_emerge/PollConstants.py
new file mode 100644
index 0000000..d0270a9
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/PollConstants.py
@@ -0,0 +1,18 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import select
+class PollConstants(object):
+
+ """
+ Provides POLL* constants that are equivalent to those from the
+ select module, for use by PollSelectAdapter.
+ """
+
+ names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
+ v = 1
+ for k in names:
+ locals()[k] = getattr(select, k, v)
+ v *= 2
+ del k, v
+
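
A sketch of the fallback trick used in the class body above: take each POLL* flag from the select module when the platform defines it, otherwise assign a synthetic power of two so bitmask tests still work where select lacks poll() support:

    import select

    flags = {}
    v = 1
    for name in ("POLLIN", "POLLPRI", "POLLOUT",
                 "POLLERR", "POLLHUP", "POLLNVAL"):
        # Real value if the platform defines it, synthetic bit otherwise.
        flags[name] = getattr(select, name, v)
        v *= 2

    event = flags["POLLIN"] | flags["POLLHUP"]
    print(bool(event & flags["POLLIN"]))   # True
    print(bool(event & flags["POLLOUT"]))  # False
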
diff --git a/portage_with_autodep/pym/_emerge/PollScheduler.py b/portage_with_autodep/pym/_emerge/PollScheduler.py
new file mode 100644
index 0000000..a2b5c24
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/PollScheduler.py
@@ -0,0 +1,398 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import gzip
+import errno
+import logging
+import select
+import time
+
+try:
+ import threading
+except ImportError:
+ import dummy_threading as threading
+
+from portage import _encodings
+from portage import _unicode_encode
+from portage.util import writemsg_level
+
+from _emerge.SlotObject import SlotObject
+from _emerge.getloadavg import getloadavg
+from _emerge.PollConstants import PollConstants
+from _emerge.PollSelectAdapter import PollSelectAdapter
+
+class PollScheduler(object):
+
+ class _sched_iface_class(SlotObject):
+ __slots__ = ("output", "register", "schedule", "unregister")
+
+ def __init__(self):
+ self._terminated = threading.Event()
+ self._terminated_tasks = False
+ self._max_jobs = 1
+ self._max_load = None
+ self._jobs = 0
+ self._poll_event_queue = []
+ self._poll_event_handlers = {}
+ self._poll_event_handler_ids = {}
+ # Increment id for each new handler.
+ self._event_handler_id = 0
+ self._poll_obj = create_poll_instance()
+ self._scheduling = False
+ self._background = False
+ self.sched_iface = self._sched_iface_class(
+ output=self._task_output,
+ register=self._register,
+ schedule=self._schedule_wait,
+ unregister=self._unregister)
+
+ def terminate(self):
+ """
+ Schedules asynchronous, graceful termination of the scheduler
+ at the earliest opportunity.
+
+ This method is thread-safe (and safe for signal handlers).
+ """
+ self._terminated.set()
+
+ def _terminate_tasks(self):
+ """
+ Send signals to terminate all tasks. This is called once
+ from self._schedule() in the event dispatching thread. This
+ prevents it from being called while the _schedule_tasks()
+ implementation is running, in order to avoid potential
+ interference. All tasks should be cleaned up at the earliest
+ opportunity, but not necessarily before this method returns.
+ """
+ raise NotImplementedError()
+
+ def _schedule_tasks(self):
+ """
+ This is called from inside the _schedule() method, which
+ guarantees the following:
+
+ 1) It will not be called recursively.
+ 2) _terminate_tasks() will not be called while it is running.
+ 3) The state of the boolean _terminated_tasks variable will
+ not change while it is running.
+
+ Unless this method is used to perform user interface updates,
+ or something like that, the first thing it should do is check
+ the state of _terminated_tasks and if that is True then it
+ should return False immediately (since there's no need to
+ schedule anything after _terminate_tasks() has been called).
+ """
+ raise NotImplementedError()
+
+ def _schedule(self):
+ """
+ Calls _schedule_tasks() and automatically returns early from
+ any recursive calls to this method that the _schedule_tasks()
+ call might trigger. This makes _schedule() safe to call from
+ inside exit listeners.
+ """
+ if self._scheduling:
+ return False
+ self._scheduling = True
+ try:
+
+ if self._terminated.is_set() and \
+ not self._terminated_tasks:
+ self._terminated_tasks = True
+ self._terminate_tasks()
+
+ return self._schedule_tasks()
+ finally:
+ self._scheduling = False
+
+ def _running_job_count(self):
+ return self._jobs
+
+ def _can_add_job(self):
+ if self._terminated_tasks:
+ return False
+
+ max_jobs = self._max_jobs
+ max_load = self._max_load
+
+ if max_jobs is not True and \
+ self._running_job_count() >= max_jobs:
+ return False
+
+ if max_load is not None and \
+ (max_jobs is True or max_jobs > 1) and \
+ self._running_job_count() >= 1:
+ try:
+ avg1, avg5, avg15 = getloadavg()
+ except OSError:
+ return False
+
+ if avg1 >= max_load:
+ return False
+
+ return True
+
+ def _poll(self, timeout=None):
+ """
+ All poll() calls pass through here. The poll events
+ are added directly to self._poll_event_queue.
+ In order to avoid endless blocking, this raises
+ StopIteration if timeout is None and there are
+ no file descriptors to poll.
+ """
+ if not self._poll_event_handlers:
+ self._schedule()
+ if timeout is None and \
+ not self._poll_event_handlers:
+ raise StopIteration(
+ "timeout is None and there are no poll() event handlers")
+
+ # The following error is known to occur with Linux kernel versions
+ # less than 2.6.24:
+ #
+ # select.error: (4, 'Interrupted system call')
+ #
+ # This error has been observed after a SIGSTOP, followed by SIGCONT.
+ # Treat it similar to EAGAIN if timeout is None, otherwise just return
+ # without any events.
+ while True:
+ try:
+ self._poll_event_queue.extend(self._poll_obj.poll(timeout))
+ break
+ except select.error as e:
+ writemsg_level("\n!!! select error: %s\n" % (e,),
+ level=logging.ERROR, noiselevel=-1)
+ del e
+ if timeout is not None:
+ break
+
+ def _next_poll_event(self, timeout=None):
+ """
+ Since the _schedule_wait() loop is called by event
+ handlers from _poll_loop(), maintain a central event
+ queue for both of them to share events from a single
+ poll() call. In order to avoid endless blocking, this
+ raises StopIteration if timeout is None and there are
+ no file descriptors to poll.
+ """
+ if not self._poll_event_queue:
+ self._poll(timeout)
+ if not self._poll_event_queue:
+ raise StopIteration()
+ return self._poll_event_queue.pop()
+
+ def _poll_loop(self):
+
+ event_handlers = self._poll_event_handlers
+ event_handled = False
+
+ try:
+ while event_handlers:
+ f, event = self._next_poll_event()
+ handler, reg_id = event_handlers[f]
+ handler(f, event)
+ event_handled = True
+ except StopIteration:
+ event_handled = True
+
+ if not event_handled:
+ raise AssertionError("tight loop")
+
+ def _schedule_yield(self):
+ """
+ Schedule for a short period of time chosen by the scheduler based
+ on internal state. Synchronous tasks should call this periodically
+ in order to allow the scheduler to service pending poll events. The
+ scheduler will call poll() exactly once, without blocking, and any
+ resulting poll events will be serviced.
+ """
+ event_handlers = self._poll_event_handlers
+ events_handled = 0
+
+ if not event_handlers:
+ return bool(events_handled)
+
+ if not self._poll_event_queue:
+ self._poll(0)
+
+ try:
+ while event_handlers and self._poll_event_queue:
+ f, event = self._next_poll_event()
+ handler, reg_id = event_handlers[f]
+ handler(f, event)
+ events_handled += 1
+ except StopIteration:
+ events_handled += 1
+
+ return bool(events_handled)
+
+ def _register(self, f, eventmask, handler):
+ """
+ @rtype: int
+ @return: A unique registration id, for use in schedule() or
+ unregister() calls.
+ """
+ if f in self._poll_event_handlers:
+ raise AssertionError("fd %d is already registered" % f)
+ self._event_handler_id += 1
+ reg_id = self._event_handler_id
+ self._poll_event_handler_ids[reg_id] = f
+ self._poll_event_handlers[f] = (handler, reg_id)
+ self._poll_obj.register(f, eventmask)
+ return reg_id
+
+ def _unregister(self, reg_id):
+ f = self._poll_event_handler_ids[reg_id]
+ self._poll_obj.unregister(f)
+ if self._poll_event_queue:
+ # Discard any unhandled events that belong to this file,
+ # in order to prevent these events from being erroneously
+ # delivered to a future handler that is using a reallocated
+ # file descriptor of the same numeric value (causing
+ # extremely confusing bugs).
+ remaining_events = []
+ discarded_events = False
+ for event in self._poll_event_queue:
+ if event[0] == f:
+ discarded_events = True
+ else:
+ remaining_events.append(event)
+
+ if discarded_events:
+ self._poll_event_queue[:] = remaining_events
+
+ del self._poll_event_handlers[f]
+ del self._poll_event_handler_ids[reg_id]
+
+ def _schedule_wait(self, wait_ids=None, timeout=None, condition=None):
+ """
+ Schedule until the given wait_ids are no longer registered
+ for poll() events.
+ @type wait_ids: int or collection of ints
+ @param wait_ids: the task id(s) to wait for
+ """
+ event_handlers = self._poll_event_handlers
+ handler_ids = self._poll_event_handler_ids
+ event_handled = False
+
+ if isinstance(wait_ids, int):
+ wait_ids = frozenset([wait_ids])
+
+ start_time = None
+ remaining_timeout = timeout
+ timed_out = False
+ if timeout is not None:
+ start_time = time.time()
+ try:
+ while (wait_ids is None and event_handlers) or \
+ (wait_ids is not None and wait_ids.intersection(handler_ids)):
+ f, event = self._next_poll_event(timeout=remaining_timeout)
+ handler, reg_id = event_handlers[f]
+ handler(f, event)
+ event_handled = True
+ if condition is not None and condition():
+ break
+ if timeout is not None:
+ elapsed_time = time.time() - start_time
+ if elapsed_time < 0:
+ # The system clock has changed such that start_time
+ # is now in the future, so just assume that the
+ # timeout has already elapsed.
+ timed_out = True
+ break
+ remaining_timeout = timeout - 1000 * elapsed_time
+ if remaining_timeout <= 0:
+ timed_out = True
+ break
+ except StopIteration:
+ event_handled = True
+
+ return event_handled
+
+ def _task_output(self, msg, log_path=None, background=None,
+ level=0, noiselevel=-1):
+ """
+ Output msg to stdout if not self._background. If log_path
+ is not None then append msg to the log (appends with
+ compression if the filename extension of log_path
+ corresponds to a supported compression type).
+ """
+
+ if background is None:
+ # If the task does not have a local background value
+ # (like for parallel-fetch), then use the global value.
+ background = self._background
+
+ msg_shown = False
+ if not background:
+ writemsg_level(msg, level=level, noiselevel=noiselevel)
+ msg_shown = True
+
+ if log_path is not None:
+ try:
+ f = open(_unicode_encode(log_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='ab')
+ except IOError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ if not msg_shown:
+ writemsg_level(msg, level=level, noiselevel=noiselevel)
+ else:
+
+ if log_path.endswith('.gz'):
+ # NOTE: The empty filename argument prevents us from
+ # triggering a bug in python3 which causes GzipFile
+ # to raise AttributeError if fileobj.name is bytes
+ # instead of unicode.
+ f = gzip.GzipFile(filename='', mode='ab', fileobj=f)
+
+ f.write(_unicode_encode(msg))
+ f.close()
+
+_can_poll_device = None
+
+def can_poll_device():
+ """
+ Test if it's possible to use poll() on a device such as a pty. This
+ is known to fail on Darwin.
+ @rtype: bool
+ @returns: True if poll() on a device succeeds, False otherwise.
+ """
+
+ global _can_poll_device
+ if _can_poll_device is not None:
+ return _can_poll_device
+
+ if not hasattr(select, "poll"):
+ _can_poll_device = False
+ return _can_poll_device
+
+ try:
+ dev_null = open('/dev/null', 'rb')
+ except IOError:
+ _can_poll_device = False
+ return _can_poll_device
+
+ p = select.poll()
+ p.register(dev_null.fileno(), PollConstants.POLLIN)
+
+ invalid_request = False
+ for f, event in p.poll():
+ if event & PollConstants.POLLNVAL:
+ invalid_request = True
+ break
+ dev_null.close()
+
+ _can_poll_device = not invalid_request
+ return _can_poll_device
+
+def create_poll_instance():
+ """
+ Create an instance of select.poll, or an instance of
+ PollSelectAdapter if there is no poll() implementation or
+ it is broken somehow.
+ """
+ if can_poll_device():
+ return select.poll()
+ return PollSelectAdapter()
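
The can_poll_device() probe above condenses to a standalone sketch: register a device file with poll() and treat a POLLNVAL revent as "poll() is broken for devices here" (the known Darwin failure mode). The module above additionally caches the result globally.

    import select

    def device_pollable(path="/dev/null"):
        # Sketch of the probe, without the module-level cache.
        if not hasattr(select, "poll"):
            return False
        with open(path, "rb") as dev:
            p = select.poll()
            p.register(dev.fileno(), select.POLLIN)
            for fd, event in p.poll(0):
                if event & select.POLLNVAL:
                    return False
        return True

    print(device_pollable())  # True on Linux, False where poll() is broken
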
diff --git a/portage_with_autodep/pym/_emerge/PollSelectAdapter.py b/portage_with_autodep/pym/_emerge/PollSelectAdapter.py
new file mode 100644
index 0000000..c11dab8
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/PollSelectAdapter.py
@@ -0,0 +1,73 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.PollConstants import PollConstants
+import select
+class PollSelectAdapter(PollConstants):
+
+ """
+ Use select to emulate a poll object, for
+ systems that don't support poll().
+ """
+
+ def __init__(self):
+ self._registered = {}
+ self._select_args = [[], [], []]
+
+ def register(self, fd, *args):
+ """
+ Only POLLIN is currently supported!
+ """
+ if len(args) > 1:
+ raise TypeError(
+ "register expected at most 2 arguments, got " + \
+ repr(1 + len(args)))
+
+ eventmask = PollConstants.POLLIN | \
+ PollConstants.POLLPRI | PollConstants.POLLOUT
+ if args:
+ eventmask = args[0]
+
+ self._registered[fd] = eventmask
+ self._select_args = None
+
+ def unregister(self, fd):
+ self._select_args = None
+ del self._registered[fd]
+
+ def poll(self, *args):
+ if len(args) > 1:
+ raise TypeError(
+ "poll expected at most 2 arguments, got " + \
+ repr(1 + len(args)))
+
+ timeout = None
+ if args:
+ timeout = args[0]
+
+ select_args = self._select_args
+ if select_args is None:
+ select_args = [list(self._registered), [], []]
+
+ if timeout is not None:
+ select_args = select_args[:]
+ # Translate poll() timeout args to select() timeout args:
+ #
+ # | units | value(s) for indefinite block
+ # ---------|--------------|------------------------------
+ # poll | milliseconds | omitted, negative, or None
+ # ---------|--------------|------------------------------
+ # select | seconds | omitted
+ # ---------|--------------|------------------------------
+
+ if timeout is not None and timeout < 0:
+ timeout = None
+ if timeout is not None:
+ # Float division, so sub-second timeouts don't truncate
+ # to zero under Python 2 and busy-poll.
+ select_args.append(timeout / 1000.0)
+
+ select_events = select.select(*select_args)
+ poll_events = []
+ for fd in select_events[0]:
+ poll_events.append((fd, PollConstants.POLLIN))
+ return poll_events
+
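
The timeout-translation table in poll() above reduces to a few lines; a sketch of just the unit conversion:

    def select_timeout(poll_timeout_ms):
        # poll(): milliseconds; None or negative blocks indefinitely.
        # select(): seconds; None blocks indefinitely.
        if poll_timeout_ms is None or poll_timeout_ms < 0:
            return None
        return poll_timeout_ms / 1000.0

    print(select_timeout(1500))  # 1.5
    print(select_timeout(-1))    # None
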
diff --git a/portage_with_autodep/pym/_emerge/ProgressHandler.py b/portage_with_autodep/pym/_emerge/ProgressHandler.py
new file mode 100644
index 0000000..f5afe6d
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/ProgressHandler.py
@@ -0,0 +1,22 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import time
+class ProgressHandler(object):
+ def __init__(self):
+ self.curval = 0
+ self.maxval = 0
+ self._last_update = 0
+ self.min_latency = 0.2
+
+ def onProgress(self, maxval, curval):
+ self.maxval = maxval
+ self.curval = curval
+ cur_time = time.time()
+ if cur_time - self._last_update >= self.min_latency:
+ self._last_update = cur_time
+ self.display()
+
+ def display(self):
+ raise NotImplementedError(self)
+
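
Intended usage is to subclass and implement display(); onProgress() then throttles redraws to at most one per min_latency seconds. A sketch, assuming a portage checkout on sys.path so the class above is importable:

    import time
    from _emerge.ProgressHandler import ProgressHandler

    class PercentHandler(ProgressHandler):
        def display(self):
            print("%3d%%" % (100 * self.curval // max(self.maxval, 1)))

    handler = PercentHandler()
    for i in range(1000):
        handler.onProgress(1000, i + 1)
        time.sleep(0.001)  # ~1s total, so only a handful of redraws occur
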
diff --git a/portage_with_autodep/pym/_emerge/QueueScheduler.py b/portage_with_autodep/pym/_emerge/QueueScheduler.py
new file mode 100644
index 0000000..a4ab328
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/QueueScheduler.py
@@ -0,0 +1,116 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import time
+
+from _emerge.PollScheduler import PollScheduler
+
+class QueueScheduler(PollScheduler):
+
+ """
+ Add instances of SequentialTaskQueue and then call run(). The
+ run() method returns when no tasks remain.
+ """
+
+ def __init__(self, max_jobs=None, max_load=None):
+ PollScheduler.__init__(self)
+
+ if max_jobs is None:
+ max_jobs = 1
+
+ self._max_jobs = max_jobs
+ self._max_load = max_load
+
+ self._queues = []
+ self._schedule_listeners = []
+
+ def add(self, q):
+ self._queues.append(q)
+
+ def remove(self, q):
+ self._queues.remove(q)
+
+ def clear(self):
+ for q in self._queues:
+ q.clear()
+
+ def run(self, timeout=None):
+
+ start_time = None
+ timed_out = False
+ remaining_timeout = timeout
+ if timeout is not None:
+ start_time = time.time()
+
+ while self._schedule():
+ self._schedule_wait(timeout=remaining_timeout)
+ if timeout is not None:
+ elapsed_time = time.time() - start_time
+ if elapsed_time < 0:
+ # The system clock has changed such that start_time
+ # is now in the future, so just assume that the
+ # timeout has already elapsed.
+ timed_out = True
+ break
+ remaining_timeout = timeout - 1000 * elapsed_time
+ if remaining_timeout <= 0:
+ timed_out = True
+ break
+
+ if timeout is None or not timed_out:
+ while self._running_job_count():
+ self._schedule_wait(timeout=remaining_timeout)
+ if timeout is not None:
+ elapsed_time = time.time() - start_time
+ if elapsed_time < 0:
+ # The system clock has changed such that start_time
+ # is now in the future, so just assume that the
+ # timeout has already elapsed.
+ timed_out = True
+ break
+ remaining_timeout = timeout - 1000 * elapsed_time
+ if remaining_timeout <= 0:
+ timed_out = True
+ break
+
+ def _schedule_tasks(self):
+ """
+ @rtype: bool
+ @returns: True if there may be remaining tasks to schedule,
+ False otherwise.
+ """
+ if self._terminated_tasks:
+ return False
+
+ while self._can_add_job():
+ n = self._max_jobs - self._running_job_count()
+ if n < 1:
+ break
+
+ if not self._start_next_job(n):
+ return False
+
+ for q in self._queues:
+ if q:
+ return True
+ return False
+
+ def _running_job_count(self):
+ job_count = 0
+ for q in self._queues:
+ job_count += len(q.running_tasks)
+ self._jobs = job_count
+ return job_count
+
+ def _start_next_job(self, n=1):
+ started_count = 0
+ for q in self._queues:
+ initial_job_count = len(q.running_tasks)
+ q.schedule()
+ final_job_count = len(q.running_tasks)
+ if final_job_count > initial_job_count:
+ started_count += (final_job_count - initial_job_count)
+ if started_count >= n:
+ break
+ return started_count
+
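
The timeout bookkeeping in run() above mixes units: the public timeout is in milliseconds while time.time() deltas are in seconds, hence the factor of 1000. A worked sketch of the recurring computation:

    import time

    def remaining_ms(timeout_ms, start_time):
        elapsed = time.time() - start_time
        if elapsed < 0:
            # System clock stepped backwards; treat as already expired.
            return 0
        return max(timeout_ms - 1000 * elapsed, 0)

    start = time.time()
    time.sleep(0.05)
    print(remaining_ms(200, start))  # roughly 150
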
diff --git a/portage_with_autodep/pym/_emerge/RootConfig.py b/portage_with_autodep/pym/_emerge/RootConfig.py
new file mode 100644
index 0000000..d84f108
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/RootConfig.py
@@ -0,0 +1,34 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+class RootConfig(object):
+ """This is used internally by depgraph to track information about a
+ particular $ROOT."""
+ __slots__ = ("root", "setconfig", "sets", "settings", "trees")
+
+ pkg_tree_map = {
+ "ebuild" : "porttree",
+ "binary" : "bintree",
+ "installed" : "vartree"
+ }
+
+ tree_pkg_map = {}
+ for k, v in pkg_tree_map.items():
+ tree_pkg_map[v] = k
+
+ def __init__(self, settings, trees, setconfig):
+ self.trees = trees
+ self.settings = settings
+ self.root = self.settings["ROOT"]
+ self.setconfig = setconfig
+ if setconfig is None:
+ self.sets = {}
+ else:
+ self.sets = self.setconfig.getSets()
+
+ def update(self, other):
+ """
+ Shallow copy all attributes from another instance.
+ """
+ for k in self.__slots__:
+ setattr(self, k, getattr(other, k))
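
The class-level loop above simply inverts pkg_tree_map; the same derivation in isolation:

    pkg_tree_map = {
        "ebuild": "porttree",
        "binary": "bintree",
        "installed": "vartree",
    }
    tree_pkg_map = dict((v, k) for k, v in pkg_tree_map.items())
    print(tree_pkg_map["vartree"])  # installed
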
diff --git a/portage_with_autodep/pym/_emerge/Scheduler.py b/portage_with_autodep/pym/_emerge/Scheduler.py
new file mode 100644
index 0000000..6412d82
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/Scheduler.py
@@ -0,0 +1,1975 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+from collections import deque
+import gc
+import gzip
+import logging
+import shutil
+import signal
+import sys
+import tempfile
+import textwrap
+import time
+import warnings
+import weakref
+import zlib
+
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode, _unicode_encode
+from portage.cache.mappings import slot_dict_class
+from portage.elog.messages import eerror
+from portage.localization import _
+from portage.output import colorize, create_color_func, red
+bad = create_color_func("BAD")
+from portage._sets import SETPREFIX
+from portage._sets.base import InternalPackageSet
+from portage.util import writemsg, writemsg_level
+from portage.package.ebuild.digestcheck import digestcheck
+from portage.package.ebuild.digestgen import digestgen
+from portage.package.ebuild.prepare_build_dirs import prepare_build_dirs
+
+import _emerge
+from _emerge.BinpkgFetcher import BinpkgFetcher
+from _emerge.BinpkgPrefetcher import BinpkgPrefetcher
+from _emerge.BinpkgVerifier import BinpkgVerifier
+from _emerge.Blocker import Blocker
+from _emerge.BlockerDB import BlockerDB
+from _emerge.clear_caches import clear_caches
+from _emerge.create_depgraph_params import create_depgraph_params
+from _emerge.create_world_atom import create_world_atom
+from _emerge.DepPriority import DepPriority
+from _emerge.depgraph import depgraph, resume_depgraph
+from _emerge.EbuildFetcher import EbuildFetcher
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.emergelog import emergelog
+from _emerge.FakeVartree import FakeVartree
+from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
+from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
+from _emerge.JobStatusDisplay import JobStatusDisplay
+from _emerge.MergeListItem import MergeListItem
+from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
+from _emerge.Package import Package
+from _emerge.PackageMerge import PackageMerge
+from _emerge.PollScheduler import PollScheduler
+from _emerge.RootConfig import RootConfig
+from _emerge.SlotObject import SlotObject
+from _emerge.SequentialTaskQueue import SequentialTaskQueue
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+class Scheduler(PollScheduler):
+
+ # max time between display status updates (milliseconds)
+ _max_display_latency = 3000
+
+ _opts_ignore_blockers = \
+ frozenset(["--buildpkgonly",
+ "--fetchonly", "--fetch-all-uri",
+ "--nodeps", "--pretend"])
+
+ _opts_no_background = \
+ frozenset(["--pretend",
+ "--fetchonly", "--fetch-all-uri"])
+
+ _opts_no_restart = frozenset(["--buildpkgonly",
+ "--fetchonly", "--fetch-all-uri", "--pretend"])
+
+ _bad_resume_opts = set(["--ask", "--changelog",
+ "--resume", "--skipfirst"])
+
+ class _iface_class(SlotObject):
+ __slots__ = ("fetch",
+ "output", "register", "schedule",
+ "scheduleSetup", "scheduleUnpack", "scheduleYield",
+ "unregister")
+
+ class _fetch_iface_class(SlotObject):
+ __slots__ = ("log_file", "schedule")
+
+ _task_queues_class = slot_dict_class(
+ ("merge", "jobs", "ebuild_locks", "fetch", "unpack"), prefix="")
+
+ class _build_opts_class(SlotObject):
+ __slots__ = ("buildpkg", "buildpkgonly",
+ "fetch_all_uri", "fetchonly", "pretend")
+
+ class _binpkg_opts_class(SlotObject):
+ __slots__ = ("fetchonly", "getbinpkg", "pretend")
+
+ class _pkg_count_class(SlotObject):
+ __slots__ = ("curval", "maxval")
+
+ class _emerge_log_class(SlotObject):
+ __slots__ = ("xterm_titles",)
+
+ def log(self, *pargs, **kwargs):
+ if not self.xterm_titles:
+ # Avoid interference with the scheduler's status display.
+ kwargs.pop("short_msg", None)
+ emergelog(self.xterm_titles, *pargs, **kwargs)
+
+ class _failed_pkg(SlotObject):
+ __slots__ = ("build_dir", "build_log", "pkg", "returncode")
+
+ class _ConfigPool(object):
+ """Interface for a task to temporarily allocate a config
+ instance from a pool. This allows a task to be constructed
+ long before the config instance actually becomes needed, like
+ when prefetchers are constructed for the whole merge list."""
+ __slots__ = ("_root", "_allocate", "_deallocate")
+ def __init__(self, root, allocate, deallocate):
+ self._root = root
+ self._allocate = allocate
+ self._deallocate = deallocate
+ def allocate(self):
+ return self._allocate(self._root)
+ def deallocate(self, settings):
+ self._deallocate(settings)
+
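
A standalone sketch of the pooling pattern _ConfigPool wraps: tasks borrow a settings object with allocate() and return it with deallocate(), so expensive config instances get reused rather than rebuilt per task. DummyConfig is a hypothetical stand-in for portage.config:

    class DummyConfig(object):
        # Hypothetical stand-in for portage.config.
        pass

    _pool = {"/": []}

    def _allocate(root):
        pool = _pool[root]
        return pool.pop() if pool else DummyConfig()

    def _deallocate(settings):
        _pool["/"].append(settings)

    cfg = _allocate("/")   # created on first use
    _deallocate(cfg)       # returned to the pool
    assert _allocate("/") is cfg  # reused, not rebuilt
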
+ class _unknown_internal_error(portage.exception.PortageException):
+ """
+ Used internally to terminate scheduling. The specific reason for
+ the failure should have been dumped to stderr.
+ """
+ def __init__(self, value=""):
+ portage.exception.PortageException.__init__(self, value)
+
+ def __init__(self, settings, trees, mtimedb, myopts,
+ spinner, mergelist=None, favorites=None, graph_config=None):
+ PollScheduler.__init__(self)
+
+ if mergelist is not None:
+ warnings.warn("The mergelist parameter of the " + \
+ "_emerge.Scheduler constructor is now unused. Use " + \
+ "the graph_config parameter instead.",
+ DeprecationWarning, stacklevel=2)
+
+ self.settings = settings
+ self.target_root = settings["ROOT"]
+ self.trees = trees
+ self.myopts = myopts
+ self._spinner = spinner
+ self._mtimedb = mtimedb
+ self._favorites = favorites
+ self._args_set = InternalPackageSet(favorites, allow_repo=True)
+ self._build_opts = self._build_opts_class()
+ for k in self._build_opts.__slots__:
+ setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
+ self._binpkg_opts = self._binpkg_opts_class()
+ for k in self._binpkg_opts.__slots__:
+ setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
+
+ self.curval = 0
+ self._logger = self._emerge_log_class()
+ self._task_queues = self._task_queues_class()
+ for k in self._task_queues.allowed_keys:
+ setattr(self._task_queues, k,
+ SequentialTaskQueue())
+
+ # Holds merges that will wait to be executed when no builds are
+ # executing. This is useful for system packages since dependencies
+ # on system packages are frequently unspecified. For example, see
+ # bug #256616.
+ self._merge_wait_queue = deque()
+ # Holds merges that have been transferred from the merge_wait_queue to
+ # the actual merge queue. They are removed from this list upon
+ # completion. Other packages can start building only when this list is
+ # empty.
+ self._merge_wait_scheduled = []
+
+ # Holds system packages and their deep runtime dependencies. Before
+ # being merged, these packages go to merge_wait_queue, to be merged
+ # when no other packages are building.
+ self._deep_system_deps = set()
+
+ # Holds packages to merge which will satisfy currently unsatisfied
+ # deep runtime dependencies of system packages. If this is not empty
+ # then no parallel builds will be spawned until it is empty. This
+ # minimizes the possibility that a build will fail due to the system
+ # being in a fragile state. For example, see bug #259954.
+ self._unsatisfied_system_deps = set()
+
+ self._status_display = JobStatusDisplay(
+ xterm_titles=('notitles' not in settings.features))
+ self._max_load = myopts.get("--load-average")
+ max_jobs = myopts.get("--jobs")
+ if max_jobs is None:
+ max_jobs = 1
+ self._set_max_jobs(max_jobs)
+
+ # The root where the currently running
+ # portage instance is installed.
+ self._running_root = trees["/"]["root_config"]
+ self.edebug = 0
+ if settings.get("PORTAGE_DEBUG", "") == "1":
+ self.edebug = 1
+ self.pkgsettings = {}
+ self._config_pool = {}
+ for root in self.trees:
+ self._config_pool[root] = []
+
+ self._fetch_log = os.path.join(_emerge.emergelog._emerge_log_dir,
+ 'emerge-fetch.log')
+ fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
+ schedule=self._schedule_fetch)
+ self._sched_iface = self._iface_class(
+ fetch=fetch_iface, output=self._task_output,
+ register=self._register,
+ schedule=self._schedule_wait,
+ scheduleSetup=self._schedule_setup,
+ scheduleUnpack=self._schedule_unpack,
+ scheduleYield=self._schedule_yield,
+ unregister=self._unregister)
+
+ self._prefetchers = weakref.WeakValueDictionary()
+ self._pkg_queue = []
+ self._running_tasks = {}
+ self._completed_tasks = set()
+
+ self._failed_pkgs = []
+ self._failed_pkgs_all = []
+ self._failed_pkgs_die_msgs = []
+ self._post_mod_echo_msgs = []
+ self._parallel_fetch = False
+ self._init_graph(graph_config)
+ merge_count = len([x for x in self._mergelist \
+ if isinstance(x, Package) and x.operation == "merge"])
+ self._pkg_count = self._pkg_count_class(
+ curval=0, maxval=merge_count)
+ self._status_display.maxval = self._pkg_count.maxval
+
+ # The load average takes some time to respond when new
+ # jobs are added, so we need to limit the rate of adding
+ # new jobs.
+ self._job_delay_max = 10
+ self._job_delay_factor = 1.0
+ self._job_delay_exp = 1.5
+ self._previous_job_start_time = None
+
+ # This is used to memoize the _choose_pkg() result when
+ # no packages can be chosen until one of the existing
+ # jobs completes.
+ self._choose_pkg_return_early = False
+
+ features = self.settings.features
+ if "parallel-fetch" in features and \
+ not ("--pretend" in self.myopts or \
+ "--fetch-all-uri" in self.myopts or \
+ "--fetchonly" in self.myopts):
+ if "distlocks" not in features:
+ portage.writemsg(red("!!!")+"\n", noiselevel=-1)
+ portage.writemsg(red("!!!")+" parallel-fetching " + \
+ "requires the distlocks feature enabled"+"\n",
+ noiselevel=-1)
+ portage.writemsg(red("!!!")+" you have it disabled, " + \
+ "thus parallel-fetching is being disabled"+"\n",
+ noiselevel=-1)
+ portage.writemsg(red("!!!")+"\n", noiselevel=-1)
+ elif merge_count > 1:
+ self._parallel_fetch = True
+
+ if self._parallel_fetch:
+ # clear out existing fetch log if it exists
+ try:
+ open(self._fetch_log, 'w')
+ except EnvironmentError:
+ pass
+
+ self._running_portage = None
+ portage_match = self._running_root.trees["vartree"].dbapi.match(
+ portage.const.PORTAGE_PACKAGE_ATOM)
+ if portage_match:
+ cpv = portage_match.pop()
+ self._running_portage = self._pkg(cpv, "installed",
+ self._running_root, installed=True)
+
+ def _terminate_tasks(self):
+ self._status_display.quiet = True
+ while self._running_tasks:
+ task_id, task = self._running_tasks.popitem()
+ task.cancel()
+ for q in self._task_queues.values():
+ q.clear()
+
+ def _init_graph(self, graph_config):
+ """
+ Initialize structures used for dependency calculations
+ involving currently installed packages.
+ """
+ self._set_graph_config(graph_config)
+ self._blocker_db = {}
+ for root in self.trees:
+ if graph_config is None:
+ fake_vartree = FakeVartree(self.trees[root]["root_config"],
+ pkg_cache=self._pkg_cache)
+ fake_vartree.sync()
+ else:
+ fake_vartree = graph_config.trees[root]['vartree']
+ self._blocker_db[root] = BlockerDB(fake_vartree)
+
+ def _destroy_graph(self):
+ """
+ Use this to free memory at the beginning of _calc_resume_list().
+ After _calc_resume_list(), the _init_graph() method
+ must be called in order to re-generate the structures that
+ this method destroys.
+ """
+ self._blocker_db = None
+ self._set_graph_config(None)
+ gc.collect()
+
+ def _poll(self, timeout=None):
+
+ self._schedule()
+
+ if timeout is None:
+ while True:
+ if not self._poll_event_handlers:
+ self._schedule()
+ if not self._poll_event_handlers:
+ raise StopIteration(
+ "timeout is None and there are no poll() event handlers")
+ previous_count = len(self._poll_event_queue)
+ PollScheduler._poll(self, timeout=self._max_display_latency)
+ self._status_display.display()
+ if previous_count != len(self._poll_event_queue):
+ break
+
+ elif timeout <= self._max_display_latency:
+ PollScheduler._poll(self, timeout=timeout)
+ if timeout == 0:
+ # The display is updated by _schedule() above, so it would be
+ # redundant to update it here when timeout is 0.
+ pass
+ else:
+ self._status_display.display()
+
+ else:
+ remaining_timeout = timeout
+ start_time = time.time()
+ while True:
+ previous_count = len(self._poll_event_queue)
+ PollScheduler._poll(self,
+ timeout=min(self._max_display_latency, remaining_timeout))
+ self._status_display.display()
+ if previous_count != len(self._poll_event_queue):
+ break
+ elapsed_time = time.time() - start_time
+ if elapsed_time < 0:
+ # The system clock has changed such that start_time
+ # is now in the future, so just assume that the
+ # timeout has already elapsed.
+ break
+ remaining_timeout = timeout - 1000 * elapsed_time
+ if remaining_timeout <= 0:
+ break
+
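
The override above polls in bounded slices of _max_display_latency so the status display refreshes even while no file descriptor produces events. The same idea in a standalone sketch:

    import os
    import select

    def poll_with_display(poller, refresh, max_latency_ms=3000):
        # Poll in bounded slices; run the display callback between slices.
        while True:
            events = poller.poll(max_latency_ms)
            refresh()
            if events:
                return events

    r, w = os.pipe()
    os.write(w, b"x")
    p = select.poll()
    p.register(r, select.POLLIN)
    print(poll_with_display(p, lambda: None, 100))  # [(r, POLLIN)]
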
+ def _set_max_jobs(self, max_jobs):
+ self._max_jobs = max_jobs
+ self._task_queues.jobs.max_jobs = max_jobs
+ if "parallel-install" in self.settings.features:
+ self._task_queues.merge.max_jobs = max_jobs
+
+ def _background_mode(self):
+ """
+ Check if background mode is enabled and adjust states as necessary.
+
+ @rtype: bool
+ @returns: True if background mode is enabled, False otherwise.
+ """
+ background = (self._max_jobs is True or \
+ self._max_jobs > 1 or "--quiet" in self.myopts \
+ or "--quiet-build" in self.myopts) and \
+ not bool(self._opts_no_background.intersection(self.myopts))
+
+ if background:
+ interactive_tasks = self._get_interactive_tasks()
+ if interactive_tasks:
+ background = False
+ writemsg_level(">>> Sending package output to stdio due " + \
+ "to interactive package(s):\n",
+ level=logging.INFO, noiselevel=-1)
+ msg = [""]
+ for pkg in interactive_tasks:
+ pkg_str = " " + colorize("INFORM", str(pkg.cpv))
+ if pkg.root != "/":
+ pkg_str += " for " + pkg.root
+ msg.append(pkg_str)
+ msg.append("")
+ writemsg_level("".join("%s\n" % (l,) for l in msg),
+ level=logging.INFO, noiselevel=-1)
+ if self._max_jobs is True or self._max_jobs > 1:
+ self._set_max_jobs(1)
+ writemsg_level(">>> Setting --jobs=1 due " + \
+ "to the above interactive package(s)\n",
+ level=logging.INFO, noiselevel=-1)
+ writemsg_level(">>> In order to temporarily mask " + \
+ "interactive updates, you may\n" + \
+ ">>> specify --accept-properties=-interactive\n",
+ level=logging.INFO, noiselevel=-1)
+ self._status_display.quiet = \
+ not background or \
+ ("--quiet" in self.myopts and \
+ "--verbose" not in self.myopts)
+
+ self._logger.xterm_titles = \
+ "notitles" not in self.settings.features and \
+ self._status_display.quiet
+
+ return background
+
+ def _get_interactive_tasks(self):
+ interactive_tasks = []
+ for task in self._mergelist:
+ if not (isinstance(task, Package) and \
+ task.operation == "merge"):
+ continue
+ if 'interactive' in task.metadata.properties:
+ interactive_tasks.append(task)
+ return interactive_tasks
+
+ def _set_graph_config(self, graph_config):
+
+ if graph_config is None:
+ self._graph_config = None
+ self._pkg_cache = {}
+ self._digraph = None
+ self._mergelist = []
+ self._deep_system_deps.clear()
+ return
+
+ self._graph_config = graph_config
+ self._pkg_cache = graph_config.pkg_cache
+ self._digraph = graph_config.graph
+ self._mergelist = graph_config.mergelist
+
+ if "--nodeps" in self.myopts or \
+ (self._max_jobs is not True and self._max_jobs < 2):
+ # save some memory
+ self._digraph = None
+ graph_config.graph = None
+ graph_config.pkg_cache.clear()
+ self._deep_system_deps.clear()
+ for pkg in self._mergelist:
+ self._pkg_cache[pkg] = pkg
+ return
+
+ self._find_system_deps()
+ self._prune_digraph()
+ self._prevent_builddir_collisions()
+ if '--debug' in self.myopts:
+ writemsg("\nscheduler digraph:\n\n", noiselevel=-1)
+ self._digraph.debug_print()
+ writemsg("\n", noiselevel=-1)
+
+ def _find_system_deps(self):
+ """
+ Find system packages and their deep runtime dependencies. Before being
+ merged, these packages go to merge_wait_queue, to be merged when no
+ other packages are building.
+ NOTE: This can only find deep system deps if the system set has been
+ added to the graph and traversed deeply (the depgraph "complete"
+ parameter will do this, triggered by emerge --complete-graph option).
+ """
+ deep_system_deps = self._deep_system_deps
+ deep_system_deps.clear()
+ deep_system_deps.update(
+ _find_deep_system_runtime_deps(self._digraph))
+ deep_system_deps.difference_update([pkg for pkg in \
+ deep_system_deps if pkg.operation != "merge"])
+
+ def _prune_digraph(self):
+ """
+ Prune any root nodes that are irrelevant.
+ """
+
+ graph = self._digraph
+ completed_tasks = self._completed_tasks
+ removed_nodes = set()
+ while True:
+ for node in graph.root_nodes():
+ if not isinstance(node, Package) or \
+ (node.installed and node.operation == "nomerge") or \
+ node.onlydeps or \
+ node in completed_tasks:
+ removed_nodes.add(node)
+ if removed_nodes:
+ graph.difference_update(removed_nodes)
+ if not removed_nodes:
+ break
+ removed_nodes.clear()
+
+ def _prevent_builddir_collisions(self):
+ """
+ When building stages, sometimes the same exact cpv needs to be merged
+ to both $ROOTs. Add edges to the digraph in order to avoid collisions
+ in the builddir. Currently, normal file locks would be inappropriate
+ for this purpose since emerge holds all of its build dir locks from
+ the main process.
+ """
+ cpv_map = {}
+ for pkg in self._mergelist:
+ if not isinstance(pkg, Package):
+ # a satisfied blocker
+ continue
+ if pkg.installed:
+ continue
+ if pkg.cpv not in cpv_map:
+ cpv_map[pkg.cpv] = [pkg]
+ continue
+ for earlier_pkg in cpv_map[pkg.cpv]:
+ self._digraph.add(earlier_pkg, pkg,
+ priority=DepPriority(buildtime=True))
+ cpv_map[pkg.cpv].append(pkg)
+
+ class _pkg_failure(portage.exception.PortageException):
+ """
+ An instance of this class is raised by unmerge() when
+ an uninstallation fails.
+ """
+ status = 1
+ def __init__(self, *pargs):
+ portage.exception.PortageException.__init__(self, pargs)
+ if pargs:
+ self.status = pargs[0]
+
+ def _schedule_fetch(self, fetcher):
+ """
+ Schedule a fetcher, in order to control the number of concurrent
+ fetchers. If self._max_jobs is greater than 1 then the fetch
+ queue is bypassed and the fetcher is started immediately,
+ otherwise it is added to the front of the parallel-fetch queue.
+ NOTE: The parallel-fetch queue is currently used to serialize
+ access to the parallel-fetch log, so changes in the log handling
+ would be required before it would be possible to enable
+ concurrent fetching within the parallel-fetch queue.
+ """
+ if self._max_jobs > 1:
+ fetcher.start()
+ else:
+ self._task_queues.fetch.addFront(fetcher)
+
+ def _schedule_setup(self, setup_phase):
+ """
+ Schedule a setup phase on the merge queue, in order to
+ serialize unsandboxed access to the live filesystem.
+ """
+ if self._task_queues.merge.max_jobs > 1 and \
+ "ebuild-locks" in self.settings.features:
+ # Use a separate queue for ebuild-locks when the merge
+ # queue allows more than 1 job (due to parallel-install),
+ # since the portage.locks module does not behave as desired
+ # if we try to lock the same file multiple times
+ # concurrently from the same process.
+ self._task_queues.ebuild_locks.add(setup_phase)
+ else:
+ self._task_queues.merge.add(setup_phase)
+ self._schedule()
+
+ def _schedule_unpack(self, unpack_phase):
+ """
+ Schedule an unpack phase on the unpack queue, in order
+ to serialize $DISTDIR access for live ebuilds.
+ """
+ self._task_queues.unpack.add(unpack_phase)
+
+ def _find_blockers(self, new_pkg):
+ """
+ Returns a callable.
+ """
+ def get_blockers():
+ return self._find_blockers_impl(new_pkg)
+ return get_blockers
+
+ def _find_blockers_impl(self, new_pkg):
+ if self._opts_ignore_blockers.intersection(self.myopts):
+ return None
+
+ blocker_db = self._blocker_db[new_pkg.root]
+
+ blocker_dblinks = []
+ for blocking_pkg in blocker_db.findInstalledBlockers(new_pkg):
+ if new_pkg.slot_atom == blocking_pkg.slot_atom:
+ continue
+ if new_pkg.cpv == blocking_pkg.cpv:
+ continue
+ blocker_dblinks.append(portage.dblink(
+ blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
+ self.pkgsettings[blocking_pkg.root], treetype="vartree",
+ vartree=self.trees[blocking_pkg.root]["vartree"]))
+
+ return blocker_dblinks
+
+ def _generate_digests(self):
+ """
+ Generate digests if necessary for --digests or FEATURES=digest.
+ In order to avoid interference, this must be done before parallel
+ tasks are started.
+ """
+
+ if '--fetchonly' in self.myopts:
+ return os.EX_OK
+
+ digest = '--digest' in self.myopts
+ if not digest:
+ for pkgsettings in self.pkgsettings.values():
+ if pkgsettings.mycpv is not None:
+ # ensure that we are using global features
+ # settings rather than those from package.env
+ pkgsettings.reset()
+ if 'digest' in pkgsettings.features:
+ digest = True
+ break
+
+ if not digest:
+ return os.EX_OK
+
+ for x in self._mergelist:
+ if not isinstance(x, Package) or \
+ x.type_name != 'ebuild' or \
+ x.operation != 'merge':
+ continue
+ pkgsettings = self.pkgsettings[x.root]
+ if pkgsettings.mycpv is not None:
+ # ensure that we are using global features
+ # settings rather than those from package.env
+ pkgsettings.reset()
+ if '--digest' not in self.myopts and \
+ 'digest' not in pkgsettings.features:
+ continue
+ portdb = x.root_config.trees['porttree'].dbapi
+ ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
+ if ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s'" % x.cpv)
+ pkgsettings['O'] = os.path.dirname(ebuild_path)
+ if not digestgen(mysettings=pkgsettings, myportdb=portdb):
+ writemsg_level(
+ "!!! Unable to generate manifest for '%s'.\n" \
+ % x.cpv, level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ return os.EX_OK
+
+ def _env_sanity_check(self):
+ """
+ Verify a sane environment before trying to build anything from source.
+ """
+ have_src_pkg = False
+ for x in self._mergelist:
+ if isinstance(x, Package) and not x.built:
+ have_src_pkg = True
+ break
+
+ if not have_src_pkg:
+ return os.EX_OK
+
+ for settings in self.pkgsettings.values():
+ for var in ("ARCH", ):
+ value = settings.get(var)
+ if value and value.strip():
+ continue
+ msg = _("%(var)s is not set... "
+ "Are you missing the '%(configroot)setc/make.profile' symlink? "
+ "Is the symlink correct? "
+ "Is your portage tree complete?") % \
+ {"var": var, "configroot": settings["PORTAGE_CONFIGROOT"]}
+
+ out = portage.output.EOutput()
+ for line in textwrap.wrap(msg, 70):
+ out.eerror(line)
+ return 1
+
+ return os.EX_OK
+
+ def _check_manifests(self):
+ # Verify all the manifests now so that the user is notified of failure
+ # as soon as possible.
+ if "strict" not in self.settings.features or \
+ "--fetchonly" in self.myopts or \
+ "--fetch-all-uri" in self.myopts:
+ return os.EX_OK
+
+ shown_verifying_msg = False
+ quiet_settings = {}
+ for myroot, pkgsettings in self.pkgsettings.items():
+ quiet_config = portage.config(clone=pkgsettings)
+ quiet_config["PORTAGE_QUIET"] = "1"
+ quiet_config.backup_changes("PORTAGE_QUIET")
+ quiet_settings[myroot] = quiet_config
+ del quiet_config
+
+ failures = 0
+
+ for x in self._mergelist:
+ if not isinstance(x, Package) or \
+ x.type_name != "ebuild":
+ continue
+
+ if x.operation == "uninstall":
+ continue
+
+ if not shown_verifying_msg:
+ shown_verifying_msg = True
+ self._status_msg("Verifying ebuild manifests")
+
+ root_config = x.root_config
+ portdb = root_config.trees["porttree"].dbapi
+ quiet_config = quiet_settings[root_config.root]
+ ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
+ if ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s'" % x.cpv)
+ quiet_config["O"] = os.path.dirname(ebuild_path)
+ if not digestcheck([], quiet_config, strict=True):
+ failures |= 1
+
+ if failures:
+ return 1
+ return os.EX_OK
+
+ def _add_prefetchers(self):
+
+ if not self._parallel_fetch:
+ return
+
+ self._status_msg("Starting parallel fetch")
+
+ prefetchers = self._prefetchers
+ getbinpkg = "--getbinpkg" in self.myopts
+
+ for pkg in self._mergelist:
+ # mergelist can contain solved Blocker instances
+ if not isinstance(pkg, Package) or pkg.operation == "uninstall":
+ continue
+ prefetcher = self._create_prefetcher(pkg)
+ if prefetcher is not None:
+ self._task_queues.fetch.add(prefetcher)
+ prefetchers[pkg] = prefetcher
+
+ # Start the first prefetcher immediately so that self._task()
+ # won't discard it. This avoids a case where the first
+ # prefetcher is discarded, causing the second prefetcher to
+ # occupy the fetch queue before the first fetcher has an
+ # opportunity to execute.
+ self._task_queues.fetch.schedule()
+
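
Because _prefetchers is a WeakValueDictionary, an entry vanishes as soon as the scheduler drops its last strong reference to the task; nothing has to clean the cache explicitly. A sketch (deterministic under CPython's reference counting; other implementations may reclaim later):

    import weakref

    class Prefetcher(object):
        pass

    prefetchers = weakref.WeakValueDictionary()
    task = Prefetcher()
    prefetchers["sys-apps/foo-1.0"] = task
    print(len(prefetchers))  # 1
    del task                 # last strong reference gone
    print(len(prefetchers))  # 0 under CPython
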
+ def _create_prefetcher(self, pkg):
+ """
+ @return: a prefetcher, or None if not applicable
+ """
+ prefetcher = None
+
+ if not isinstance(pkg, Package):
+ pass
+
+ elif pkg.type_name == "ebuild":
+
+ prefetcher = EbuildFetcher(background=True,
+ config_pool=self._ConfigPool(pkg.root,
+ self._allocate_config, self._deallocate_config),
+ fetchonly=1, logfile=self._fetch_log,
+ pkg=pkg, prefetch=True, scheduler=self._sched_iface)
+
+ elif pkg.type_name == "binary" and \
+ "--getbinpkg" in self.myopts and \
+ pkg.root_config.trees["bintree"].isremote(pkg.cpv):
+
+ prefetcher = BinpkgPrefetcher(background=True,
+ pkg=pkg, scheduler=self._sched_iface)
+
+ return prefetcher
+
+ def _is_restart_scheduled(self):
+ """
+ Check if the merge list contains a replacement
+ for the current running instance, that will result
+ in restart after merge.
+ @rtype: bool
+ @returns: True if a restart is scheduled, False otherwise.
+ """
+ if self._opts_no_restart.intersection(self.myopts):
+ return False
+
+ mergelist = self._mergelist
+
+ for i, pkg in enumerate(mergelist):
+ if self._is_restart_necessary(pkg) and \
+ i != len(mergelist) - 1:
+ return True
+
+ return False
+
+ def _is_restart_necessary(self, pkg):
+ """
+ @return: True if merging the given package
+ requires restart, False otherwise.
+ """
+
+ # Figure out if we need a restart.
+ if pkg.root == self._running_root.root and \
+ portage.match_from_list(
+ portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
+ if self._running_portage is None:
+ return True
+ elif pkg.cpv != self._running_portage.cpv or \
+ '9999' in pkg.cpv or \
+ 'git' in pkg.inherited or \
+ 'git-2' in pkg.inherited:
+ return True
+ return False
+
+ def _restart_if_necessary(self, pkg):
+ """
+ Use execv() to restart emerge. This happens
+ if portage upgrades itself and there are
+ remaining packages in the list.
+ """
+
+ if self._opts_no_restart.intersection(self.myopts):
+ return
+
+ if not self._is_restart_necessary(pkg):
+ return
+
+ if pkg == self._mergelist[-1]:
+ return
+
+ self._main_loop_cleanup()
+
+ logger = self._logger
+ pkg_count = self._pkg_count
+ mtimedb = self._mtimedb
+ bad_resume_opts = self._bad_resume_opts
+
+ logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
+
+ logger.log(" *** RESTARTING " + \
+ "emerge via exec() after change of " + \
+ "portage version.")
+
+ mtimedb["resume"]["mergelist"].remove(list(pkg))
+ mtimedb.commit()
+ portage.run_exitfuncs()
+ # Don't trust sys.argv[0] here because eselect-python may modify it.
+ emerge_binary = os.path.join(portage.const.PORTAGE_BIN_PATH, 'emerge')
+ mynewargv = [emerge_binary, "--resume"]
+ resume_opts = self.myopts.copy()
+ # For automatic resume, we need to prevent
+ # any of bad_resume_opts from leaking in
+ # via EMERGE_DEFAULT_OPTS.
+ resume_opts["--ignore-default-opts"] = True
+ for myopt, myarg in resume_opts.items():
+ if myopt not in bad_resume_opts:
+ if myarg is True:
+ mynewargv.append(myopt)
+ elif isinstance(myarg, list):
+ # arguments like --exclude that use 'append' action
+ for x in myarg:
+ mynewargv.append("%s=%s" % (myopt, x))
+ else:
+ mynewargv.append("%s=%s" % (myopt, myarg))
+ # priority only needs to be adjusted on the first run
+ os.environ["PORTAGE_NICENESS"] = "0"
+ os.execv(mynewargv[0], mynewargv)
+
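
The argv reconstruction loop above flattens an options dict back into command-line form: True values become bare flags, lists repeat the option once per element, and everything else becomes opt=arg. A sketch with hypothetical option values, printing instead of exec'ing:

    opts = {
        "--verbose": True,
        "--jobs": "4",
        "--exclude": ["sys-apps/foo", "sys-apps/bar"],
    }
    argv = ["emerge", "--resume"]
    for opt, arg in sorted(opts.items()):
        if arg is True:
            argv.append(opt)
        elif isinstance(arg, list):
            argv.extend("%s=%s" % (opt, x) for x in arg)
        else:
            argv.append("%s=%s" % (opt, arg))
    print(argv)
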
+ def _run_pkg_pretend(self):
+ """
+ Since pkg_pretend output may be important, this method sends all
+ output directly to stdout (regardless of options like --quiet or
+ --jobs).
+ """
+
+ failures = 0
+
+ # Use a local PollScheduler instance here, since we don't
+ # want tasks here to trigger the usual Scheduler callbacks
+ # that handle job scheduling and status display.
+ sched_iface = PollScheduler().sched_iface
+
+ for x in self._mergelist:
+ if not isinstance(x, Package):
+ continue
+
+ if x.operation == "uninstall":
+ continue
+
+ if x.metadata["EAPI"] in ("0", "1", "2", "3"):
+ continue
+
+ if "pretend" not in x.metadata.defined_phases:
+ continue
+
+ out_str =">>> Running pre-merge checks for " + colorize("INFORM", x.cpv) + "\n"
+ portage.util.writemsg_stdout(out_str, noiselevel=-1)
+
+ root_config = x.root_config
+ settings = self.pkgsettings[root_config.root]
+ settings.setcpv(x)
+ tmpdir = tempfile.mkdtemp()
+ tmpdir_orig = settings["PORTAGE_TMPDIR"]
+ settings["PORTAGE_TMPDIR"] = tmpdir
+
+ try:
+ if x.built:
+ tree = "bintree"
+ bintree = root_config.trees["bintree"].dbapi.bintree
+ fetched = False
+
+ # Display fetch on stdout, so that it's always clear what
+ # is consuming time here.
+ if bintree.isremote(x.cpv):
+ fetcher = BinpkgFetcher(pkg=x,
+ scheduler=sched_iface)
+ fetcher.start()
+ if fetcher.wait() != os.EX_OK:
+ failures += 1
+ continue
+ fetched = fetcher.pkg_path
+
+ verifier = BinpkgVerifier(pkg=x,
+ scheduler=sched_iface)
+ verifier.start()
+ if verifier.wait() != os.EX_OK:
+ failures += 1
+ continue
+
+ if fetched:
+ bintree.inject(x.cpv, filename=fetched)
+ tbz2_file = bintree.getname(x.cpv)
+ infloc = os.path.join(tmpdir, x.category, x.pf, "build-info")
+ os.makedirs(infloc)
+ portage.xpak.tbz2(tbz2_file).unpackinfo(infloc)
+ ebuild_path = os.path.join(infloc, x.pf + ".ebuild")
+ settings.configdict["pkg"]["EMERGE_FROM"] = "binary"
+ settings.configdict["pkg"]["MERGE_TYPE"] = "binary"
+
+ else:
+ tree = "porttree"
+ portdb = root_config.trees["porttree"].dbapi
+ ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
+ if ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s'" % x.cpv)
+ settings.configdict["pkg"]["EMERGE_FROM"] = "ebuild"
+ if self._build_opts.buildpkgonly:
+ settings.configdict["pkg"]["MERGE_TYPE"] = "buildonly"
+ else:
+ settings.configdict["pkg"]["MERGE_TYPE"] = "source"
+
+ portage.package.ebuild.doebuild.doebuild_environment(ebuild_path,
+ "pretend", settings=settings,
+ db=self.trees[settings["ROOT"]][tree].dbapi)
+ prepare_build_dirs(root_config.root, settings, cleanup=0)
+
+ vardb = root_config.trees['vartree'].dbapi
+ settings["REPLACING_VERSIONS"] = " ".join(
+ set(portage.versions.cpv_getversion(match) \
+ for match in vardb.match(x.slot_atom) + \
+ vardb.match('='+x.cpv)))
+ pretend_phase = EbuildPhase(
+ phase="pretend", scheduler=sched_iface,
+ settings=settings)
+
+ pretend_phase.start()
+ ret = pretend_phase.wait()
+ if ret != os.EX_OK:
+ failures += 1
+ portage.elog.elog_process(x.cpv, settings)
+ finally:
+ shutil.rmtree(tmpdir)
+ settings["PORTAGE_TMPDIR"] = tmpdir_orig
+
+ if failures:
+ return 1
+ return os.EX_OK
+
+ def merge(self):
+ if "--resume" in self.myopts:
+ # We're resuming.
+ portage.writemsg_stdout(
+ colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
+ self._logger.log(" *** Resuming merge...")
+
+ self._save_resume_list()
+
+ try:
+ self._background = self._background_mode()
+ except self._unknown_internal_error:
+ return 1
+
+ for root in self.trees:
+ root_config = self.trees[root]["root_config"]
+
+ # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
+ # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
+ # for ensuring sane $PWD (bug #239560) and storing elog messages.
+ tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
+ if not tmpdir or not os.path.isdir(tmpdir):
+ msg = "The directory specified in your " + \
+ "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
+ "does not exist. Please create this " + \
+ "directory or correct your PORTAGE_TMPDIR setting."
+ msg = textwrap.wrap(msg, 70)
+ out = portage.output.EOutput()
+ for l in msg:
+ out.eerror(l)
+ return 1
+
+ if self._background:
+ root_config.settings.unlock()
+ root_config.settings["PORTAGE_BACKGROUND"] = "1"
+ root_config.settings.backup_changes("PORTAGE_BACKGROUND")
+ root_config.settings.lock()
+
+ self.pkgsettings[root] = portage.config(
+ clone=root_config.settings)
+
+ keep_going = "--keep-going" in self.myopts
+ fetchonly = self._build_opts.fetchonly
+ mtimedb = self._mtimedb
+ failed_pkgs = self._failed_pkgs
+
+ rval = self._generate_digests()
+ if rval != os.EX_OK:
+ return rval
+
+ rval = self._env_sanity_check()
+ if rval != os.EX_OK:
+ return rval
+
+ # TODO: Immediately recalculate deps here if --keep-going
+ # is enabled and corrupt manifests are detected.
+ rval = self._check_manifests()
+ if rval != os.EX_OK and not keep_going:
+ return rval
+
+ if not fetchonly:
+ rval = self._run_pkg_pretend()
+ if rval != os.EX_OK:
+ return rval
+
+ while True:
+
+ received_signal = []
+
+ def sighandler(signum, frame):
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
+ portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % \
+ {"signal":signum})
+ self.terminate()
+ received_signal.append(128 + signum)
+
+ earlier_sigint_handler = signal.signal(signal.SIGINT, sighandler)
+ earlier_sigterm_handler = signal.signal(signal.SIGTERM, sighandler)
+
+ try:
+ rval = self._merge()
+ finally:
+ # Restore previous handlers
+ if earlier_sigint_handler is not None:
+ signal.signal(signal.SIGINT, earlier_sigint_handler)
+ else:
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ if earlier_sigterm_handler is not None:
+ signal.signal(signal.SIGTERM, earlier_sigterm_handler)
+ else:
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+ if received_signal:
+ sys.exit(received_signal[0])
+
+ if rval == os.EX_OK or fetchonly or not keep_going:
+ break
+ if "resume" not in mtimedb:
+ break
+ mergelist = self._mtimedb["resume"].get("mergelist")
+ if not mergelist:
+ break
+
+ if not failed_pkgs:
+ break
+
+ for failed_pkg in failed_pkgs:
+ mergelist.remove(list(failed_pkg.pkg))
+
+ self._failed_pkgs_all.extend(failed_pkgs)
+ del failed_pkgs[:]
+
+ if not mergelist:
+ break
+
+ if not self._calc_resume_list():
+ break
+
+ clear_caches(self.trees)
+ if not self._mergelist:
+ break
+
+ self._save_resume_list()
+ self._pkg_count.curval = 0
+ self._pkg_count.maxval = len([x for x in self._mergelist \
+ if isinstance(x, Package) and x.operation == "merge"])
+ self._status_display.maxval = self._pkg_count.maxval
+
+ self._logger.log(" *** Finished. Cleaning up...")
+
+ if failed_pkgs:
+ self._failed_pkgs_all.extend(failed_pkgs)
+ del failed_pkgs[:]
+
+ printer = portage.output.EOutput()
+ background = self._background
+ failure_log_shown = False
+ if background and len(self._failed_pkgs_all) == 1:
+ # If only one package failed then just show its
+ # whole log for easy viewing.
+ failed_pkg = self._failed_pkgs_all[-1]
+ build_dir = failed_pkg.build_dir
+ log_file = None
+
+ log_paths = [failed_pkg.build_log]
+
+ log_path = self._locate_failure_log(failed_pkg)
+ if log_path is not None:
+ try:
+ log_file = open(_unicode_encode(log_path,
+ encoding=_encodings['fs'], errors='strict'), mode='rb')
+ except IOError:
+ pass
+ else:
+ if log_path.endswith('.gz'):
+ log_file = gzip.GzipFile(filename='',
+ mode='rb', fileobj=log_file)
+
+ if log_file is not None:
+ try:
+ for line in log_file:
+ writemsg_level(line, noiselevel=-1)
+ except zlib.error as e:
+ writemsg_level("%s\n" % (e,), level=logging.ERROR,
+ noiselevel=-1)
+ finally:
+ log_file.close()
+ failure_log_shown = True
+
+ # Dump mod_echo output now since it tends to flood the terminal.
+ # This prevents more important output, generated later, from being
+ # swept away by the mod_echo output.
+ mod_echo_output = _flush_elog_mod_echo()
+
+ if background and not failure_log_shown and \
+ self._failed_pkgs_all and \
+ self._failed_pkgs_die_msgs and \
+ not mod_echo_output:
+
+ for mysettings, key, logentries in self._failed_pkgs_die_msgs:
+ root_msg = ""
+ if mysettings["ROOT"] != "/":
+ root_msg = " merged to %s" % mysettings["ROOT"]
+ print()
+ printer.einfo("Error messages for package %s%s:" % \
+ (colorize("INFORM", key), root_msg))
+ print()
+ for phase in portage.const.EBUILD_PHASES:
+ if phase not in logentries:
+ continue
+ for msgtype, msgcontent in logentries[phase]:
+ if isinstance(msgcontent, basestring):
+ msgcontent = [msgcontent]
+ for line in msgcontent:
+ printer.eerror(line.strip("\n"))
+
+ if self._post_mod_echo_msgs:
+ for msg in self._post_mod_echo_msgs:
+ msg()
+
+ if len(self._failed_pkgs_all) > 1 or \
+ (self._failed_pkgs_all and keep_going):
+ if len(self._failed_pkgs_all) > 1:
+ msg = "The following %d packages have " % \
+ len(self._failed_pkgs_all) + \
+ "failed to build or install:"
+ else:
+ msg = "The following package has " + \
+ "failed to build or install:"
+
+ printer.eerror("")
+ for line in textwrap.wrap(msg, 72):
+ printer.eerror(line)
+ printer.eerror("")
+ for failed_pkg in self._failed_pkgs_all:
+ # Use _unicode_decode() to force unicode format string so
+ # that Package.__unicode__() is called in python2.
+ msg = _unicode_decode(" %s") % (failed_pkg.pkg,)
+ log_path = self._locate_failure_log(failed_pkg)
+ if log_path is not None:
+ msg += ", Log file:"
+ printer.eerror(msg)
+ if log_path is not None:
+ printer.eerror(" '%s'" % colorize('INFORM', log_path))
+ printer.eerror("")
+
+ if self._failed_pkgs_all:
+ return 1
+ return os.EX_OK
+
+ def _elog_listener(self, mysettings, key, logentries, fulltext):
+ errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
+ if errors:
+ self._failed_pkgs_die_msgs.append(
+ (mysettings, key, errors))
+
+ def _locate_failure_log(self, failed_pkg):
+
+ build_dir = failed_pkg.build_dir
+ log_file = None
+
+ log_paths = [failed_pkg.build_log]
+
+ for log_path in log_paths:
+ if not log_path:
+ continue
+
+ try:
+ log_size = os.stat(log_path).st_size
+ except OSError:
+ continue
+
+ if log_size == 0:
+ continue
+
+ return log_path
+
+ return None
+
+ def _add_packages(self):
+ pkg_queue = self._pkg_queue
+ for pkg in self._mergelist:
+ if isinstance(pkg, Package):
+ pkg_queue.append(pkg)
+ elif isinstance(pkg, Blocker):
+ pass
+
+ def _system_merge_started(self, merge):
+ """
+ Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
+ In general, this keeps track of installed system packages with
+ unsatisfied RDEPEND or PDEPEND (circular dependencies). It can be
+ a fragile situation, so we don't execute any unrelated builds until
+ the circular dependencies are built and installed.
+ """
+ graph = self._digraph
+ if graph is None:
+ return
+ pkg = merge.merge.pkg
+
+ # Skip this if $ROOT != / since it shouldn't matter if there
+ # are unsatisfied system runtime deps in this case.
+ if pkg.root != '/':
+ return
+
+ completed_tasks = self._completed_tasks
+ unsatisfied = self._unsatisfied_system_deps
+
+ def ignore_non_runtime_or_satisfied(priority):
+ """
+ Ignore non-runtime and satisfied runtime priorities.
+ """
+ if isinstance(priority, DepPriority) and \
+ not priority.satisfied and \
+ (priority.runtime or priority.runtime_post):
+ return False
+ return True
+
+ # When checking for unsatisfied runtime deps, only check
+ # direct deps since indirect deps are checked when the
+ # corresponding parent is merged.
+ for child in graph.child_nodes(pkg,
+ ignore_priority=ignore_non_runtime_or_satisfied):
+ if not isinstance(child, Package) or \
+ child.operation == 'uninstall':
+ continue
+ if child is pkg:
+ continue
+ if child.operation == 'merge' and \
+ child not in completed_tasks:
+ unsatisfied.add(child)
+
+ def _merge_wait_exit_handler(self, task):
+ self._merge_wait_scheduled.remove(task)
+ self._merge_exit(task)
+
+ def _merge_exit(self, merge):
+ self._running_tasks.pop(id(merge), None)
+ self._do_merge_exit(merge)
+ self._deallocate_config(merge.merge.settings)
+ if merge.returncode == os.EX_OK and \
+ not merge.merge.pkg.installed:
+ self._status_display.curval += 1
+ self._status_display.merges = len(self._task_queues.merge)
+ self._schedule()
+
+ def _do_merge_exit(self, merge):
+ pkg = merge.merge.pkg
+ if merge.returncode != os.EX_OK:
+ settings = merge.merge.settings
+ build_dir = settings.get("PORTAGE_BUILDDIR")
+ build_log = settings.get("PORTAGE_LOG_FILE")
+
+ self._failed_pkgs.append(self._failed_pkg(
+ build_dir=build_dir, build_log=build_log,
+ pkg=pkg,
+ returncode=merge.returncode))
+ if not self._terminated_tasks:
+ self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
+ self._status_display.failed = len(self._failed_pkgs)
+ return
+
+ self._task_complete(pkg)
+ pkg_to_replace = merge.merge.pkg_to_replace
+ if pkg_to_replace is not None:
+ # When a package is replaced, mark its uninstall
+ # task complete (if any).
+ if self._digraph is not None and \
+ pkg_to_replace in self._digraph:
+ try:
+ self._pkg_queue.remove(pkg_to_replace)
+ except ValueError:
+ pass
+ self._task_complete(pkg_to_replace)
+ else:
+ self._pkg_cache.pop(pkg_to_replace, None)
+
+ if pkg.installed:
+ return
+
+ self._restart_if_necessary(pkg)
+
+ # Call mtimedb.commit() after each merge so that
+ # --resume still works after being interrupted
+ # by reboot, sigkill or similar.
+ mtimedb = self._mtimedb
+ mtimedb["resume"]["mergelist"].remove(list(pkg))
+ if not mtimedb["resume"]["mergelist"]:
+ del mtimedb["resume"]
+ mtimedb.commit()
+
+ def _build_exit(self, build):
+ self._running_tasks.pop(id(build), None)
+ if build.returncode == os.EX_OK and self._terminated_tasks:
+ # We've been interrupted, so we won't
+ # add this to the merge queue.
+ self.curval += 1
+ self._deallocate_config(build.settings)
+ elif build.returncode == os.EX_OK:
+ self.curval += 1
+ merge = PackageMerge(merge=build)
+ self._running_tasks[id(merge)] = merge
+ if not build.build_opts.buildpkgonly and \
+ build.pkg in self._deep_system_deps:
+ # Since dependencies on system packages are frequently
+ # unspecified, merge them only when no builds are executing.
+ self._merge_wait_queue.append(merge)
+ merge.addStartListener(self._system_merge_started)
+ else:
+ merge.addExitListener(self._merge_exit)
+ self._task_queues.merge.add(merge)
+ self._status_display.merges = len(self._task_queues.merge)
+ else:
+ settings = build.settings
+ build_dir = settings.get("PORTAGE_BUILDDIR")
+ build_log = settings.get("PORTAGE_LOG_FILE")
+
+ self._failed_pkgs.append(self._failed_pkg(
+ build_dir=build_dir, build_log=build_log,
+ pkg=build.pkg,
+ returncode=build.returncode))
+ if not self._terminated_tasks:
+ self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
+ self._status_display.failed = len(self._failed_pkgs)
+ self._deallocate_config(build.settings)
+ self._jobs -= 1
+ self._status_display.running = self._jobs
+ self._schedule()
+
+ def _extract_exit(self, build):
+ self._build_exit(build)
+
+ def _task_complete(self, pkg):
+ self._completed_tasks.add(pkg)
+ self._unsatisfied_system_deps.discard(pkg)
+ self._choose_pkg_return_early = False
+ blocker_db = self._blocker_db[pkg.root]
+ blocker_db.discardBlocker(pkg)
+
+ def _merge(self):
+
+ self._add_prefetchers()
+ self._add_packages()
+ pkg_queue = self._pkg_queue
+ failed_pkgs = self._failed_pkgs
+ portage.locks._quiet = self._background
+ portage.elog.add_listener(self._elog_listener)
+ rval = os.EX_OK
+
+ try:
+ self._main_loop()
+ finally:
+ self._main_loop_cleanup()
+ portage.locks._quiet = False
+ portage.elog.remove_listener(self._elog_listener)
+ if failed_pkgs:
+ rval = failed_pkgs[-1].returncode
+
+ return rval
+
+ def _main_loop_cleanup(self):
+ del self._pkg_queue[:]
+ self._completed_tasks.clear()
+ self._deep_system_deps.clear()
+ self._unsatisfied_system_deps.clear()
+ self._choose_pkg_return_early = False
+ self._status_display.reset()
+ self._digraph = None
+ self._task_queues.fetch.clear()
+ self._prefetchers.clear()
+
+ def _choose_pkg(self):
+ """
+ Choose a task that has all its dependencies satisfied. This is used
+ for parallel build scheduling, and ensures that we don't build
+ anything with deep dependencies that have yet to be merged.
+ """
+
+ if self._choose_pkg_return_early:
+ return None
+
+ if self._digraph is None:
+ if self._is_work_scheduled() and \
+ not ("--nodeps" in self.myopts and \
+ (self._max_jobs is True or self._max_jobs > 1)):
+ self._choose_pkg_return_early = True
+ return None
+ return self._pkg_queue.pop(0)
+
+ if not self._is_work_scheduled():
+ return self._pkg_queue.pop(0)
+
+ self._prune_digraph()
+
+ chosen_pkg = None
+
+ # Prefer uninstall operations when available.
+ graph = self._digraph
+ for pkg in self._pkg_queue:
+ if pkg.operation == 'uninstall' and \
+ not graph.child_nodes(pkg):
+ chosen_pkg = pkg
+ break
+
+ if chosen_pkg is None:
+ later = set(self._pkg_queue)
+ for pkg in self._pkg_queue:
+ later.remove(pkg)
+ if not self._dependent_on_scheduled_merges(pkg, later):
+ chosen_pkg = pkg
+ break
+
+ if chosen_pkg is not None:
+ self._pkg_queue.remove(chosen_pkg)
+
+ if chosen_pkg is None:
+ # There's no point in searching for a package to
+ # choose until at least one of the existing jobs
+ # completes.
+ self._choose_pkg_return_early = True
+
+ return chosen_pkg
+
+ def _dependent_on_scheduled_merges(self, pkg, later):
+ """
+ Traverse the subgraph of the given package's deep dependencies
+ to see if it contains any scheduled merges.
+ @param pkg: a package to check dependencies for
+ @type pkg: Package
+ @param later: packages for which dependence should be ignored
+ since they will be merged later than pkg anyway and therefore
+ delaying the merge of pkg will not result in a more optimal
+ merge order
+ @type later: set
+ @rtype: bool
+ @returns: True if the package is dependent, False otherwise.
+ """
+
+ graph = self._digraph
+ completed_tasks = self._completed_tasks
+
+ dependent = False
+ traversed_nodes = set([pkg])
+ direct_deps = graph.child_nodes(pkg)
+ node_stack = direct_deps
+ direct_deps = frozenset(direct_deps)
+ while node_stack:
+ node = node_stack.pop()
+ if node in traversed_nodes:
+ continue
+ traversed_nodes.add(node)
+ if not ((node.installed and node.operation == "nomerge") or \
+ (node.operation == "uninstall" and \
+ node not in direct_deps) or \
+ node in completed_tasks or \
+ node in later):
+ dependent = True
+ break
+
+ # Don't traverse children of uninstall nodes since
+ # those aren't dependencies in the usual sense.
+ if node.operation != "uninstall":
+ node_stack.extend(graph.child_nodes(node))
+
+ return dependent
+
+ def _allocate_config(self, root):
+ """
+ Allocate a unique config instance for a task in order
+ to prevent interference between parallel tasks.
+ """
+ if self._config_pool[root]:
+ temp_settings = self._config_pool[root].pop()
+ else:
+ temp_settings = portage.config(clone=self.pkgsettings[root])
+ # Since config.setcpv() isn't guaranteed to call config.reset() (for
+ # performance reasons), call it here to make sure all settings from the
+ # previous package get flushed out (such as PORTAGE_LOG_FILE).
+ temp_settings.reload()
+ temp_settings.reset()
+ return temp_settings
+
+ def _deallocate_config(self, settings):
+ self._config_pool[settings["ROOT"]].append(settings)
+
+ def _main_loop(self):
+
+ # Only allow 1 job max if a restart is scheduled
+ # due to portage update.
+ if self._is_restart_scheduled() or \
+ self._opts_no_background.intersection(self.myopts):
+ self._set_max_jobs(1)
+
+ while self._schedule():
+ self._poll_loop()
+
+ while True:
+ self._schedule()
+ if not self._is_work_scheduled():
+ break
+ self._poll_loop()
+
+ def _keep_scheduling(self):
+ return bool(not self._terminated_tasks and self._pkg_queue and \
+ not (self._failed_pkgs and not self._build_opts.fetchonly))
+
+ def _is_work_scheduled(self):
+ return bool(self._running_tasks)
+
+ def _schedule_tasks(self):
+
+ while True:
+
+ # When the number of jobs and merges drops to zero,
+ # process a single merge from _merge_wait_queue if
+ # it's not empty. We only process one since these are
+ # special packages and we want to ensure that
+ # parallel-install does not cause more than one of
+ # them to install at the same time.
+ if (self._merge_wait_queue and not self._jobs and
+ not self._task_queues.merge):
+ task = self._merge_wait_queue.popleft()
+ task.addExitListener(self._merge_wait_exit_handler)
+ self._task_queues.merge.add(task)
+ self._status_display.merges = len(self._task_queues.merge)
+ self._merge_wait_scheduled.append(task)
+
+ self._schedule_tasks_imp()
+ self._status_display.display()
+
+ state_change = 0
+ for q in self._task_queues.values():
+ if q.schedule():
+ state_change += 1
+
+ # Cancel prefetchers if they're the only reason
+ # the main poll loop is still running.
+ if self._failed_pkgs and not self._build_opts.fetchonly and \
+ not self._is_work_scheduled() and \
+ self._task_queues.fetch:
+ self._task_queues.fetch.clear()
+ state_change += 1
+
+ if not (state_change or \
+ (self._merge_wait_queue and not self._jobs and
+ not self._task_queues.merge)):
+ break
+
+ return self._keep_scheduling()
+
+ def _job_delay(self):
+ """
+ @rtype: bool
+ @returns: True if job scheduling should be delayed, False otherwise.
+ """
+
+ if self._jobs and self._max_load is not None:
+
+ current_time = time.time()
+
+ delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
+ if delay > self._job_delay_max:
+ delay = self._job_delay_max
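+ # Illustrative numbers (hypothetical, not necessarily the real
+ # defaults): with _job_delay_factor = 0.1 and _job_delay_exp = 1.5,
+ # four running jobs give delay = 0.1 * 4 ** 1.5 = 0.8 seconds.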
+ if (current_time - self._previous_job_start_time) < delay:
+ return True
+
+ return False
+
+ def _schedule_tasks_imp(self):
+ """
+ @rtype: bool
+ @returns: True if state changed, False otherwise.
+ """
+
+ state_change = 0
+
+ while True:
+
+ if not self._keep_scheduling():
+ return bool(state_change)
+
+ if self._choose_pkg_return_early or \
+ self._merge_wait_scheduled or \
+ (self._jobs and self._unsatisfied_system_deps) or \
+ not self._can_add_job() or \
+ self._job_delay():
+ return bool(state_change)
+
+ pkg = self._choose_pkg()
+ if pkg is None:
+ return bool(state_change)
+
+ state_change += 1
+
+ if not pkg.installed:
+ self._pkg_count.curval += 1
+
+ task = self._task(pkg)
+
+ if pkg.installed:
+ merge = PackageMerge(merge=task)
+ self._running_tasks[id(merge)] = merge
+ merge.addExitListener(self._merge_exit)
+ self._task_queues.merge.addFront(merge)
+
+ elif pkg.built:
+ self._jobs += 1
+ self._previous_job_start_time = time.time()
+ self._status_display.running = self._jobs
+ self._running_tasks[id(task)] = task
+ task.addExitListener(self._extract_exit)
+ self._task_queues.jobs.add(task)
+
+ else:
+ self._jobs += 1
+ self._previous_job_start_time = time.time()
+ self._status_display.running = self._jobs
+ self._running_tasks[id(task)] = task
+ task.addExitListener(self._build_exit)
+ self._task_queues.jobs.add(task)
+
+ return bool(state_change)
+
+ def _task(self, pkg):
+
+ pkg_to_replace = None
+ if pkg.operation != "uninstall":
+ vardb = pkg.root_config.trees["vartree"].dbapi
+ previous_cpv = [x for x in vardb.match(pkg.slot_atom) \
+ if portage.cpv_getkey(x) == pkg.cp]
+ if not previous_cpv and vardb.cpv_exists(pkg.cpv):
+ # same cpv, different SLOT
+ previous_cpv = [pkg.cpv]
+ if previous_cpv:
+ previous_cpv = previous_cpv.pop()
+ pkg_to_replace = self._pkg(previous_cpv,
+ "installed", pkg.root_config, installed=True,
+ operation="uninstall")
+
+ prefetcher = self._prefetchers.pop(pkg, None)
+ if prefetcher is not None and not prefetcher.isAlive():
+ try:
+ self._task_queues.fetch._task_queue.remove(prefetcher)
+ except ValueError:
+ pass
+ prefetcher = None
+
+ task = MergeListItem(args_set=self._args_set,
+ background=self._background, binpkg_opts=self._binpkg_opts,
+ build_opts=self._build_opts,
+ config_pool=self._ConfigPool(pkg.root,
+ self._allocate_config, self._deallocate_config),
+ emerge_opts=self.myopts,
+ find_blockers=self._find_blockers(pkg), logger=self._logger,
+ mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
+ pkg_to_replace=pkg_to_replace,
+ prefetcher=prefetcher,
+ scheduler=self._sched_iface,
+ settings=self._allocate_config(pkg.root),
+ statusMessage=self._status_msg,
+ world_atom=self._world_atom)
+
+ return task
+
+ def _failed_pkg_msg(self, failed_pkg, action, preposition):
+ pkg = failed_pkg.pkg
+ msg = "%s to %s %s" % \
+ (bad("Failed"), action, colorize("INFORM", pkg.cpv))
+ if pkg.root != "/":
+ msg += " %s %s" % (preposition, pkg.root)
+
+ log_path = self._locate_failure_log(failed_pkg)
+ if log_path is not None:
+ msg += ", Log file:"
+ self._status_msg(msg)
+
+ if log_path is not None:
+ self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
+
+ def _status_msg(self, msg):
+ """
+ Display a brief status message (no newlines) in the status display.
+ This is called by tasks to provide feedback to the user. It
+ delegates the responsibility of generating \r and \n control
+ characters to the status display, guaranteeing that lines are
+ created or erased when necessary and appropriate.
+
+ @type msg: str
+ @param msg: a brief status message (no newlines allowed)
+ """
+ if not self._background:
+ writemsg_level("\n")
+ self._status_display.displayMessage(msg)
+
+ def _save_resume_list(self):
+ """
+ Do this before verifying the ebuild Manifests since it might
+ be possible for the user to use --resume --skipfirst to get past
+ a non-essential package with a broken digest.
+ """
+ mtimedb = self._mtimedb
+
+ mtimedb["resume"] = {}
+ # Stored as a dict starting with portage-2.1.6_rc1, and supported
+ # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
+ # a list type for options.
+ mtimedb["resume"]["myopts"] = self.myopts.copy()
+
+ # Convert Atom instances to plain str.
+ mtimedb["resume"]["favorites"] = [str(x) for x in self._favorites]
+ mtimedb["resume"]["mergelist"] = [list(x) \
+ for x in self._mergelist \
+ if isinstance(x, Package) and x.operation == "merge"]
+
+ mtimedb.commit()
+
+ def _calc_resume_list(self):
+ """
+ Use the current resume list to calculate a new one,
+ dropping any packages with unsatisfied deps.
+ @rtype: bool
+ @returns: True if successful, False otherwise.
+ """
+ print(colorize("GOOD", "*** Resuming merge..."))
+
+ # free some memory before creating
+ # the resume depgraph
+ self._destroy_graph()
+
+ myparams = create_depgraph_params(self.myopts, None)
+ success = False
+ e = None
+ try:
+ success, mydepgraph, dropped_tasks = resume_depgraph(
+ self.settings, self.trees, self._mtimedb, self.myopts,
+ myparams, self._spinner)
+ except depgraph.UnsatisfiedResumeDep as exc:
+ # rename variable to avoid python-3.0 error:
+ # SyntaxError: can not delete variable 'e' referenced in nested
+ # scope
+ e = exc
+ mydepgraph = e.depgraph
+ dropped_tasks = set()
+
+ if e is not None:
+ def unsatisfied_resume_dep_msg():
+ mydepgraph.display_problems()
+ out = portage.output.EOutput()
+ out.eerror("One or more packages are either masked or " + \
+ "have missing dependencies:")
+ out.eerror("")
+ indent = " "
+ show_parents = set()
+ for dep in e.value:
+ if dep.parent in show_parents:
+ continue
+ show_parents.add(dep.parent)
+ if dep.atom is None:
+ out.eerror(indent + "Masked package:")
+ out.eerror(2 * indent + str(dep.parent))
+ out.eerror("")
+ else:
+ out.eerror(indent + str(dep.atom) + " pulled in by:")
+ out.eerror(2 * indent + str(dep.parent))
+ out.eerror("")
+ msg = "The resume list contains packages " + \
+ "that are either masked or have " + \
+ "unsatisfied dependencies. " + \
+ "Please restart/continue " + \
+ "the operation manually, or use --skipfirst " + \
+ "to skip the first package in the list and " + \
+ "any other packages that may be " + \
+ "masked or have missing dependencies."
+ for line in textwrap.wrap(msg, 72):
+ out.eerror(line)
+ self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
+ return False
+
+ if success and self._show_list():
+ mylist = mydepgraph.altlist()
+ if mylist:
+ if "--tree" in self.myopts:
+ mylist.reverse()
+ mydepgraph.display(mylist, favorites=self._favorites)
+
+ if not success:
+ self._post_mod_echo_msgs.append(mydepgraph.display_problems)
+ return False
+ mydepgraph.display_problems()
+ self._init_graph(mydepgraph.schedulerGraph())
+
+ msg_width = 75
+ for task in dropped_tasks:
+ if not (isinstance(task, Package) and task.operation == "merge"):
+ continue
+ pkg = task
+ msg = "emerge --keep-going:" + \
+ " %s" % (pkg.cpv,)
+ if pkg.root != "/":
+ msg += " for %s" % (pkg.root,)
+ msg += " dropped due to unsatisfied dependency."
+ for line in textwrap.wrap(msg, msg_width):
+ eerror(line, phase="other", key=pkg.cpv)
+ settings = self.pkgsettings[pkg.root]
+ # Ensure that log collection from $T is disabled inside
+ # elog_process(), since any logs that might exist are
+ # not valid here.
+ settings.pop("T", None)
+ portage.elog.elog_process(pkg.cpv, settings)
+ self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
+
+ return True
+
+ def _show_list(self):
+ myopts = self.myopts
+ if "--quiet" not in myopts and \
+ ("--ask" in myopts or "--tree" in myopts or \
+ "--verbose" in myopts):
+ return True
+ return False
+
+ def _world_atom(self, pkg):
+ """
+ Add the package to, or remove it from, the world file, but only
+ if it's supposed to be added or removed. Otherwise, do nothing.
+ """
+
+ if set(("--buildpkgonly", "--fetchonly",
+ "--fetch-all-uri",
+ "--oneshot", "--onlydeps",
+ "--pretend")).intersection(self.myopts):
+ return
+
+ if pkg.root != self.target_root:
+ return
+
+ args_set = self._args_set
+ if not args_set.findAtomForPackage(pkg):
+ return
+
+ logger = self._logger
+ pkg_count = self._pkg_count
+ root_config = pkg.root_config
+ world_set = root_config.sets["selected"]
+ world_locked = False
+ if hasattr(world_set, "lock"):
+ world_set.lock()
+ world_locked = True
+
+ try:
+ if hasattr(world_set, "load"):
+ world_set.load() # maybe it's changed on disk
+
+ if pkg.operation == "uninstall":
+ if hasattr(world_set, "cleanPackage"):
+ world_set.cleanPackage(pkg.root_config.trees["vartree"].dbapi,
+ pkg.cpv)
+ if hasattr(world_set, "remove"):
+ for s in pkg.root_config.setconfig.active:
+ world_set.remove(SETPREFIX+s)
+ else:
+ atom = create_world_atom(pkg, args_set, root_config)
+ if atom:
+ if hasattr(world_set, "add"):
+ self._status_msg(('Recording %s in "world" ' + \
+ 'favorites file...') % atom)
+ logger.log(" === (%s of %s) Updating world file (%s)" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv))
+ world_set.add(atom)
+ else:
+ writemsg_level('\n!!! Unable to record %s in "world"\n' % \
+ (atom,), level=logging.WARN, noiselevel=-1)
+ finally:
+ if world_locked:
+ world_set.unlock()
+
+ def _pkg(self, cpv, type_name, root_config, installed=False,
+ operation=None, myrepo=None):
+ """
+ Get a package instance from the cache, or create a new
+ one if necessary. Raises KeyError from aux_get if it
+ failures for some reason (package does not exist or is
+ corrupt).
+ """
+
+ # Reuse existing instance when available.
+ pkg = self._pkg_cache.get(Package._gen_hash_key(cpv=cpv,
+ type_name=type_name, repo_name=myrepo, root_config=root_config,
+ installed=installed, operation=operation))
+
+ if pkg is not None:
+ return pkg
+
+ tree_type = depgraph.pkg_tree_map[type_name]
+ db = root_config.trees[tree_type].dbapi
+ db_keys = list(self.trees[root_config.root][
+ tree_type].dbapi._aux_cache_keys)
+ metadata = zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo))
+ pkg = Package(built=(type_name != "ebuild"),
+ cpv=cpv, installed=installed, metadata=metadata,
+ root_config=root_config, type_name=type_name)
+ self._pkg_cache[pkg] = pkg
+ return pkg
diff --git a/portage_with_autodep/pym/_emerge/SequentialTaskQueue.py b/portage_with_autodep/pym/_emerge/SequentialTaskQueue.py
new file mode 100644
index 0000000..c1c98c4
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/SequentialTaskQueue.py
@@ -0,0 +1,89 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+from _emerge.SlotObject import SlotObject
+from collections import deque
+class SequentialTaskQueue(SlotObject):
+
+ __slots__ = ("max_jobs", "running_tasks") + \
+ ("_dirty", "_scheduling", "_task_queue")
+
+ def __init__(self, **kwargs):
+ SlotObject.__init__(self, **kwargs)
+ self._task_queue = deque()
+ self.running_tasks = set()
+ if self.max_jobs is None:
+ self.max_jobs = 1
+ self._dirty = True
+
+ def add(self, task):
+ self._task_queue.append(task)
+ self._dirty = True
+
+ def addFront(self, task):
+ self._task_queue.appendleft(task)
+ self._dirty = True
+
+ def schedule(self):
+
+ if not self._dirty:
+ return False
+
+ if not self:
+ return False
+
+ if self._scheduling:
+ # Ignore any recursive schedule() calls triggered via
+ # self._task_exit().
+ return False
+
+ self._scheduling = True
+
+ task_queue = self._task_queue
+ running_tasks = self.running_tasks
+ max_jobs = self.max_jobs
+ state_changed = False
+
+ while task_queue and \
+ (max_jobs is True or len(running_tasks) < max_jobs):
+ task = task_queue.popleft()
+ cancelled = getattr(task, "cancelled", None)
+ if not cancelled:
+ running_tasks.add(task)
+ task.addExitListener(self._task_exit)
+ task.start()
+ state_changed = True
+
+ self._dirty = False
+ self._scheduling = False
+
+ return state_changed
+
+ def _task_exit(self, task):
+ """
+ Since we can always rely on exit listeners being called, the set of
+ running tasks is always pruned automatically and there is never any need
+ to actively prune it.
+ """
+ self.running_tasks.remove(task)
+ if self._task_queue:
+ self._dirty = True
+
+ def clear(self):
+ self._task_queue.clear()
+ running_tasks = self.running_tasks
+ while running_tasks:
+ task = running_tasks.pop()
+ task.removeExitListener(self._task_exit)
+ task.cancel()
+ self._dirty = False
+
+ def __bool__(self):
+ return bool(self._task_queue or self.running_tasks)
+
+ if sys.hexversion < 0x3000000:
+ __nonzero__ = __bool__
+
+ def __len__(self):
+ return len(self._task_queue) + len(self.running_tasks)
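+
+# A minimal usage sketch (illustrative only; any object that provides
+# start(), addExitListener() and removeExitListener(), such as an
+# AsynchronousTask, can serve as a task):
+#
+#     queue = SequentialTaskQueue(max_jobs=2)
+#     for task in tasks:
+#         queue.add(task)
+#     queue.schedule()  # starts up to max_jobs tasks; when one exits,
+#                       # _task_exit() marks the queue dirty so that a
+#                       # later schedule() call starts the next task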
diff --git a/portage_with_autodep/pym/_emerge/SetArg.py b/portage_with_autodep/pym/_emerge/SetArg.py
new file mode 100644
index 0000000..94cf0a6
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/SetArg.py
@@ -0,0 +1,11 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.DependencyArg import DependencyArg
+from portage._sets import SETPREFIX
+class SetArg(DependencyArg):
+ def __init__(self, pset=None, **kwargs):
+ DependencyArg.__init__(self, **kwargs)
+ self.pset = pset
+ self.name = self.arg[len(SETPREFIX):]
+
diff --git a/portage_with_autodep/pym/_emerge/SlotObject.py b/portage_with_autodep/pym/_emerge/SlotObject.py
new file mode 100644
index 0000000..fdc6f35
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/SlotObject.py
@@ -0,0 +1,42 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+class SlotObject(object):
+ __slots__ = ("__weakref__",)
+
+ def __init__(self, **kwargs):
+ classes = [self.__class__]
+ while classes:
+ c = classes.pop()
+ if c is SlotObject:
+ continue
+ classes.extend(c.__bases__)
+ slots = getattr(c, "__slots__", None)
+ if not slots:
+ continue
+ for myattr in slots:
+ myvalue = kwargs.get(myattr, None)
+ setattr(self, myattr, myvalue)
+
+ def copy(self):
+ """
+ Create a new instance and copy all attributes
+ defined from __slots__ (including those from
+ inherited classes).
+ """
+ obj = self.__class__()
+
+ classes = [self.__class__]
+ while classes:
+ c = classes.pop()
+ if c is SlotObject:
+ continue
+ classes.extend(c.__bases__)
+ slots = getattr(c, "__slots__", None)
+ if not slots:
+ continue
+ for myattr in slots:
+ setattr(obj, myattr, getattr(self, myattr))
+
+ return obj
+
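+# Illustrative subclass (hypothetical, for demonstration only):
+#
+#     class Point(SlotObject):
+#         __slots__ = ("x", "y")
+#
+#     p = Point(x=1, y=2)  # each slot is filled from kwargs, or None
+#     q = p.copy()         # q.x == 1 and q.y == 2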
diff --git a/portage_with_autodep/pym/_emerge/SpawnProcess.py b/portage_with_autodep/pym/_emerge/SpawnProcess.py
new file mode 100644
index 0000000..b72971c
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/SpawnProcess.py
@@ -0,0 +1,235 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.SubProcess import SubProcess
+import sys
+from portage.cache.mappings import slot_dict_class
+import portage
+from portage import _encodings
+from portage import _unicode_encode
+from portage import os
+from portage.const import BASH_BINARY
+import fcntl
+import errno
+import gzip
+
+class SpawnProcess(SubProcess):
+
+ """
+ Constructor keyword args are passed into portage.process.spawn().
+ The required "args" keyword argument will be passed as the first
+ spawn() argument.
+ """
+
+ _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
+ "uid", "gid", "groups", "umask", "logfile",
+ "path_lookup", "pre_exec")
+
+ __slots__ = ("args",) + \
+ _spawn_kwarg_names + ("_selinux_type",)
+
+ _file_names = ("log", "process", "stdout")
+ _files_dict = slot_dict_class(_file_names, prefix="")
+
+ def _start(self):
+
+ if self.cancelled:
+ return
+
+ if self.fd_pipes is None:
+ self.fd_pipes = {}
+ fd_pipes = self.fd_pipes
+ fd_pipes.setdefault(0, sys.stdin.fileno())
+ fd_pipes.setdefault(1, sys.stdout.fileno())
+ fd_pipes.setdefault(2, sys.stderr.fileno())
+
+ # flush any pending output
+ for fd in fd_pipes.values():
+ if fd == sys.stdout.fileno():
+ sys.stdout.flush()
+ if fd == sys.stderr.fileno():
+ sys.stderr.flush()
+
+ self._files = self._files_dict()
+ files = self._files
+
+ master_fd, slave_fd = self._pipe(fd_pipes)
+ fcntl.fcntl(master_fd, fcntl.F_SETFL,
+ fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+
+ logfile = None
+ if self._can_log(slave_fd):
+ logfile = self.logfile
+
+ null_input = None
+ fd_pipes_orig = fd_pipes.copy()
+ if self.background:
+ # TODO: Use job control functions like tcsetpgrp() to control
+ # access to stdin. Until then, use /dev/null so that any
+ # attempts to read from stdin will immediately return EOF
+ # instead of blocking indefinitely.
+ null_input = open('/dev/null', 'rb')
+ fd_pipes[0] = null_input.fileno()
+ else:
+ fd_pipes[0] = fd_pipes_orig[0]
+
+ # WARNING: It is very important to use unbuffered mode here,
+ # in order to avoid issue 5380 with python3.
+ files.process = os.fdopen(master_fd, 'rb', 0)
+ if logfile is not None:
+
+ fd_pipes[1] = slave_fd
+ fd_pipes[2] = slave_fd
+
+ files.log = open(_unicode_encode(logfile,
+ encoding=_encodings['fs'], errors='strict'), mode='ab')
+ if logfile.endswith('.gz'):
+ files.log = gzip.GzipFile(filename='', mode='ab',
+ fileobj=files.log)
+
+ portage.util.apply_secpass_permissions(logfile,
+ uid=portage.portage_uid, gid=portage.portage_gid,
+ mode=0o660)
+
+ if not self.background:
+ files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
+
+ output_handler = self._output_handler
+
+ else:
+
+ # Create a dummy pipe so the scheduler can monitor
+ # the process from inside a poll() loop.
+ fd_pipes[self._dummy_pipe_fd] = slave_fd
+ if self.background:
+ fd_pipes[1] = slave_fd
+ fd_pipes[2] = slave_fd
+ output_handler = self._dummy_handler
+
+ kwargs = {}
+ for k in self._spawn_kwarg_names:
+ v = getattr(self, k)
+ if v is not None:
+ kwargs[k] = v
+
+ kwargs["fd_pipes"] = fd_pipes
+ kwargs["returnpid"] = True
+ kwargs.pop("logfile", None)
+
+ self._reg_id = self.scheduler.register(files.process.fileno(),
+ self._registered_events, output_handler)
+ self._registered = True
+
+ retval = self._spawn(self.args, **kwargs)
+
+ os.close(slave_fd)
+ if null_input is not None:
+ null_input.close()
+
+ if isinstance(retval, int):
+ # spawn failed
+ self._unregister()
+ self._set_returncode((self.pid, retval))
+ self.wait()
+ return
+
+ self.pid = retval[0]
+ portage.process.spawned_pids.remove(self.pid)
+
+ def _can_log(self, slave_fd):
+ return True
+
+ def _pipe(self, fd_pipes):
+ """
+ @type fd_pipes: dict
+ @param fd_pipes: pipes from which to copy terminal size if desired.
+ """
+ return os.pipe()
+
+ def _spawn(self, args, **kwargs):
+ spawn_func = portage.process.spawn
+
+ if self._selinux_type is not None:
+ spawn_func = portage.selinux.spawn_wrapper(spawn_func,
+ self._selinux_type)
+ # bash is an allowed entrypoint, while most binaries are not
+ if args[0] != BASH_BINARY:
+ args = [BASH_BINARY, "-c", "exec \"$@\"", args[0]] + args
+
+ return spawn_func(args, **kwargs)
+
+ def _output_handler(self, fd, event):
+
+ files = self._files
+ buf = self._read_buf(files.process, event)
+
+ if buf is not None:
+
+ if buf:
+ if not self.background:
+ write_successful = False
+ failures = 0
+ while True:
+ try:
+ if not write_successful:
+ buf.tofile(files.stdout)
+ write_successful = True
+ files.stdout.flush()
+ break
+ except IOError as e:
+ if e.errno != errno.EAGAIN:
+ raise
+ del e
+ failures += 1
+ if failures > 50:
+ # Avoid a potentially infinite loop. In
+ # most cases, the failure count is zero
+ # and it's unlikely to exceed 1.
+ raise
+
+ # This means that a subprocess has put an inherited
+ # stdio file descriptor (typically stdin) into
+ # O_NONBLOCK mode. This is not acceptable (see bug
+ # #264435), so revert it. We need to use a loop
+ # here since there's a race condition due to
+ # parallel processes being able to change the
+ # flags on the inherited file descriptor.
+ # TODO: When possible, avoid having child processes
+ # inherit stdio file descriptors from portage
+ # (maybe it can't be avoided with
+ # PROPERTIES=interactive).
+ fcntl.fcntl(files.stdout.fileno(), fcntl.F_SETFL,
+ fcntl.fcntl(files.stdout.fileno(),
+ fcntl.F_GETFL) ^ os.O_NONBLOCK)
+
+ try:
+ buf.tofile(files.log)
+ except TypeError:
+ # array.tofile() doesn't work with GzipFile
+ files.log.write(buf.tostring())
+ files.log.flush()
+ else:
+ self._unregister()
+ self.wait()
+
+ self._unregister_if_appropriate(event)
+
+ def _dummy_handler(self, fd, event):
+ """
+ This method is mainly interested in detecting EOF, since
+ the only purpose of the pipe is to allow the scheduler to
+ monitor the process from inside a poll() loop.
+ """
+
+ buf = self._read_buf(self._files.process, event)
+
+ if buf is not None:
+
+ if buf:
+ pass
+ else:
+ self._unregister()
+ self.wait()
+
+ self._unregister_if_appropriate(event)
+
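+# Minimal usage sketch (illustrative; "sched_iface" is a stand-in for a
+# scheduler object providing the register()/schedule()/unregister()
+# interface that SubProcess relies on):
+#
+#     proc = SpawnProcess(args=[BASH_BINARY, "-c", "echo hello"],
+#         scheduler=sched_iface)
+#     proc.start()
+#     proc.wait()  # proc.returncode follows subprocess.Popen conventions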
diff --git a/portage_with_autodep/pym/_emerge/SubProcess.py b/portage_with_autodep/pym/_emerge/SubProcess.py
new file mode 100644
index 0000000..b99cf0b
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/SubProcess.py
@@ -0,0 +1,141 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from _emerge.AbstractPollTask import AbstractPollTask
+import signal
+import errno
+
+class SubProcess(AbstractPollTask):
+
+ __slots__ = ("pid",) + \
+ ("_files", "_reg_id")
+
+ # A file descriptor is required for the scheduler to monitor changes from
+ # inside a poll() loop. When logging is not enabled, create a pipe just to
+ # serve this purpose alone.
+ _dummy_pipe_fd = 9
+
+ def _poll(self):
+ if self.returncode is not None:
+ return self.returncode
+ if self.pid is None:
+ return self.returncode
+ if self._registered:
+ return self.returncode
+
+ try:
+ # With waitpid and WNOHANG, only check the
+ # first element of the tuple since the second
+ # element may vary (bug #337465).
+ retval = os.waitpid(self.pid, os.WNOHANG)
+ except OSError as e:
+ if e.errno != errno.ECHILD:
+ raise
+ del e
+ retval = (self.pid, 1)
+
+ if retval[0] == 0:
+ return None
+ self._set_returncode(retval)
+ self.wait()
+ return self.returncode
+
+ def _cancel(self):
+ if self.isAlive():
+ try:
+ os.kill(self.pid, signal.SIGTERM)
+ except OSError as e:
+ if e.errno != errno.ESRCH:
+ raise
+
+ def isAlive(self):
+ return self.pid is not None and \
+ self.returncode is None
+
+ def _wait(self):
+
+ if self.returncode is not None:
+ return self.returncode
+
+ if self._registered:
+ if self.cancelled:
+ timeout = 1000
+ self.scheduler.schedule(self._reg_id, timeout=timeout)
+ if self._registered:
+ try:
+ os.kill(self.pid, signal.SIGKILL)
+ except OSError as e:
+ if e.errno != errno.ESRCH:
+ raise
+ del e
+ self.scheduler.schedule(self._reg_id, timeout=timeout)
+ if self._registered:
+ self._orphan_process_warn()
+ else:
+ self.scheduler.schedule(self._reg_id)
+ self._unregister()
+ if self.returncode is not None:
+ return self.returncode
+
+ try:
+ # With waitpid and WNOHANG, only check the
+ # first element of the tuple since the second
+ # element may vary (bug #337465).
+ wait_retval = os.waitpid(self.pid, os.WNOHANG)
+ except OSError as e:
+ if e.errno != errno.ECHILD:
+ raise
+ del e
+ self._set_returncode((self.pid, 1 << 8))
+ else:
+ if wait_retval[0] != 0:
+ self._set_returncode(wait_retval)
+ else:
+ try:
+ wait_retval = os.waitpid(self.pid, 0)
+ except OSError as e:
+ if e.errno != errno.ECHILD:
+ raise
+ del e
+ self._set_returncode((self.pid, 1 << 8))
+ else:
+ self._set_returncode(wait_retval)
+
+ return self.returncode
+
+ def _orphan_process_warn(self):
+ pass
+
+ def _unregister(self):
+ """
+ Unregister from the scheduler and close open files.
+ """
+
+ self._registered = False
+
+ if self._reg_id is not None:
+ self.scheduler.unregister(self._reg_id)
+ self._reg_id = None
+
+ if self._files is not None:
+ for f in self._files.values():
+ f.close()
+ self._files = None
+
+ def _set_returncode(self, wait_retval):
+ """
+ Set the returncode in a manner compatible with
+ subprocess.Popen.returncode: A negative value -N indicates
+ that the child was terminated by signal N (Unix only).
+ """
+
+ pid, status = wait_retval
+
+ if os.WIFSIGNALED(status):
+ retval = - os.WTERMSIG(status)
+ else:
+ retval = os.WEXITSTATUS(status)
+
+ self.returncode = retval
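+ # For example, termination by SIGTERM yields returncode == -15,
+ # while a normal exit(1) yields returncode == 1.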
+
diff --git a/portage_with_autodep/pym/_emerge/Task.py b/portage_with_autodep/pym/_emerge/Task.py
new file mode 100644
index 0000000..efbe3a9
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/Task.py
@@ -0,0 +1,42 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.SlotObject import SlotObject
+class Task(SlotObject):
+ __slots__ = ("_hash_key", "_hash_value")
+
+ def __eq__(self, other):
+ try:
+ return self._hash_key == other._hash_key
+ except AttributeError:
+ # depgraph._pkg() generates _hash_key
+ # for lookups here, so handle that
+ return self._hash_key == other
+
+ def __ne__(self, other):
+ try:
+ return self._hash_key != other._hash_key
+ except AttributeError:
+ return True
+
+ def __hash__(self):
+ return self._hash_value
+
+ def __len__(self):
+ return len(self._hash_key)
+
+ def __getitem__(self, key):
+ return self._hash_key[key]
+
+ def __iter__(self):
+ return iter(self._hash_key)
+
+ def __contains__(self, key):
+ return key in self._hash_key
+
+ def __str__(self):
+ """
+ Emulate tuple.__repr__, but don't show 'foo' as u'foo' for unicode
+ strings.
+ """
+ return "(%s)" % ", ".join(("'%s'" % x for x in self._hash_key))
diff --git a/portage_with_autodep/pym/_emerge/TaskScheduler.py b/portage_with_autodep/pym/_emerge/TaskScheduler.py
new file mode 100644
index 0000000..83c0cbe
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/TaskScheduler.py
@@ -0,0 +1,25 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.QueueScheduler import QueueScheduler
+from _emerge.SequentialTaskQueue import SequentialTaskQueue
+
+class TaskScheduler(object):
+
+ """
+ A simple way to handle scheduling of AsynchronousTask instances. Simply
+ add tasks and call run(). The run() method returns when no tasks remain.
+ """
+
+ def __init__(self, max_jobs=None, max_load=None):
+ self._queue = SequentialTaskQueue(max_jobs=max_jobs)
+ self._scheduler = QueueScheduler(
+ max_jobs=max_jobs, max_load=max_load)
+ self.sched_iface = self._scheduler.sched_iface
+ self.run = self._scheduler.run
+ self.clear = self._scheduler.clear
+ self._scheduler.add(self._queue)
+
+ def add(self, task):
+ self._queue.add(task)
+
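+# Minimal usage sketch (illustrative):
+#
+#     scheduler = TaskScheduler(max_jobs=2)
+#     for task in tasks:
+#         scheduler.add(task)
+#     scheduler.run()  # returns once every queued task has finished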
diff --git a/portage_with_autodep/pym/_emerge/TaskSequence.py b/portage_with_autodep/pym/_emerge/TaskSequence.py
new file mode 100644
index 0000000..1fecf63
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/TaskSequence.py
@@ -0,0 +1,44 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from _emerge.CompositeTask import CompositeTask
+from _emerge.AsynchronousTask import AsynchronousTask
+from collections import deque
+
+class TaskSequence(CompositeTask):
+ """
+ A collection of tasks that executes sequentially. Each task
+ must have a addExitListener() method that can be used as
+ a means to trigger movement from one task to the next.
+ """
+
+ __slots__ = ("_task_queue",)
+
+ def __init__(self, **kwargs):
+ AsynchronousTask.__init__(self, **kwargs)
+ self._task_queue = deque()
+
+ def add(self, task):
+ self._task_queue.append(task)
+
+ def _start(self):
+ self._start_next_task()
+
+ def _cancel(self):
+ self._task_queue.clear()
+ CompositeTask._cancel(self)
+
+ def _start_next_task(self):
+ self._start_task(self._task_queue.popleft(),
+ self._task_exit_handler)
+
+ def _task_exit_handler(self, task):
+ if self._default_exit(task) != os.EX_OK:
+ self.wait()
+ elif self._task_queue:
+ self._start_next_task()
+ else:
+ self._final_exit(task)
+ self.wait()
+
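+# Illustrative composition (hypothetical tasks; the second task starts
+# only after the first exits successfully):
+#
+#     seq = TaskSequence(scheduler=sched_iface)
+#     seq.add(first_task)
+#     seq.add(second_task)
+#     seq.start()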
diff --git a/portage_with_autodep/pym/_emerge/UninstallFailure.py b/portage_with_autodep/pym/_emerge/UninstallFailure.py
new file mode 100644
index 0000000..e4f2834
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/UninstallFailure.py
@@ -0,0 +1,15 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+
+class UninstallFailure(portage.exception.PortageException):
+ """
+ An instance of this class is raised by unmerge() when
+ an uninstallation fails.
+ """
+ status = 1
+ def __init__(self, *pargs):
+ portage.exception.PortageException.__init__(self, pargs)
+ if pargs:
+ self.status = pargs[0]
diff --git a/portage_with_autodep/pym/_emerge/UnmergeDepPriority.py b/portage_with_autodep/pym/_emerge/UnmergeDepPriority.py
new file mode 100644
index 0000000..4316600
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/UnmergeDepPriority.py
@@ -0,0 +1,41 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AbstractDepPriority import AbstractDepPriority
+class UnmergeDepPriority(AbstractDepPriority):
+ __slots__ = ("ignored", "optional", "satisfied",)
+ """
+ Combination of properties Priority Category
+
+ runtime 0 HARD
+ runtime_post -1 HARD
+ buildtime -2 SOFT
+ (none of the above) -2 SOFT
+ """
+
+ MAX = 0
+ SOFT = -2
+ MIN = -2
+
+ def __init__(self, **kwargs):
+ AbstractDepPriority.__init__(self, **kwargs)
+ if self.buildtime:
+ self.optional = True
+
+ def __int__(self):
+ if self.runtime:
+ return 0
+ if self.runtime_post:
+ return -1
+ if self.buildtime:
+ return -2
+ return -2
+
+ def __str__(self):
+ if self.ignored:
+ return "ignored"
+ myvalue = self.__int__()
+ if myvalue > self.SOFT:
+ return "hard"
+ return "soft"
+
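+# Worked examples of the table above (illustrative):
+#
+#     UnmergeDepPriority(runtime=True)    # __int__() == 0, str: "hard"
+#     UnmergeDepPriority(buildtime=True)  # __int__() == -2, str: "soft",
+#                                         # and optional is set to True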
diff --git a/portage_with_autodep/pym/_emerge/UseFlagDisplay.py b/portage_with_autodep/pym/_emerge/UseFlagDisplay.py
new file mode 100644
index 0000000..3daca19
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/UseFlagDisplay.py
@@ -0,0 +1,122 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from itertools import chain
+import sys
+
+from portage import _encodings, _unicode_decode, _unicode_encode
+from portage.output import red
+from portage.util import cmp_sort_key
+from portage.output import blue
+
+class UseFlagDisplay(object):
+
+ __slots__ = ('name', 'enabled', 'forced')
+
+ def __init__(self, name, enabled, forced):
+ self.name = name
+ self.enabled = enabled
+ self.forced = forced
+
+ def __str__(self):
+ s = self.name
+ if self.enabled:
+ s = red(s)
+ else:
+ s = '-' + s
+ s = blue(s)
+ if self.forced:
+ s = '(%s)' % s
+ return s
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(),
+ encoding=_encodings['content'])
+
+ def _cmp_combined(a, b):
+ """
+ Sort by name, combining enabled and disabled flags.
+ """
+ return (a.name > b.name) - (a.name < b.name)
+
+ sort_combined = cmp_sort_key(_cmp_combined)
+ del _cmp_combined
+
+ def _cmp_separated(a, b):
+ """
+ Sort by name, separating enabled flags from disabled flags.
+ """
+ enabled_diff = b.enabled - a.enabled
+ if enabled_diff:
+ return enabled_diff
+ return (a.name > b.name) - (a.name < b.name)
+
+ sort_separated = cmp_sort_key(_cmp_separated)
+ del _cmp_separated
+
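+# Illustrative rendering (color markup comes from portage.output):
+#
+#     str(UseFlagDisplay("ssl", True, False))  # "ssl" in red
+#     str(UseFlagDisplay("ssl", False, True))  # "(-ssl)", blue and
+#                                              # parenthesized as forced
+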
+def pkg_use_display(pkg, opts, modified_use=None):
+ settings = pkg.root_config.settings
+ use_expand = pkg.use.expand
+ use_expand_hidden = pkg.use.expand_hidden
+ alphabetical_use = '--alphabetical' in opts
+ forced_flags = set(chain(pkg.use.force,
+ pkg.use.mask))
+ if modified_use is None:
+ use = set(pkg.use.enabled)
+ else:
+ use = set(modified_use)
+ use.discard(settings.get('ARCH'))
+ use_expand_flags = set()
+ use_enabled = {}
+ use_disabled = {}
+ for varname in use_expand:
+ flag_prefix = varname.lower() + "_"
+ for f in use:
+ if f.startswith(flag_prefix):
+ use_expand_flags.add(f)
+ use_enabled.setdefault(
+ varname.upper(), []).append(f[len(flag_prefix):])
+
+ for f in pkg.iuse.all:
+ if f.startswith(flag_prefix):
+ use_expand_flags.add(f)
+ if f not in use:
+ use_disabled.setdefault(
+ varname.upper(), []).append(f[len(flag_prefix):])
+
+ var_order = set(use_enabled)
+ var_order.update(use_disabled)
+ var_order = sorted(var_order)
+ var_order.insert(0, 'USE')
+ use.difference_update(use_expand_flags)
+ use_enabled['USE'] = list(use)
+ use_disabled['USE'] = []
+
+ for f in pkg.iuse.all:
+ if f not in use and \
+ f not in use_expand_flags:
+ use_disabled['USE'].append(f)
+
+ flag_displays = []
+ for varname in var_order:
+ if varname.lower() in use_expand_hidden:
+ continue
+ flags = []
+ for f in use_enabled.get(varname, []):
+ flags.append(UseFlagDisplay(f, True, f in forced_flags))
+ for f in use_disabled.get(varname, []):
+ flags.append(UseFlagDisplay(f, False, f in forced_flags))
+ if alphabetical_use:
+ flags.sort(key=UseFlagDisplay.sort_combined)
+ else:
+ flags.sort(key=UseFlagDisplay.sort_separated)
+ # Use _unicode_decode() to force unicode format string so
+ # that UseFlagDisplay.__unicode__() is called in python2.
+ flag_displays.append('%s="%s"' % (varname,
+ ' '.join(_unicode_decode("%s") % (f,) for f in flags)))
+
+ return ' '.join(flag_displays)
diff --git a/portage_with_autodep/pym/_emerge/__init__.py b/portage_with_autodep/pym/_emerge/__init__.py
new file mode 100644
index 0000000..f98c564
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/portage_with_autodep/pym/_emerge/_find_deep_system_runtime_deps.py b/portage_with_autodep/pym/_emerge/_find_deep_system_runtime_deps.py
new file mode 100644
index 0000000..ca09d83
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/_find_deep_system_runtime_deps.py
@@ -0,0 +1,38 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.DepPriority import DepPriority
+from _emerge.Package import Package
+
+def _find_deep_system_runtime_deps(graph):
+ deep_system_deps = set()
+ node_stack = []
+ for node in graph:
+ if not isinstance(node, Package) or \
+ node.operation == 'uninstall':
+ continue
+ if node.root_config.sets['system'].findAtomForPackage(node):
+ node_stack.append(node)
+
+ def ignore_priority(priority):
+ """
+ Ignore non-runtime priorities.
+ """
+ if isinstance(priority, DepPriority) and \
+ (priority.runtime or priority.runtime_post):
+ return False
+ return True
+
+ while node_stack:
+ node = node_stack.pop()
+ if node in deep_system_deps:
+ continue
+ deep_system_deps.add(node)
+ for child in graph.child_nodes(node, ignore_priority=ignore_priority):
+ if not isinstance(child, Package) or \
+ child.operation == 'uninstall':
+ continue
+ node_stack.append(child)
+
+ return deep_system_deps
+
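+# Typical call site (illustrative): given a scheduler's dependency
+# digraph,
+#
+#     deep_system_deps = _find_deep_system_runtime_deps(graph)
+#
+# returns every Package reachable from a system-set package through an
+# unbroken chain of runtime (RDEPEND/PDEPEND) edges.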
diff --git a/portage_with_autodep/pym/_emerge/_flush_elog_mod_echo.py b/portage_with_autodep/pym/_emerge/_flush_elog_mod_echo.py
new file mode 100644
index 0000000..eab4168
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/_flush_elog_mod_echo.py
@@ -0,0 +1,15 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.elog import mod_echo
+
+def _flush_elog_mod_echo():
+ """
+ Dump the mod_echo output now so that our other
+ notifications are shown last.
+ @rtype: bool
+ @returns: True if messages were shown, False otherwise.
+ """
+ messages_shown = bool(mod_echo._items)
+ mod_echo.finalize()
+ return messages_shown
diff --git a/portage_with_autodep/pym/_emerge/actions.py b/portage_with_autodep/pym/_emerge/actions.py
new file mode 100644
index 0000000..2166963
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/actions.py
@@ -0,0 +1,3123 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import errno
+import logging
+import platform
+import pwd
+import random
+import re
+import shutil
+import signal
+import socket
+import stat
+import sys
+import tempfile
+import textwrap
+import time
+from itertools import chain
+
+import portage
+from portage import os
+from portage import subprocess_getstatusoutput
+from portage import _unicode_decode
+from portage.cache.cache_errors import CacheError
+from portage.const import GLOBAL_CONFIG_PATH, NEWS_LIB_PATH
+from portage.const import _ENABLE_DYN_LINK_MAP, _ENABLE_SET_CONFIG
+from portage.dbapi.dep_expand import dep_expand
+from portage.dbapi._expand_new_virt import expand_new_virt
+from portage.dep import Atom, extended_cp_match
+from portage.exception import InvalidAtom
+from portage.output import blue, bold, colorize, create_color_func, darkgreen, \
+ red, yellow
+good = create_color_func("GOOD")
+bad = create_color_func("BAD")
+from portage.package.ebuild._ipc.QueryCommand import QueryCommand
+from portage.package.ebuild.doebuild import _check_temp_dir
+from portage._sets import load_default_config, SETPREFIX
+from portage._sets.base import InternalPackageSet
+from portage.util import cmp_sort_key, writemsg, \
+ writemsg_level, writemsg_stdout
+from portage.util.digraph import digraph
+from portage._global_updates import _global_updates
+
+from _emerge.clear_caches import clear_caches
+from _emerge.countdown import countdown
+from _emerge.create_depgraph_params import create_depgraph_params
+from _emerge.Dependency import Dependency
+from _emerge.depgraph import backtrack_depgraph, depgraph, resume_depgraph
+from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
+from _emerge.emergelog import emergelog
+from _emerge.is_valid_package_atom import is_valid_package_atom
+from _emerge.MetadataRegen import MetadataRegen
+from _emerge.Package import Package
+from _emerge.ProgressHandler import ProgressHandler
+from _emerge.RootConfig import RootConfig
+from _emerge.Scheduler import Scheduler
+from _emerge.search import search
+from _emerge.SetArg import SetArg
+from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
+from _emerge.sync.getaddrinfo_validate import getaddrinfo_validate
+from _emerge.sync.old_tree_timestamp import old_tree_timestamp_warn
+from _emerge.unmerge import unmerge
+from _emerge.UnmergeDepPriority import UnmergeDepPriority
+from _emerge.UseFlagDisplay import pkg_use_display
+from _emerge.userquery import userquery
+
+if sys.hexversion >= 0x3000000:
+ long = int
+
+def action_build(settings, trees, mtimedb,
+ myopts, myaction, myfiles, spinner):
+
+ if '--usepkgonly' not in myopts:
+ old_tree_timestamp_warn(settings['PORTDIR'], settings)
+
+ # It's best for config updates in /etc/portage to be processed
+ # before we get here, so warn if they're not (bug #267103).
+ chk_updated_cfg_files(settings['EROOT'], ['/etc/portage'])
+
+ # validate the state of the resume data
+ # so that we can make assumptions later.
+ for k in ("resume", "resume_backup"):
+ if k not in mtimedb:
+ continue
+ resume_data = mtimedb[k]
+ if not isinstance(resume_data, dict):
+ del mtimedb[k]
+ continue
+ mergelist = resume_data.get("mergelist")
+ if not isinstance(mergelist, list):
+ del mtimedb[k]
+ continue
+ for x in mergelist:
+ if not (isinstance(x, list) and len(x) == 4):
+ continue
+ pkg_type, pkg_root, pkg_key, pkg_action = x
+ if pkg_root not in trees:
+ # Current $ROOT setting differs,
+ # so the list must be stale.
+ mergelist = None
+ break
+ if not mergelist:
+ del mtimedb[k]
+ continue
+ resume_opts = resume_data.get("myopts")
+ if not isinstance(resume_opts, (dict, list)):
+ del mtimedb[k]
+ continue
+ favorites = resume_data.get("favorites")
+ if not isinstance(favorites, list):
+ del mtimedb[k]
+ continue
+
+ resume = False
+ if "--resume" in myopts and \
+ ("resume" in mtimedb or
+ "resume_backup" in mtimedb):
+ resume = True
+ if "resume" not in mtimedb:
+ mtimedb["resume"] = mtimedb["resume_backup"]
+ del mtimedb["resume_backup"]
+ mtimedb.commit()
+ # "myopts" is a list for backward compatibility.
+ resume_opts = mtimedb["resume"].get("myopts", [])
+ if isinstance(resume_opts, list):
+ resume_opts = dict((k,True) for k in resume_opts)
+ for opt in ("--ask", "--color", "--skipfirst", "--tree"):
+ resume_opts.pop(opt, None)
+
+ # Current options always override resume_opts.
+ resume_opts.update(myopts)
+ myopts.clear()
+ myopts.update(resume_opts)
+
+ if "--debug" in myopts:
+ writemsg_level("myopts %s\n" % (myopts,))
+
+ # Adjust config according to options of the command being resumed.
+ for myroot in trees:
+ mysettings = trees[myroot]["vartree"].settings
+ mysettings.unlock()
+ adjust_config(myopts, mysettings)
+ mysettings.lock()
+ del myroot, mysettings
+
+ ldpath_mtimes = mtimedb["ldpath"]
+ favorites=[]
+ buildpkgonly = "--buildpkgonly" in myopts
+ pretend = "--pretend" in myopts
+ fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
+ ask = "--ask" in myopts
+ enter_invalid = '--ask-enter-invalid' in myopts
+ nodeps = "--nodeps" in myopts
+ oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
+ tree = "--tree" in myopts
+ if nodeps and tree:
+ tree = False
+ del myopts["--tree"]
+ portage.writemsg(colorize("WARN", " * ") + \
+ "--tree is broken with --nodeps. Disabling...\n")
+ debug = "--debug" in myopts
+ verbose = "--verbose" in myopts
+ quiet = "--quiet" in myopts
+ myparams = create_depgraph_params(myopts, myaction)
+
+ if pretend or fetchonly:
+ # make the mtimedb readonly
+ mtimedb.filename = None
+ if '--digest' in myopts or 'digest' in settings.features:
+ if '--digest' in myopts:
+ msg = "The --digest option"
+ else:
+ msg = "The FEATURES=digest setting"
+
+ msg += " can prevent corruption from being" + \
+ " noticed. The `repoman manifest` command is the preferred" + \
+ " way to generate manifests and it is capable of doing an" + \
+ " entire repository or category at once."
+ prefix = bad(" * ")
+ writemsg(prefix + "\n")
+ from textwrap import wrap
+ for line in wrap(msg, 72):
+ writemsg("%s%s\n" % (prefix, line))
+ writemsg(prefix + "\n")
+
+ if resume:
+ favorites = mtimedb["resume"].get("favorites")
+ if not isinstance(favorites, list):
+ favorites = []
+
+ resume_data = mtimedb["resume"]
+ mergelist = resume_data["mergelist"]
+ if mergelist and "--skipfirst" in myopts:
+ for i, task in enumerate(mergelist):
+ if isinstance(task, list) and \
+ task and task[-1] == "merge":
+ del mergelist[i]
+ break
+
+ success = False
+ mydepgraph = None
+ try:
+ success, mydepgraph, dropped_tasks = resume_depgraph(
+ settings, trees, mtimedb, myopts, myparams, spinner)
+ except (portage.exception.PackageNotFound,
+ depgraph.UnsatisfiedResumeDep) as e:
+ if isinstance(e, depgraph.UnsatisfiedResumeDep):
+ mydepgraph = e.depgraph
+
+ from textwrap import wrap
+ from portage.output import EOutput
+ out = EOutput()
+
+ resume_data = mtimedb["resume"]
+ mergelist = resume_data.get("mergelist")
+ if not isinstance(mergelist, list):
+ mergelist = []
+ if mergelist and (debug or (verbose and not quiet)):
+ out.eerror("Invalid resume list:")
+ out.eerror("")
+ indent = " "
+ for task in mergelist:
+ if isinstance(task, list):
+ out.eerror(indent + str(tuple(task)))
+ out.eerror("")
+
+ if isinstance(e, depgraph.UnsatisfiedResumeDep):
+ out.eerror("One or more packages are either masked or " + \
+ "have missing dependencies:")
+ out.eerror("")
+ indent = " "
+ for dep in e.value:
+ if dep.atom is None:
+ out.eerror(indent + "Masked package:")
+ out.eerror(2 * indent + str(dep.parent))
+ out.eerror("")
+ else:
+ out.eerror(indent + str(dep.atom) + " pulled in by:")
+ out.eerror(2 * indent + str(dep.parent))
+ out.eerror("")
+ msg = "The resume list contains packages " + \
+ "that are either masked or have " + \
+ "unsatisfied dependencies. " + \
+ "Please restart/continue " + \
+ "the operation manually, or use --skipfirst " + \
+ "to skip the first package in the list and " + \
+ "any other packages that may be " + \
+ "masked or have missing dependencies."
+ for line in wrap(msg, 72):
+ out.eerror(line)
+ elif isinstance(e, portage.exception.PackageNotFound):
+ out.eerror("An expected package is " + \
+ "not available: %s" % str(e))
+ out.eerror("")
+ msg = "The resume list contains one or more " + \
+ "packages that are no longer " + \
+ "available. Please restart/continue " + \
+ "the operation manually."
+ for line in wrap(msg, 72):
+ out.eerror(line)
+
+ if success:
+ if dropped_tasks:
+ portage.writemsg("!!! One or more packages have been " + \
+ "dropped due to\n" + \
+ "!!! masking or unsatisfied dependencies:\n\n",
+ noiselevel=-1)
+ for task in dropped_tasks:
+ portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
+ portage.writemsg("\n", noiselevel=-1)
+ del dropped_tasks
+ else:
+ if mydepgraph is not None:
+ mydepgraph.display_problems()
+ if not (ask or pretend):
+ # delete the current list and also the backup
+ # since it's probably stale too.
+ for k in ("resume", "resume_backup"):
+ mtimedb.pop(k, None)
+ mtimedb.commit()
+
+ return 1
+ else:
+ if ("--resume" in myopts):
+ print(darkgreen("emerge: It seems we have nothing to resume..."))
+ return os.EX_OK
+
+ try:
+ success, mydepgraph, favorites = backtrack_depgraph(
+ settings, trees, myopts, myparams, myaction, myfiles, spinner)
+ except portage.exception.PackageSetNotFound as e:
+ root_config = trees[settings["ROOT"]]["root_config"]
+ display_missing_pkg_set(root_config, e.value)
+ return 1
+
+ if not success:
+ mydepgraph.display_problems()
+ return 1
+
+ if "--pretend" not in myopts and \
+ ("--ask" in myopts or "--tree" in myopts or \
+ "--verbose" in myopts) and \
+ not ("--quiet" in myopts and "--ask" not in myopts):
+ if "--resume" in myopts:
+ mymergelist = mydepgraph.altlist()
+ if len(mymergelist) == 0:
+ print(colorize("INFORM", "emerge: It seems we have nothing to resume..."))
+ return os.EX_OK
+ favorites = mtimedb["resume"]["favorites"]
+ retval = mydepgraph.display(
+ mydepgraph.altlist(reversed=tree),
+ favorites=favorites)
+ mydepgraph.display_problems()
+ if retval != os.EX_OK:
+ return retval
+ prompt="Would you like to resume merging these packages?"
+ else:
+ retval = mydepgraph.display(
+ mydepgraph.altlist(reversed=("--tree" in myopts)),
+ favorites=favorites)
+ mydepgraph.display_problems()
+ if retval != os.EX_OK:
+ return retval
+ mergecount = 0
+ for x in mydepgraph.altlist():
+ if isinstance(x, Package) and x.operation == "merge":
+ mergecount += 1
+
+ if mergecount == 0:
+ sets = trees[settings["ROOT"]]["root_config"].sets
+ world_candidates = None
+ if "selective" in myparams and \
+ not oneshot and favorites:
+ # Sets that are not world candidates are filtered
+ # out here since the favorites list needs to be
+ # complete for depgraph.loadResumeCommand() to
+ # operate correctly.
+ world_candidates = [x for x in favorites \
+ if not (x.startswith(SETPREFIX) and \
+ not sets[x[1:]].world_candidate)]
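+ # Illustrative case: with favorites of ["app-editors/vim",
+ # "@some-set"] where the hypothetical @some-set has
+ # world_candidate=False, only app-editors/vim is offered for the
+ # world file below, while the full favorites list stays intact for
+ # depgraph.loadResumeCommand().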
+ if "selective" in myparams and \
+ not oneshot and world_candidates:
+ print()
+ for x in world_candidates:
+ print(" %s %s" % (good("*"), x))
+ prompt="Would you like to add these packages to your world favorites?"
+ elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
+ prompt="Nothing to merge; would you like to auto-clean packages?"
+ else:
+ print()
+ print("Nothing to merge; quitting.")
+ print()
+ return os.EX_OK
+ elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
+ prompt="Would you like to fetch the source files for these packages?"
+ else:
+ prompt="Would you like to merge these packages?"
+ print()
+ if "--ask" in myopts and userquery(prompt, enter_invalid) == "No":
+ print()
+ print("Quitting.")
+ print()
+ return os.EX_OK
+ # Don't ask again (e.g. when auto-cleaning packages after merge)
+ myopts.pop("--ask", None)
+
+ if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
+ if ("--resume" in myopts):
+ mymergelist = mydepgraph.altlist()
+ if len(mymergelist) == 0:
+ print(colorize("INFORM", "emerge: It seems we have nothing to resume..."))
+ return os.EX_OK
+ favorites = mtimedb["resume"]["favorites"]
+ retval = mydepgraph.display(
+ mydepgraph.altlist(reversed=tree),
+ favorites=favorites)
+ mydepgraph.display_problems()
+ if retval != os.EX_OK:
+ return retval
+ else:
+ retval = mydepgraph.display(
+ mydepgraph.altlist(reversed=("--tree" in myopts)),
+ favorites=favorites)
+ mydepgraph.display_problems()
+ if retval != os.EX_OK:
+ return retval
+ if "--buildpkgonly" in myopts:
+ graph_copy = mydepgraph._dynamic_config.digraph.copy()
+ removed_nodes = set()
+ for node in graph_copy:
+ if not isinstance(node, Package) or \
+ node.operation == "nomerge":
+ removed_nodes.add(node)
+ graph_copy.difference_update(removed_nodes)
+ if not graph_copy.hasallzeros(ignore_priority = \
+ DepPrioritySatisfiedRange.ignore_medium):
+ print("\n!!! --buildpkgonly requires all dependencies to be merged.")
+ print("!!! You have to merge the dependencies before you can build this package.\n")
+ return 1
+ else:
+ if "--buildpkgonly" in myopts:
+ graph_copy = mydepgraph._dynamic_config.digraph.copy()
+ removed_nodes = set()
+ for node in graph_copy:
+ if not isinstance(node, Package) or \
+ node.operation == "nomerge":
+ removed_nodes.add(node)
+ graph_copy.difference_update(removed_nodes)
+ if not graph_copy.hasallzeros(ignore_priority = \
+ DepPrioritySatisfiedRange.ignore_medium):
+ print("\n!!! --buildpkgonly requires all dependencies to be merged.")
+ print("!!! Cannot merge requested packages. Merge deps and try again.\n")
+ return 1
+
+ if ("--resume" in myopts):
+ favorites=mtimedb["resume"]["favorites"]
+
+ else:
+ if "resume" in mtimedb and \
+ "mergelist" in mtimedb["resume"] and \
+ len(mtimedb["resume"]["mergelist"]) > 1:
+ mtimedb["resume_backup"] = mtimedb["resume"]
+ del mtimedb["resume"]
+ mtimedb.commit()
+
+ mydepgraph.saveNomergeFavorites()
+
+ mergetask = Scheduler(settings, trees, mtimedb, myopts,
+ spinner, favorites=favorites,
+ graph_config=mydepgraph.schedulerGraph())
+
+ del mydepgraph
+ clear_caches(trees)
+
+ retval = mergetask.merge()
+
+ if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
+ if "yes" == settings.get("AUTOCLEAN"):
+ portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
+ unmerge(trees[settings["ROOT"]]["root_config"],
+ myopts, "clean", [],
+ ldpath_mtimes, autoclean=1)
+ else:
+ portage.writemsg_stdout(colorize("WARN", "WARNING:")
+ + " AUTOCLEAN is disabled. This can cause serious"
+ + " problems due to overlapping packages.\n")
+
+ return retval
+
+def action_config(settings, trees, myopts, myfiles):
+ enter_invalid = '--ask-enter-invalid' in myopts
+ if len(myfiles) != 1:
+ print(red("!!! config can only take a single package atom at this time\n"))
+ sys.exit(1)
+ if not is_valid_package_atom(myfiles[0]):
+ portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
+ noiselevel=-1)
+ portage.writemsg("!!! Please check ebuild(5) for full details.\n")
+ portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
+ sys.exit(1)
+ print()
+ try:
+ pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
+ except portage.exception.AmbiguousPackageName as e:
+ # Multiple matches thrown from cpv_expand
+ pkgs = e.args[0]
+ if len(pkgs) == 0:
+ print("No packages found.\n")
+ sys.exit(0)
+ elif len(pkgs) > 1:
+ if "--ask" in myopts:
+ options = []
+ print("Please select a package to configure:")
+ idx = 0
+ for pkg in pkgs:
+ idx += 1
+ options.append(str(idx))
+ print(options[-1]+") "+pkg)
+ print("X) Cancel")
+ options.append("X")
+ idx = userquery("Selection?", enter_invalid, responses=options)
+ if idx == "X":
+ sys.exit(0)
+ pkg = pkgs[int(idx)-1]
+ else:
+ print("The following packages available:")
+ for pkg in pkgs:
+ print("* "+pkg)
+ print("\nPlease use a specific atom or the --ask option.")
+ sys.exit(1)
+ else:
+ pkg = pkgs[0]
+
+ print()
+ if "--ask" in myopts:
+ if userquery("Ready to configure %s?" % pkg, enter_invalid) == "No":
+ sys.exit(0)
+ else:
+ print("Configuring pkg...")
+ print()
+ ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
+ mysettings = portage.config(clone=settings)
+ vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
+ debug = mysettings.get("PORTAGE_DEBUG") == "1"
+ retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
+ mysettings,
+ debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
+ mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
+ if retval == os.EX_OK:
+ portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
+ mysettings, debug=debug, mydbapi=vardb, tree="vartree")
+ print()
+
+def action_depclean(settings, trees, ldpath_mtimes,
+ myopts, action, myfiles, spinner, scheduler=None):
+ # Kill packages that aren't explicitly merged or are required as a
+ # dependency of another package. World file is explicit.
+
+ # Global depclean or prune operations are not very safe when there are
+ # missing dependencies since it's unknown how badly incomplete
+ # the dependency graph is, and we might accidentally remove packages
+ # that should have been pulled into the graph. On the other hand, it's
+ # relatively safe to ignore missing deps when only asked to remove
+ # specific packages.
+
+ msg = []
+ if not _ENABLE_DYN_LINK_MAP:
+ msg.append("Depclean may break link level dependencies. Thus, it is\n")
+ msg.append("recommended to use a tool such as " + good("`revdep-rebuild`") + " (from\n")
+ msg.append("app-portage/gentoolkit) in order to detect such breakage.\n")
+ msg.append("\n")
+ msg.append("Always study the list of packages to be cleaned for any obvious\n")
+ msg.append("mistakes. Packages that are part of the world set will always\n")
+ msg.append("be kept. They can be manually added to this set with\n")
+ msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
+ msg.append("package.provided (see portage(5)) will be removed by\n")
+ msg.append("depclean, even if they are part of the world set.\n")
+ msg.append("\n")
+ msg.append("As a safety measure, depclean will not remove any packages\n")
+ msg.append("unless *all* required dependencies have been resolved. As a\n")
+ msg.append("consequence, it is often necessary to run %s\n" % \
+ good("`emerge --update"))
+ msg.append(good("--newuse --deep @world`") + \
+ " prior to depclean.\n")
+
+ if action == "depclean" and "--quiet" not in myopts and not myfiles:
+ portage.writemsg_stdout("\n")
+ for x in msg:
+ portage.writemsg_stdout(colorize("WARN", " * ") + x)
+
+ root_config = trees[settings['ROOT']]['root_config']
+ vardb = root_config.trees['vartree'].dbapi
+
+ args_set = InternalPackageSet(allow_repo=True)
+ if myfiles:
+ args_set.update(myfiles)
+ matched_packages = False
+ for x in args_set:
+ if vardb.match(x):
+ matched_packages = True
+ else:
+ writemsg_level("--- Couldn't find '%s' to %s.\n" % \
+ (x.replace("null/", ""), action),
+ level=logging.WARN, noiselevel=-1)
+ if not matched_packages:
+ writemsg_level(">>> No packages selected for removal by %s\n" % \
+ action)
+ return 0
+
+ # The calculation is done in a separate function so that depgraph
+ # references go out of scope and the corresponding memory
+ # is freed before we call unmerge().
+ rval, cleanlist, ordered, req_pkg_count = \
+ calc_depclean(settings, trees, ldpath_mtimes,
+ myopts, action, args_set, spinner)
+
+ clear_caches(trees)
+
+ if rval != os.EX_OK:
+ return rval
+
+ if cleanlist:
+ unmerge(root_config, myopts, "unmerge",
+ cleanlist, ldpath_mtimes, ordered=ordered,
+ scheduler=scheduler)
+
+ if action == "prune":
+ return
+
+ if not cleanlist and "--quiet" in myopts:
+ return
+
+ print("Packages installed: " + str(len(vardb.cpv_all())))
+ print("Packages in world: " + \
+ str(len(root_config.sets["selected"].getAtoms())))
+ print("Packages in system: " + \
+ str(len(root_config.sets["system"].getAtoms())))
+ print("Required packages: "+str(req_pkg_count))
+ if "--pretend" in myopts:
+ print("Number to remove: "+str(len(cleanlist)))
+ else:
+ print("Number removed: "+str(len(cleanlist)))
+
+def calc_depclean(settings, trees, ldpath_mtimes,
+ myopts, action, args_set, spinner):
+ allow_missing_deps = bool(args_set)
+
+ debug = '--debug' in myopts
+ xterm_titles = "notitles" not in settings.features
+ myroot = settings["ROOT"]
+ root_config = trees[myroot]["root_config"]
+ psets = root_config.setconfig.psets
+ deselect = myopts.get('--deselect') != 'n'
+ required_sets = {}
+ required_sets['world'] = psets['world']
+
+ # When removing packages, a temporary version of the world 'selected'
+ # set may be used which excludes packages that are intended to be
+ # eligible for removal.
+ selected_set = psets['selected']
+ required_sets['selected'] = selected_set
+ protected_set = InternalPackageSet()
+ protected_set_name = '____depclean_protected_set____'
+ required_sets[protected_set_name] = protected_set
+ system_set = psets["system"]
+
+ if not system_set or not selected_set:
+
+ if not system_set:
+ writemsg_level("!!! You have no system list.\n",
+ level=logging.ERROR, noiselevel=-1)
+
+ if not selected_set:
+ writemsg_level("!!! You have no world file.\n",
+ level=logging.WARNING, noiselevel=-1)
+
+ writemsg_level("!!! Proceeding is likely to " + \
+ "break your installation.\n",
+ level=logging.WARNING, noiselevel=-1)
+ if "--pretend" not in myopts:
+ countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
+
+ if action == "depclean":
+ emergelog(xterm_titles, " >>> depclean")
+
+ writemsg_level("\nCalculating dependencies ")
+ resolver_params = create_depgraph_params(myopts, "remove")
+ resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
+ resolver._load_vdb()
+ vardb = resolver._frozen_config.trees[myroot]["vartree"].dbapi
+ real_vardb = trees[myroot]["vartree"].dbapi
+
+ if action == "depclean":
+
+ if args_set:
+
+ if deselect:
+ # Start with an empty set.
+ selected_set = InternalPackageSet()
+ required_sets['selected'] = selected_set
+ # Pull in any sets nested within the selected set.
+ selected_set.update(psets['selected'].getNonAtoms())
+
+ # Pull in everything that's installed but not matched
+ # by an argument atom since we don't want to clean any
+ # package if something depends on it.
+ for pkg in vardb:
+ if spinner:
+ spinner.update()
+
+ try:
+ if args_set.findAtomForPackage(pkg) is None:
+ protected_set.add("=" + pkg.cpv)
+ continue
+ except portage.exception.InvalidDependString as e:
+ show_invalid_depstring_notice(pkg,
+ pkg.metadata["PROVIDE"], str(e))
+ del e
+ protected_set.add("=" + pkg.cpv)
+ continue
+
+ elif action == "prune":
+
+ if deselect:
+ # Start with an empty set.
+ selected_set = InternalPackageSet()
+ required_sets['selected'] = selected_set
+ # Pull in any sets nested within the selected set.
+ selected_set.update(psets['selected'].getNonAtoms())
+
+ # Pull in everything that's installed since we don't want
+ # to prune a package if something depends on it.
+ protected_set.update(vardb.cp_all())
+
+ if not args_set:
+
+ # Try to prune everything that's slotted.
+ for cp in vardb.cp_all():
+ if len(vardb.cp_list(cp)) > 1:
+ args_set.add(cp)
+
+ # Remove atoms from world that match installed packages
+ # that are also matched by argument atoms, but do not remove
+ # them if they match the highest installed version.
+ for pkg in vardb:
+ if spinner:
+ spinner.update()
+ pkgs_for_cp = vardb.match_pkgs(pkg.cp)
+ if not pkgs_for_cp or pkg not in pkgs_for_cp:
+ raise AssertionError("package expected in matches: " + \
+ "cp = %s, cpv = %s matches = %s" % \
+ (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
+
+ highest_version = pkgs_for_cp[-1]
+ if pkg == highest_version:
+ # pkg is the highest version
+ protected_set.add("=" + pkg.cpv)
+ continue
+
+ if len(pkgs_for_cp) <= 1:
+ raise AssertionError("more packages expected: " + \
+ "cp = %s, cpv = %s matches = %s" % \
+ (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
+
+ try:
+ if args_set.findAtomForPackage(pkg) is None:
+ protected_set.add("=" + pkg.cpv)
+ continue
+ except portage.exception.InvalidDependString as e:
+ show_invalid_depstring_notice(pkg,
+ pkg.metadata["PROVIDE"], str(e))
+ del e
+ protected_set.add("=" + pkg.cpv)
+ continue
+
+ if resolver._frozen_config.excluded_pkgs:
+ excluded_set = resolver._frozen_config.excluded_pkgs
+ required_sets['__excluded__'] = InternalPackageSet()
+
+ for pkg in vardb:
+ if spinner:
+ spinner.update()
+
+ try:
+ if excluded_set.findAtomForPackage(pkg):
+ required_sets['__excluded__'].add("=" + pkg.cpv)
+ except portage.exception.InvalidDependString as e:
+ show_invalid_depstring_notice(pkg,
+ pkg.metadata["PROVIDE"], str(e))
+ del e
+ required_sets['__excluded__'].add("=" + pkg.cpv)
+
+ success = resolver._complete_graph(required_sets={myroot:required_sets})
+ writemsg_level("\b\b... done!\n")
+
+ resolver.display_problems()
+
+ if not success:
+ return 1, [], False, 0
+
+ def unresolved_deps():
+
+ unresolvable = set()
+ for dep in resolver._dynamic_config._initially_unsatisfied_deps:
+ if isinstance(dep.parent, Package) and \
+ (dep.priority > UnmergeDepPriority.SOFT):
+ unresolvable.add((dep.atom, dep.parent.cpv))
+
+ if not unresolvable:
+ return False
+
+ if unresolvable and not allow_missing_deps:
+
+ if "--debug" in myopts:
+ writemsg("\ndigraph:\n\n", noiselevel=-1)
+ resolver._dynamic_config.digraph.debug_print()
+ writemsg("\n", noiselevel=-1)
+
+ prefix = bad(" * ")
+ msg = []
+ msg.append("Dependencies could not be completely resolved due to")
+ msg.append("the following required packages not being installed:")
+ msg.append("")
+ for atom, parent in unresolvable:
+ msg.append(" %s pulled in by:" % (atom,))
+ msg.append(" %s" % (parent,))
+ msg.append("")
+ msg.extend(textwrap.wrap(
+ "Have you forgotten to do a complete update prior " + \
+ "to depclean? The most comprehensive command for this " + \
+ "purpose is as follows:", 65
+ ))
+ msg.append("")
+ msg.append(" " + \
+ good("emerge --update --newuse --deep --with-bdeps=y @world"))
+ msg.append("")
+ msg.extend(textwrap.wrap(
+ "Note that the --with-bdeps=y option is not required in " + \
+ "many situations. Refer to the emerge manual page " + \
+ "(run `man emerge`) for more information about " + \
+ "--with-bdeps.", 65
+ ))
+ msg.append("")
+ msg.extend(textwrap.wrap(
+ "Also, note that it may be necessary to manually uninstall " + \
+ "packages that no longer exist in the portage tree, since " + \
+ "it may not be possible to satisfy their dependencies.", 65
+ ))
+ if action == "prune":
+ msg.append("")
+ msg.append("If you would like to ignore " + \
+ "dependencies then use %s." % good("--nodeps"))
+ writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
+ level=logging.ERROR, noiselevel=-1)
+ return True
+ return False
+
+ if unresolved_deps():
+ return 1, [], False, 0
+
+ graph = resolver._dynamic_config.digraph.copy()
+ required_pkgs_total = 0
+ for node in graph:
+ if isinstance(node, Package):
+ required_pkgs_total += 1
+
+ def show_parents(child_node):
+ parent_nodes = graph.parent_nodes(child_node)
+ if not parent_nodes:
+ # With --prune, the highest version can be pulled in without any
+ # real parent since all installed packages are pulled in. In that
+ # case there's nothing to show here.
+ return
+ parent_strs = []
+ for node in parent_nodes:
+ parent_strs.append(str(getattr(node, "cpv", node)))
+ parent_strs.sort()
+ msg = []
+ msg.append(" %s pulled in by:\n" % (child_node.cpv,))
+ for parent_str in parent_strs:
+ msg.append(" %s\n" % (parent_str,))
+ msg.append("\n")
+ portage.writemsg_stdout("".join(msg), noiselevel=-1)
+
+ def cmp_pkg_cpv(pkg1, pkg2):
+ """Sort Package instances by cpv."""
+ if pkg1.cpv > pkg2.cpv:
+ return 1
+ elif pkg1.cpv == pkg2.cpv:
+ return 0
+ else:
+ return -1
+
+ def create_cleanlist():
+
+ if "--debug" in myopts:
+ writemsg("\ndigraph:\n\n", noiselevel=-1)
+ graph.debug_print()
+ writemsg("\n", noiselevel=-1)
+
+ # Never display the special internal protected_set.
+ for node in graph:
+ if isinstance(node, SetArg) and node.name == protected_set_name:
+ graph.remove(node)
+ break
+
+ pkgs_to_remove = []
+
+ if action == "depclean":
+ if args_set:
+
+ for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
+ arg_atom = None
+ try:
+ arg_atom = args_set.findAtomForPackage(pkg)
+ except portage.exception.InvalidDependString:
+ # this error has already been displayed by now
+ continue
+
+ if arg_atom:
+ if pkg not in graph:
+ pkgs_to_remove.append(pkg)
+ elif "--verbose" in myopts:
+ show_parents(pkg)
+
+ else:
+ for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
+ if pkg not in graph:
+ pkgs_to_remove.append(pkg)
+ elif "--verbose" in myopts:
+ show_parents(pkg)
+
+ elif action == "prune":
+
+ for atom in args_set:
+ for pkg in vardb.match_pkgs(atom):
+ if pkg not in graph:
+ pkgs_to_remove.append(pkg)
+ elif "--verbose" in myopts:
+ show_parents(pkg)
+
+ if not pkgs_to_remove:
+ writemsg_level(
+ ">>> No packages selected for removal by %s\n" % action)
+ if "--verbose" not in myopts:
+ writemsg_level(
+ ">>> To see reverse dependencies, use %s\n" % \
+ good("--verbose"))
+ if action == "prune":
+ writemsg_level(
+ ">>> To ignore dependencies, use %s\n" % \
+ good("--nodeps"))
+
+ return pkgs_to_remove
+
+ cleanlist = create_cleanlist()
+ clean_set = set(cleanlist)
+
+ if cleanlist and \
+ real_vardb._linkmap is not None and \
+ myopts.get("--depclean-lib-check") != "n" and \
+ "preserve-libs" not in settings.features:
+
+ # Check if any of these packages are the sole providers of libraries
+ # with consumers that have not been selected for removal. If so, these
+ # packages and any dependencies need to be added to the graph.
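+ # Hypothetical example: if media-libs/libpng is in the cleanlist but
+ # an installed app-misc/tool still links against its libpng.so, and
+ # no other installed package provides that soname, the package lands
+ # in consumer_map below and is later re-added to the graph instead
+ # of being removed.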
+ linkmap = real_vardb._linkmap
+ consumer_cache = {}
+ provider_cache = {}
+ consumer_map = {}
+
+ writemsg_level(">>> Checking for lib consumers...\n")
+
+ for pkg in cleanlist:
+ pkg_dblink = real_vardb._dblink(pkg.cpv)
+ consumers = {}
+
+ for lib in pkg_dblink.getcontents():
+ lib = lib[len(myroot):]
+ lib_key = linkmap._obj_key(lib)
+ lib_consumers = consumer_cache.get(lib_key)
+ if lib_consumers is None:
+ try:
+ lib_consumers = linkmap.findConsumers(lib_key)
+ except KeyError:
+ continue
+ consumer_cache[lib_key] = lib_consumers
+ if lib_consumers:
+ consumers[lib_key] = lib_consumers
+
+ if not consumers:
+ continue
+
+ for lib, lib_consumers in list(consumers.items()):
+ for consumer_file in list(lib_consumers):
+ if pkg_dblink.isowner(consumer_file):
+ lib_consumers.remove(consumer_file)
+ if not lib_consumers:
+ del consumers[lib]
+
+ if not consumers:
+ continue
+
+ for lib, lib_consumers in consumers.items():
+
+ soname = linkmap.getSoname(lib)
+
+ consumer_providers = []
+ for lib_consumer in lib_consumers:
+ # The cache is keyed by consumer (see the assignment below),
+ # so look it up by lib_consumer rather than lib.
+ providers = provider_cache.get(lib_consumer)
+ if providers is None:
+ providers = linkmap.findProviders(lib_consumer)
+ provider_cache[lib_consumer] = providers
+ if soname not in providers:
+ # Why does this happen?
+ continue
+ consumer_providers.append(
+ (lib_consumer, providers[soname]))
+
+ consumers[lib] = consumer_providers
+
+ consumer_map[pkg] = consumers
+
+ if consumer_map:
+
+ search_files = set()
+ for consumers in consumer_map.values():
+ for lib, consumer_providers in consumers.items():
+ for lib_consumer, providers in consumer_providers:
+ search_files.add(lib_consumer)
+ search_files.update(providers)
+
+ writemsg_level(">>> Assigning files to packages...\n")
+ file_owners = {}
+ for f in search_files:
+ owner_set = set()
+ for owner in linkmap.getOwners(f):
+ owner_dblink = real_vardb._dblink(owner)
+ if owner_dblink.exists():
+ owner_set.add(owner_dblink)
+ if owner_set:
+ file_owners[f] = owner_set
+
+ for pkg, consumers in list(consumer_map.items()):
+ for lib, consumer_providers in list(consumers.items()):
+ lib_consumers = set()
+
+ for lib_consumer, providers in consumer_providers:
+ owner_set = file_owners.get(lib_consumer)
+ provider_dblinks = set()
+ provider_pkgs = set()
+
+ if len(providers) > 1:
+ for provider in providers:
+ provider_set = file_owners.get(provider)
+ if provider_set is not None:
+ provider_dblinks.update(provider_set)
+
+ if len(provider_dblinks) > 1:
+ for provider_dblink in provider_dblinks:
+ provider_pkg = resolver._pkg(
+ provider_dblink.mycpv, "installed",
+ root_config, installed=True)
+ if provider_pkg not in clean_set:
+ provider_pkgs.add(provider_pkg)
+
+ if provider_pkgs:
+ continue
+
+ if owner_set is not None:
+ lib_consumers.update(owner_set)
+
+ for consumer_dblink in list(lib_consumers):
+ if resolver._pkg(consumer_dblink.mycpv, "installed",
+ root_config, installed=True) in clean_set:
+ lib_consumers.remove(consumer_dblink)
+ continue
+
+ if lib_consumers:
+ consumers[lib] = lib_consumers
+ else:
+ del consumers[lib]
+ if not consumers:
+ del consumer_map[pkg]
+
+ if consumer_map:
+ # TODO: Implement a package set for rebuilding consumer packages.
+
+ msg = "In order to avoid breakage of link level " + \
+ "dependencies, one or more packages will not be removed. " + \
+ "This can be solved by rebuilding " + \
+ "the packages that pulled them in."
+
+ prefix = bad(" * ")
+ from textwrap import wrap
+ writemsg_level("".join(prefix + "%s\n" % line for \
+ line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
+
+ msg = []
+ for pkg in sorted(consumer_map, key=cmp_sort_key(cmp_pkg_cpv)):
+ consumers = consumer_map[pkg]
+ consumer_libs = {}
+ for lib, lib_consumers in consumers.items():
+ for consumer in lib_consumers:
+ consumer_libs.setdefault(
+ consumer.mycpv, set()).add(linkmap.getSoname(lib))
+ unique_consumers = set(chain(*consumers.values()))
+ unique_consumers = sorted(consumer.mycpv \
+ for consumer in unique_consumers)
+ msg.append("")
+ msg.append(" %s pulled in by:" % (pkg.cpv,))
+ for consumer in unique_consumers:
+ libs = consumer_libs[consumer]
+ msg.append(" %s needs %s" % \
+ (consumer, ', '.join(sorted(libs))))
+ msg.append("")
+ writemsg_level("".join(prefix + "%s\n" % line for line in msg),
+ level=logging.WARNING, noiselevel=-1)
+
+ # Add lib providers to the graph as children of lib consumers,
+ # and also add any dependencies pulled in by the provider.
+ writemsg_level(">>> Adding lib providers to graph...\n")
+
+ for pkg, consumers in consumer_map.items():
+ for consumer_dblink in set(chain(*consumers.values())):
+ consumer_pkg = resolver._pkg(consumer_dblink.mycpv,
+ "installed", root_config, installed=True)
+ if not resolver._add_pkg(pkg,
+ Dependency(parent=consumer_pkg,
+ priority=UnmergeDepPriority(runtime=True),
+ root=pkg.root)):
+ resolver.display_problems()
+ return 1, [], False, 0
+
+ writemsg_level("\nCalculating dependencies ")
+ success = resolver._complete_graph(
+ required_sets={myroot:required_sets})
+ writemsg_level("\b\b... done!\n")
+ resolver.display_problems()
+ if not success:
+ return 1, [], False, 0
+ if unresolved_deps():
+ return 1, [], False, 0
+
+ graph = resolver._dynamic_config.digraph.copy()
+ required_pkgs_total = 0
+ for node in graph:
+ if isinstance(node, Package):
+ required_pkgs_total += 1
+ cleanlist = create_cleanlist()
+ if not cleanlist:
+ return 0, [], False, required_pkgs_total
+ clean_set = set(cleanlist)
+
+ if clean_set:
+ writemsg_level(">>> Calculating removal order...\n")
+ # Use a topological sort to create an unmerge order such that
+ # each package is unmerged before its dependencies. This is
+ # necessary to avoid breaking things that may need to run
+ # during pkg_prerm or pkg_postrm phases.
+
+ # Create a new graph to account for dependencies between the
+ # packages being unmerged.
+ graph = digraph()
+ del cleanlist[:]
+
+ dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
+ runtime = UnmergeDepPriority(runtime=True)
+ runtime_post = UnmergeDepPriority(runtime_post=True)
+ buildtime = UnmergeDepPriority(buildtime=True)
+ priority_map = {
+ "RDEPEND": runtime,
+ "PDEPEND": runtime_post,
+ "DEPEND": buildtime,
+ }
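+ # Sketch of the resulting order for two hypothetical packages: if A
+ # RDEPENDs on B and both are in clean_set, the loop below adds the
+ # edge graph.add(B, A, priority=runtime), leaving A as a root node,
+ # so A is unmerged while B (which A's pkg_prerm may still need)
+ # remains installed.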
+
+ for node in clean_set:
+ graph.add(node, None)
+ mydeps = []
+ for dep_type in dep_keys:
+ depstr = node.metadata[dep_type]
+ if not depstr:
+ continue
+ priority = priority_map[dep_type]
+
+ if debug:
+ writemsg_level(_unicode_decode("\nParent: %s\n") \
+ % (node,), noiselevel=-1, level=logging.DEBUG)
+ writemsg_level(_unicode_decode( "Depstring: %s\n") \
+ % (depstr,), noiselevel=-1, level=logging.DEBUG)
+ writemsg_level(_unicode_decode( "Priority: %s\n") \
+ % (priority,), noiselevel=-1, level=logging.DEBUG)
+
+ try:
+ atoms = resolver._select_atoms(myroot, depstr,
+ myuse=node.use.enabled, parent=node,
+ priority=priority)[node]
+ except portage.exception.InvalidDependString:
+ # Ignore invalid deps of packages that will
+ # be uninstalled anyway.
+ continue
+
+ if debug:
+ writemsg_level("Candidates: [%s]\n" % \
+ ', '.join(_unicode_decode("'%s'") % (x,) for x in atoms),
+ noiselevel=-1, level=logging.DEBUG)
+
+ for atom in atoms:
+ if not isinstance(atom, portage.dep.Atom):
+ # Ignore invalid atoms returned from dep_check().
+ continue
+ if atom.blocker:
+ continue
+ matches = vardb.match_pkgs(atom)
+ if not matches:
+ continue
+ for child_node in matches:
+ if child_node in clean_set:
+ graph.add(child_node, node, priority=priority)
+
+ if debug:
+ writemsg_level("\nunmerge digraph:\n\n",
+ noiselevel=-1, level=logging.DEBUG)
+ graph.debug_print()
+ writemsg_level("\n", noiselevel=-1, level=logging.DEBUG)
+
+ ordered = True
+ if len(graph.order) == len(graph.root_nodes()):
+ # If there are no dependencies between packages
+ # let unmerge() group them by cat/pn.
+ ordered = False
+ cleanlist = [pkg.cpv for pkg in graph.order]
+ else:
+ # Order nodes from lowest to highest overall reference count for
+ # optimal root node selection (this can help minimize issues
+ # with unaccounted implicit dependencies).
+ node_refcounts = {}
+ for node in graph.order:
+ node_refcounts[node] = len(graph.parent_nodes(node))
+ def cmp_reference_count(node1, node2):
+ return node_refcounts[node1] - node_refcounts[node2]
+ graph.order.sort(key=cmp_sort_key(cmp_reference_count))
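+ # For example, hypothetical parent counts of {A: 0, B: 1, C: 3}
+ # yield the order [A, B, C], so when circular dependencies force
+ # ignore_priority to be raised below, the least-referenced
+ # candidates are popped first.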
+
+ ignore_priority_range = [None]
+ ignore_priority_range.extend(
+ range(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
+ while graph:
+ for ignore_priority in ignore_priority_range:
+ nodes = graph.root_nodes(ignore_priority=ignore_priority)
+ if nodes:
+ break
+ if not nodes:
+ raise AssertionError("no root nodes")
+ if ignore_priority is not None:
+ # Some deps have been dropped due to circular dependencies,
+ # so only pop one node in order to minimize the number that
+ # are dropped.
+ del nodes[1:]
+ for node in nodes:
+ graph.remove(node)
+ cleanlist.append(node.cpv)
+
+ return 0, cleanlist, ordered, required_pkgs_total
+ return 0, [], False, required_pkgs_total
+
+def action_deselect(settings, trees, opts, atoms):
+ enter_invalid = '--ask-enter-invalid' in opts
+ root_config = trees[settings['ROOT']]['root_config']
+ world_set = root_config.sets['selected']
+ if not hasattr(world_set, 'update'):
+ writemsg_level("World @selected set does not appear to be mutable.\n",
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ pretend = '--pretend' in opts
+ locked = False
+ if not pretend and hasattr(world_set, 'lock'):
+ world_set.lock()
+ locked = True
+ try:
+ world_set.load()
+ world_atoms = world_set.getAtoms()
+ vardb = root_config.trees["vartree"].dbapi
+ expanded_atoms = set(atoms)
+
+ for atom in atoms:
+ if not atom.startswith(SETPREFIX):
+ if atom.cp.startswith("null/"):
+ # try to expand category from world set
+ null_cat, pn = portage.catsplit(atom.cp)
+ for world_atom in world_atoms:
+ cat, world_pn = portage.catsplit(world_atom.cp)
+ if pn == world_pn:
+ expanded_atoms.add(
+ Atom(atom.replace("null", cat, 1),
+ allow_repo=True, allow_wildcard=True))
+
+ for cpv in vardb.match(atom):
+ slot, = vardb.aux_get(cpv, ["SLOT"])
+ if not slot:
+ slot = "0"
+ expanded_atoms.add(Atom("%s:%s" % \
+ (portage.cpv_getkey(cpv), slot)))
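+ # Worked example with hypothetical atoms: an argument of
+ # "app-misc/foo" with foo-1.0 installed in slot 0 gains
+ # "app-misc/foo:0" from the match loop above, while a "null/foo"
+ # argument (category unknown) is first expanded via a matching
+ # world entry to "app-misc/foo".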
+
+ discard_atoms = set()
+ for atom in world_set:
+ for arg_atom in expanded_atoms:
+ if arg_atom.startswith(SETPREFIX):
+ if atom.startswith(SETPREFIX) and \
+ arg_atom == atom:
+ discard_atoms.add(atom)
+ break
+ else:
+ if not atom.startswith(SETPREFIX) and \
+ arg_atom.intersects(atom) and \
+ not (arg_atom.slot and not atom.slot) and \
+ not (arg_atom.repo and not atom.repo):
+ discard_atoms.add(atom)
+ break
+ if discard_atoms:
+ for atom in sorted(discard_atoms):
+ if pretend:
+ print(">>> Would remove %s from \"world\" favorites file..." % \
+ colorize("INFORM", str(atom)))
+ else:
+ print(">>> Removing %s from \"world\" favorites file..." % \
+ colorize("INFORM", str(atom)))
+
+ if '--ask' in opts:
+ prompt = "Would you like to remove these " + \
+ "packages from your world favorites?"
+ if userquery(prompt, enter_invalid) == 'No':
+ return os.EX_OK
+
+ remaining = set(world_set)
+ remaining.difference_update(discard_atoms)
+ if not pretend:
+ world_set.replace(remaining)
+ else:
+ print(">>> No matching atoms found in \"world\" favorites file...")
+ finally:
+ if locked:
+ world_set.unlock()
+ return os.EX_OK
+
+class _info_pkgs_ver(object):
+ def __init__(self, ver, repo_suffix, provide_suffix):
+ self.ver = ver
+ self.repo_suffix = repo_suffix
+ self.provide_suffix = provide_suffix
+
+ def __lt__(self, other):
+ return portage.versions.vercmp(self.ver, other.ver) < 0
+
+ def toString(self):
+ """
+ This may return unicode if repo_name contains unicode.
+ Don't use __str__ and str() since unicode triggers compatibility
+ issues between python 2.x and 3.x.
+ """
+ return self.ver + self.repo_suffix + self.provide_suffix
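+ # Ordering uses vercmp rather than string comparison: for example,
+ # _info_pkgs_ver("1.9", "", "") sorts before
+ # _info_pkgs_ver("1.10", "", ""), whereas plain string ordering
+ # would reverse them.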
+
+def action_info(settings, trees, myopts, myfiles):
+
+ output_buffer = []
+ append = output_buffer.append
+ root_config = trees[settings['ROOT']]['root_config']
+
+ append(getportageversion(settings["PORTDIR"], settings["ROOT"],
+ settings.profile_path, settings["CHOST"],
+ trees[settings["ROOT"]]["vartree"].dbapi))
+
+ header_width = 65
+ header_title = "System Settings"
+ if myfiles:
+ append(header_width * "=")
+ append(header_title.rjust(int(header_width/2 + len(header_title)/2)))
+ append(header_width * "=")
+ append("System uname: %s" % (platform.platform(aliased=1),))
+
+ lastSync = portage.grabfile(os.path.join(
+ settings["PORTDIR"], "metadata", "timestamp.chk"))
+ if lastSync:
+ lastSync = lastSync[0]
+ else:
+ lastSync = "Unknown"
+ append("Timestamp of tree: %s" % (lastSync,))
+
+ output=subprocess_getstatusoutput("distcc --version")
+ if output[0] == os.EX_OK:
+ distcc_str = output[1].split("\n", 1)[0]
+ if "distcc" in settings.features:
+ distcc_str += " [enabled]"
+ else:
+ distcc_str += " [disabled]"
+ append(distcc_str)
+
+ output=subprocess_getstatusoutput("ccache -V")
+ if output[0] == os.EX_OK:
+ ccache_str = output[1].split("\n", 1)[0]
+ if "ccache" in settings.features:
+ ccache_str += " [enabled]"
+ else:
+ ccache_str += " [disabled]"
+ append(ccache_str)
+
+ myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
+ "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
+ myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
+ atoms = []
+ vardb = trees["/"]["vartree"].dbapi
+ for x in myvars:
+ try:
+ x = Atom(x)
+ except InvalidAtom:
+ append("%-20s %s" % (x+":", "[NOT VALID]"))
+ else:
+ for atom in expand_new_virt(vardb, x):
+ if not atom.blocker:
+ atoms.append((x, atom))
+
+ myvars = sorted(set(atoms))
+
+ portdb = trees["/"]["porttree"].dbapi
+ main_repo = portdb.getRepositoryName(portdb.porttree_root)
+ cp_map = {}
+ cp_max_len = 0
+
+ for orig_atom, x in myvars:
+ pkg_matches = vardb.match(x)
+
+ versions = []
+ for cpv in pkg_matches:
+ matched_cp = portage.versions.cpv_getkey(cpv)
+ ver = portage.versions.cpv_getversion(cpv)
+ ver_map = cp_map.setdefault(matched_cp, {})
+ prev_match = ver_map.get(ver)
+ if prev_match is not None:
+ if prev_match.provide_suffix:
+ # prefer duplicate matches that include
+ # additional virtual provider info
+ continue
+
+ if len(matched_cp) > cp_max_len:
+ cp_max_len = len(matched_cp)
+ repo = vardb.aux_get(cpv, ["repository"])[0]
+ if repo == main_repo:
+ repo_suffix = ""
+ elif not repo:
+ repo_suffix = "::<unknown repository>"
+ else:
+ repo_suffix = "::" + repo
+
+ if matched_cp == orig_atom.cp:
+ provide_suffix = ""
+ else:
+ provide_suffix = " (%s)" % (orig_atom,)
+
+ ver_map[ver] = _info_pkgs_ver(ver, repo_suffix, provide_suffix)
+
+ for cp in sorted(cp_map):
+ versions = sorted(cp_map[cp].values())
+ versions = ", ".join(ver.toString() for ver in versions)
+ append("%s %s" % \
+ ((cp + ":").ljust(cp_max_len + 1), versions))
+
+ libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
+
+ repos = portdb.settings.repositories
+ if "--verbose" in myopts:
+ append("Repositories:\n")
+ for repo in repos:
+ append(repo.info_string())
+ else:
+ append("Repositories: %s" % \
+ " ".join(repo.name for repo in repos))
+
+ if _ENABLE_SET_CONFIG:
+ sets_line = "Installed sets: "
+ sets_line += ", ".join(s for s in \
+ sorted(root_config.sets['selected'].getNonAtoms()) \
+ if s.startswith(SETPREFIX))
+ append(sets_line)
+
+ if "--verbose" in myopts:
+ myvars = list(settings)
+ else:
+ myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
+ 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
+ 'PORTDIR_OVERLAY', 'PORTAGE_BUNZIP2_COMMAND',
+ 'PORTAGE_BZIP2_COMMAND',
+ 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
+ 'ACCEPT_KEYWORDS', 'ACCEPT_LICENSE', 'SYNC', 'FEATURES',
+ 'EMERGE_DEFAULT_OPTS']
+
+ myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
+
+ myvars_ignore_defaults = {
+ 'PORTAGE_BZIP2_COMMAND' : 'bzip2',
+ }
+
+ myvars = portage.util.unique_array(myvars)
+ use_expand = settings.get('USE_EXPAND', '').split()
+ use_expand.sort()
+ use_expand_hidden = set(
+ settings.get('USE_EXPAND_HIDDEN', '').upper().split())
+ alphabetical_use = '--alphabetical' in myopts
+ unset_vars = []
+ myvars.sort()
+ for k in myvars:
+ v = settings.get(k)
+ if v is not None:
+ if k != "USE":
+ default = myvars_ignore_defaults.get(k)
+ if default is not None and \
+ default == v:
+ continue
+ append('%s="%s"' % (k, v))
+ else:
+ use = set(v.split())
+ for varname in use_expand:
+ flag_prefix = varname.lower() + "_"
+ for f in list(use):
+ if f.startswith(flag_prefix):
+ use.remove(f)
+ use = list(use)
+ use.sort()
+ use = ['USE="%s"' % " ".join(use)]
+ for varname in use_expand:
+ myval = settings.get(varname)
+ if myval:
+ use.append('%s="%s"' % (varname, myval))
+ append(" ".join(use))
+ else:
+ unset_vars.append(k)
+ if unset_vars:
+ append("Unset: "+", ".join(unset_vars))
+ append("")
+ append("")
+ writemsg_stdout("\n".join(output_buffer),
+ noiselevel=-1)
+
+ # See if we can find any packages installed matching the strings
+ # passed on the command line
+ mypkgs = []
+ vardb = trees[settings["ROOT"]]["vartree"].dbapi
+ portdb = trees[settings["ROOT"]]["porttree"].dbapi
+ bindb = trees[settings["ROOT"]]["bintree"].dbapi
+ for x in myfiles:
+ match_found = False
+ installed_match = vardb.match(x)
+ for installed in installed_match:
+ mypkgs.append((installed, "installed"))
+ match_found = True
+
+ if match_found:
+ continue
+
+ for db, pkg_type in ((portdb, "ebuild"), (bindb, "binary")):
+ if pkg_type == "binary" and "--usepkg" not in myopts:
+ continue
+
+ matches = db.match(x)
+ matches.reverse()
+ for match in matches:
+ if pkg_type == "binary":
+ if db.bintree.isremote(match):
+ continue
+ auxkeys = ["EAPI", "DEFINED_PHASES"]
+ metadata = dict(zip(auxkeys, db.aux_get(match, auxkeys)))
+ if metadata["EAPI"] not in ("0", "1", "2", "3") and \
+ "info" in metadata["DEFINED_PHASES"].split():
+ mypkgs.append((match, pkg_type))
+ break
+
+ # If some packages were found...
+ if mypkgs:
+ # Get our global settings (we only print stuff if it varies from
+ # the current config)
+ mydesiredvars = ['CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS']
+ auxkeys = mydesiredvars + list(vardb._aux_cache_keys)
+ auxkeys.append('DEFINED_PHASES')
+ global_vals = {}
+ pkgsettings = portage.config(clone=settings)
+
+ # Loop through each package
+ # Only print settings if they differ from global settings
+ header_title = "Package Settings"
+ print(header_width * "=")
+ print(header_title.rjust(int(header_width/2 + len(header_title)/2)))
+ print(header_width * "=")
+ from portage.output import EOutput
+ out = EOutput()
+ for mypkg in mypkgs:
+ cpv = mypkg[0]
+ pkg_type = mypkg[1]
+ # Get all package specific variables
+ if pkg_type == "installed":
+ metadata = dict(zip(auxkeys, vardb.aux_get(cpv, auxkeys)))
+ elif pkg_type == "ebuild":
+ metadata = dict(zip(auxkeys, portdb.aux_get(cpv, auxkeys)))
+ elif pkg_type == "binary":
+ metadata = dict(zip(auxkeys, bindb.aux_get(cpv, auxkeys)))
+
+ pkg = Package(built=(pkg_type!="ebuild"), cpv=cpv,
+ installed=(pkg_type=="installed"), metadata=zip(Package.metadata_keys,
+ (metadata.get(x, '') for x in Package.metadata_keys)),
+ root_config=root_config, type_name=pkg_type)
+
+ if pkg_type == "installed":
+ print("\n%s was built with the following:" % \
+ colorize("INFORM", str(pkg.cpv)))
+ elif pkg_type == "ebuild":
+ print("\n%s would be build with the following:" % \
+ colorize("INFORM", str(pkg.cpv)))
+ elif pkg_type == "binary":
+ print("\n%s (non-installed binary) was built with the following:" % \
+ colorize("INFORM", str(pkg.cpv)))
+
+ writemsg_stdout('%s\n' % pkg_use_display(pkg, myopts),
+ noiselevel=-1)
+ if pkg_type == "installed":
+ for myvar in mydesiredvars:
+ if metadata[myvar].split() != settings.get(myvar, '').split():
+ print("%s=\"%s\"" % (myvar, metadata[myvar]))
+ print()
+
+ if metadata['DEFINED_PHASES']:
+ if 'info' not in metadata['DEFINED_PHASES'].split():
+ continue
+
+ print(">>> Attempting to run pkg_info() for '%s'" % pkg.cpv)
+
+ if pkg_type == "installed":
+ ebuildpath = vardb.findname(pkg.cpv)
+ elif pkg_type == "ebuild":
+ ebuildpath = portdb.findname(pkg.cpv, myrepo=pkg.repo)
+ elif pkg_type == "binary":
+ tbz2_file = bindb.bintree.getname(pkg.cpv)
+ ebuild_file_name = pkg.cpv.split("/")[1] + ".ebuild"
+ ebuild_file_contents = portage.xpak.tbz2(tbz2_file).getfile(ebuild_file_name)
+ tmpdir = tempfile.mkdtemp()
+ ebuildpath = os.path.join(tmpdir, ebuild_file_name)
+ with open(ebuildpath, 'w') as f:
+ f.write(ebuild_file_contents)
+
+ if not ebuildpath or not os.path.exists(ebuildpath):
+ out.ewarn("No ebuild found for '%s'" % pkg.cpv)
+ continue
+
+ if pkg_type == "installed":
+ portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
+ pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
+ mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
+ tree="vartree")
+ elif pkg_type == "ebuild":
+ portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
+ pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
+ mydbapi=trees[settings["ROOT"]]["porttree"].dbapi,
+ tree="porttree")
+ elif pkg_type == "binary":
+ portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
+ pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
+ mydbapi=trees[settings["ROOT"]]["bintree"].dbapi,
+ tree="bintree")
+ shutil.rmtree(tmpdir)
+
+def action_metadata(settings, portdb, myopts, porttrees=None):
+ if porttrees is None:
+ porttrees = portdb.porttrees
+ portage.writemsg_stdout("\n>>> Updating Portage cache\n")
+ old_umask = os.umask(0o002)
+ cachedir = os.path.normpath(settings.depcachedir)
+ if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
+ "/lib", "/opt", "/proc", "/root", "/sbin",
+ "/sys", "/tmp", "/usr", "/var"]:
+ print("!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
+ "ROOT DIRECTORY ON YOUR SYSTEM.", file=sys.stderr)
+ print("!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir, file=sys.stderr)
+ sys.exit(73)
+ if not os.path.exists(cachedir):
+ os.makedirs(cachedir)
+
+ auxdbkeys = [x for x in portage.auxdbkeys if not x.startswith("UNUSED_0")]
+ auxdbkeys = tuple(auxdbkeys)
+
+ class TreeData(object):
+ __slots__ = ('dest_db', 'eclass_db', 'path', 'src_db', 'valid_nodes')
+ def __init__(self, dest_db, eclass_db, path, src_db):
+ self.dest_db = dest_db
+ self.eclass_db = eclass_db
+ self.path = path
+ self.src_db = src_db
+ self.valid_nodes = set()
+
+ porttrees_data = []
+ for path in porttrees:
+ src_db = portdb._pregen_auxdb.get(path)
+ if src_db is None and \
+ os.path.isdir(os.path.join(path, 'metadata', 'cache')):
+ src_db = portdb.metadbmodule(
+ path, 'metadata/cache', auxdbkeys, readonly=True)
+ try:
+ src_db.ec = portdb._repo_info[path].eclass_db
+ except AttributeError:
+ pass
+
+ if src_db is not None:
+ porttrees_data.append(TreeData(portdb.auxdb[path],
+ portdb._repo_info[path].eclass_db, path, src_db))
+
+ porttrees = [tree_data.path for tree_data in porttrees_data]
+
+ quiet = settings.get('TERM') == 'dumb' or \
+ '--quiet' in myopts or \
+ not sys.stdout.isatty()
+
+ onProgress = None
+ if not quiet:
+ progressBar = portage.output.TermProgressBar()
+ progressHandler = ProgressHandler()
+ onProgress = progressHandler.onProgress
+ def display():
+ progressBar.set(progressHandler.curval, progressHandler.maxval)
+ progressHandler.display = display
+ def sigwinch_handler(signum, frame):
+ lines, progressBar.term_columns = \
+ portage.output.get_term_size()
+ signal.signal(signal.SIGWINCH, sigwinch_handler)
+
+ # Temporarily override portdb.porttrees so portdb.cp_all()
+ # will only return the relevant subset.
+ portdb_porttrees = portdb.porttrees
+ portdb.porttrees = porttrees
+ try:
+ cp_all = portdb.cp_all()
+ finally:
+ portdb.porttrees = portdb_porttrees
+
+ curval = 0
+ maxval = len(cp_all)
+ if onProgress is not None:
+ onProgress(maxval, curval)
+
+ from portage.cache.util import quiet_mirroring
+ from portage import eapi_is_supported, \
+ _validate_cache_for_unsupported_eapis
+
+ # TODO: Display error messages, but do not interfere with the progress bar.
+ # Here's how:
+ # 1) erase the progress bar
+ # 2) show the error message
+ # 3) redraw the progress bar on a new line
+ noise = quiet_mirroring()
+
+ for cp in cp_all:
+ for tree_data in porttrees_data:
+ for cpv in portdb.cp_list(cp, mytree=tree_data.path):
+ tree_data.valid_nodes.add(cpv)
+ try:
+ src = tree_data.src_db[cpv]
+ except KeyError as e:
+ noise.missing_entry(cpv)
+ del e
+ continue
+ except CacheError as ce:
+ noise.exception(cpv, ce)
+ del ce
+ continue
+
+ eapi = src.get('EAPI')
+ if not eapi:
+ eapi = '0'
+ eapi = eapi.lstrip('-')
+ eapi_supported = eapi_is_supported(eapi)
+ if not eapi_supported:
+ if not _validate_cache_for_unsupported_eapis:
+ noise.misc(cpv, "unable to validate " + \
+ "cache for EAPI='%s'" % eapi)
+ continue
+
+ dest = None
+ try:
+ dest = tree_data.dest_db[cpv]
+ except (KeyError, CacheError):
+ pass
+
+ for d in (src, dest):
+ if d is not None and d.get('EAPI') in ('', '0'):
+ del d['EAPI']
+
+ if dest is not None:
+ if not (dest['_mtime_'] == src['_mtime_'] and \
+ tree_data.eclass_db.is_eclass_data_valid(
+ dest['_eclasses_']) and \
+ set(dest['_eclasses_']) == set(src['_eclasses_'])):
+ dest = None
+ else:
+ # We don't want to skip the write unless we're really
+ # sure that the existing cache is identical, so don't
+ # trust _mtime_ and _eclasses_ alone.
+ for k in set(chain(src, dest)).difference(
+ ('_mtime_', '_eclasses_')):
+ if dest.get(k, '') != src.get(k, ''):
+ dest = None
+ break
+
+ if dest is not None:
+ # The existing data is valid and identical,
+ # so there's no need to overwrite it.
+ continue
+
+ try:
+ inherited = src.get('INHERITED', '')
+ eclasses = src.get('_eclasses_')
+ except CacheError as ce:
+ noise.exception(cpv, ce)
+ del ce
+ continue
+
+ if eclasses is not None:
+ if not tree_data.eclass_db.is_eclass_data_valid(
+ src['_eclasses_']):
+ noise.eclass_stale(cpv)
+ continue
+ inherited = eclasses
+ else:
+ inherited = inherited.split()
+
+ if tree_data.src_db.complete_eclass_entries and \
+ eclasses is None:
+ noise.corruption(cpv, "missing _eclasses_ field")
+ continue
+
+ if inherited:
+ # Even if _eclasses_ already exists, replace it with data from
+ # eclass_cache, in order to insert local eclass paths.
+ try:
+ eclasses = tree_data.eclass_db.get_eclass_data(inherited)
+ except KeyError:
+ # INHERITED contains a non-existent eclass.
+ noise.eclass_stale(cpv)
+ continue
+
+ if eclasses is None:
+ noise.eclass_stale(cpv)
+ continue
+ src['_eclasses_'] = eclasses
+ else:
+ src['_eclasses_'] = {}
+
+ if not eapi_supported:
+ src = {
+ 'EAPI' : '-' + eapi,
+ '_mtime_' : src['_mtime_'],
+ '_eclasses_' : src['_eclasses_'],
+ }
+
+ try:
+ tree_data.dest_db[cpv] = src
+ except CacheError as ce:
+ noise.exception(cpv, ce)
+ del ce
+
+ curval += 1
+ if onProgress is not None:
+ onProgress(maxval, curval)
+
+ if onProgress is not None:
+ onProgress(maxval, curval)
+
+ for tree_data in porttrees_data:
+ try:
+ dead_nodes = set(tree_data.dest_db)
+ except CacheError as e:
+ writemsg_level("Error listing cache entries for " + \
+ "'%s': %s, continuing...\n" % (tree_data.path, e),
+ level=logging.ERROR, noiselevel=-1)
+ del e
+ else:
+ dead_nodes.difference_update(tree_data.valid_nodes)
+ for cpv in dead_nodes:
+ try:
+ del tree_data.dest_db[cpv]
+ except (KeyError, CacheError):
+ pass
+
+ if not quiet:
+ # make sure the final progress is displayed
+ progressHandler.display()
+ print()
+ signal.signal(signal.SIGWINCH, signal.SIG_DFL)
+
+ sys.stdout.flush()
+ os.umask(old_umask)
+
+def action_regen(settings, portdb, max_jobs, max_load):
+ xterm_titles = "notitles" not in settings.features
+ emergelog(xterm_titles, " === regen")
+ # Regenerate cache entries.
+ try:
+ os.close(sys.stdin.fileno())
+ except SystemExit as e:
+ raise # Needed else can't exit
+ except:
+ pass
+ sys.stdout.flush()
+
+ regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
+ received_signal = []
+
+ def emergeexitsig(signum, frame):
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
+ portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % \
+ {"signal":signum})
+ regen.terminate()
+ received_signal.append(128 + signum)
+
+ earlier_sigint_handler = signal.signal(signal.SIGINT, emergeexitsig)
+ earlier_sigterm_handler = signal.signal(signal.SIGTERM, emergeexitsig)
+
+ try:
+ regen.run()
+ finally:
+ # Restore previous handlers
+ if earlier_sigint_handler is not None:
+ signal.signal(signal.SIGINT, earlier_sigint_handler)
+ else:
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ if earlier_sigterm_handler is not None:
+ signal.signal(signal.SIGTERM, earlier_sigterm_handler)
+ else:
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+ if received_signal:
+ sys.exit(received_signal[0])
+
+ portage.writemsg_stdout("done!\n")
+ return regen.returncode
+
+def action_search(root_config, myopts, myfiles, spinner):
+ if not myfiles:
+ print("emerge: no search terms provided.")
+ else:
+ searchinstance = search(root_config,
+ spinner, "--searchdesc" in myopts,
+ "--quiet" not in myopts, "--usepkg" in myopts,
+ "--usepkgonly" in myopts)
+ for mysearch in myfiles:
+ try:
+ searchinstance.execute(mysearch)
+ except re.error as comment:
+ print("\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment ))
+ sys.exit(1)
+ searchinstance.output()
+
+def action_sync(settings, trees, mtimedb, myopts, myaction):
+ enter_invalid = '--ask-enter-invalid' in myopts
+ xterm_titles = "notitles" not in settings.features
+ emergelog(xterm_titles, " === sync")
+ portdb = trees[settings["ROOT"]]["porttree"].dbapi
+ myportdir = portdb.porttree_root
+ if not myportdir:
+ myportdir = settings.get('PORTDIR', '')
+ if myportdir and myportdir.strip():
+ myportdir = os.path.realpath(myportdir)
+ else:
+ myportdir = None
+ out = portage.output.EOutput()
+ global_config_path = GLOBAL_CONFIG_PATH
+ if settings['EPREFIX']:
+ global_config_path = os.path.join(settings['EPREFIX'],
+ GLOBAL_CONFIG_PATH.lstrip(os.sep))
+ if not myportdir:
+ sys.stderr.write("!!! PORTDIR is undefined. " + \
+ "Is %s/make.globals missing?\n" % global_config_path)
+ sys.exit(1)
+ if myportdir[-1]=="/":
+ myportdir=myportdir[:-1]
+ try:
+ st = os.stat(myportdir)
+ except OSError:
+ st = None
+ if st is None:
+ print(">>>",myportdir,"not found, creating it.")
+ portage.util.ensure_dirs(myportdir, mode=0o755)
+ st = os.stat(myportdir)
+
+ usersync_uid = None
+ spawn_kwargs = {}
+ spawn_kwargs["env"] = settings.environ()
+ if 'usersync' in settings.features and \
+ portage.data.secpass >= 2 and \
+ (st.st_uid != os.getuid() and st.st_mode & 0o700 or \
+ st.st_gid != os.getgid() and st.st_mode & 0o070):
+ try:
+ homedir = pwd.getpwuid(st.st_uid).pw_dir
+ except KeyError:
+ pass
+ else:
+ # Drop privileges when syncing, in order to match
+ # existing uid/gid settings.
+ usersync_uid = st.st_uid
+ spawn_kwargs["uid"] = st.st_uid
+ spawn_kwargs["gid"] = st.st_gid
+ spawn_kwargs["groups"] = [st.st_gid]
+ spawn_kwargs["env"]["HOME"] = homedir
+ umask = 0o002
+ if not st.st_mode & 0o020:
+ umask = umask | 0o020
+ spawn_kwargs["umask"] = umask
+
+ if usersync_uid is not None:
+ # PORTAGE_TMPDIR is used below, so validate it and
+ # bail out if necessary.
+ rval = _check_temp_dir(settings)
+ if rval != os.EX_OK:
+ return rval
+
+ syncuri = settings.get("SYNC", "").strip()
+ if not syncuri:
+ writemsg_level("!!! SYNC is undefined. " + \
+ "Is %s/make.globals missing?\n" % global_config_path,
+ noiselevel=-1, level=logging.ERROR)
+ return 1
+
+ vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
+ vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
+
+ os.umask(0o022)
+ dosyncuri = syncuri
+ updatecache_flg = False
+ if myaction == "metadata":
+ print("skipping sync")
+ updatecache_flg = True
+ elif ".git" in vcs_dirs:
+ # Update the existing git repository and ignore the syncuri. We
+ # trust the user and assume that the currently checked-out branch
+ # is the one they want updated, so branch management is left to
+ # git itself.
+ if portage.process.find_binary("git") is None:
+ msg = ["Command not found: git",
+ "Type \"emerge dev-util/git\" to enable git support."]
+ for l in msg:
+ writemsg_level("!!! %s\n" % l,
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+ msg = ">>> Starting git pull in %s..." % myportdir
+ emergelog(xterm_titles, msg)
+ writemsg_level(msg + "\n")
+ exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
+ (portage._shell_quote(myportdir),), **spawn_kwargs)
+ if exitcode != os.EX_OK:
+ msg = "!!! git pull error in %s." % myportdir
+ emergelog(xterm_titles, msg)
+ writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
+ return exitcode
+ msg = ">>> Git pull in %s successful" % myportdir
+ emergelog(xterm_titles, msg)
+ writemsg_level(msg + "\n")
+ exitcode = git_sync_timestamps(settings, myportdir)
+ if exitcode == os.EX_OK:
+ updatecache_flg = True
+ elif syncuri[:8]=="rsync://" or syncuri[:6]=="ssh://":
+ for vcs_dir in vcs_dirs:
+ writemsg_level(("!!! %s appears to be under revision " + \
+ "control (contains %s).\n!!! Aborting rsync sync.\n") % \
+ (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
+ return 1
+ if not os.path.exists("/usr/bin/rsync"):
+ print("!!! /usr/bin/rsync does not exist, so rsync support is disabled.")
+ print("!!! Type \"emerge net-misc/rsync\" to enable rsync support.")
+ sys.exit(1)
+ mytimeout = 180
+
+ rsync_opts = []
+ if settings["PORTAGE_RSYNC_OPTS"] == "":
+ portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
+ rsync_opts.extend([
+ "--recursive", # Recurse directories
+ "--links", # Consider symlinks
+ "--safe-links", # Ignore links outside of tree
+ "--perms", # Preserve permissions
+ "--times", # Preserive mod times
+ "--compress", # Compress the data transmitted
+ "--force", # Force deletion on non-empty dirs
+ "--whole-file", # Don't do block transfers, only entire files
+ "--delete", # Delete files that aren't in the master tree
+ "--stats", # Show final statistics about what was transfered
+ "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
+ "--exclude=/distfiles", # Exclude distfiles from consideration
+ "--exclude=/local", # Exclude local from consideration
+ "--exclude=/packages", # Exclude packages from consideration
+ ])
+
+ else:
+ # The below validation is not needed when using the above hardcoded
+ # defaults.
+
+ portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
+ rsync_opts.extend(portage.util.shlex_split(
+ settings.get("PORTAGE_RSYNC_OPTS", "")))
+ for opt in ("--recursive", "--times"):
+ if opt not in rsync_opts:
+ portage.writemsg(yellow("WARNING:") + " adding required option " + \
+ "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
+ rsync_opts.append(opt)
+
+ for exclude in ("distfiles", "local", "packages"):
+ opt = "--exclude=/%s" % exclude
+ if opt not in rsync_opts:
+ portage.writemsg(yellow("WARNING:") + \
+ " adding required option %s not included in " % opt + \
+ "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
+ rsync_opts.append(opt)
+
+ if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
+ def rsync_opt_startswith(opt_prefix):
+ for x in rsync_opts:
+ if x.startswith(opt_prefix):
+ return True
+ return False
+
+ if not rsync_opt_startswith("--timeout="):
+ rsync_opts.append("--timeout=%d" % mytimeout)
+
+ for opt in ("--compress", "--whole-file"):
+ if opt not in rsync_opts:
+ portage.writemsg(yellow("WARNING:") + " adding required option " + \
+ "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
+ rsync_opts.append(opt)
+
+ if "--quiet" in myopts:
+ rsync_opts.append("--quiet") # Shut up a lot
+ else:
+ rsync_opts.append("--verbose") # Print filelist
+
+ if "--verbose" in myopts:
+ rsync_opts.append("--progress") # Progress meter for each file
+
+ if "--debug" in myopts:
+ rsync_opts.append("--checksum") # Force checksum on all files
+
+ # Real local timestamp file.
+ servertimestampfile = os.path.join(
+ myportdir, "metadata", "timestamp.chk")
+
+ content = portage.util.grabfile(servertimestampfile)
+ mytimestamp = 0
+ if content:
+ try:
+ mytimestamp = time.mktime(time.strptime(content[0],
+ "%a, %d %b %Y %H:%M:%S +0000"))
+ except (OverflowError, ValueError):
+ pass
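+ # For reference, entries in metadata/timestamp.chk look like
+ # "Sat, 02 Apr 2011 12:30:00 +0000" (a hypothetical example
+ # matching the strptime format above).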
+ del content
+
+ try:
+ rsync_initial_timeout = \
+ int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
+ except ValueError:
+ rsync_initial_timeout = 15
+
+ try:
+ maxretries = int(settings["PORTAGE_RSYNC_RETRIES"])
+ except (KeyError, ValueError):
+ maxretries = -1 # default number of retries
+
+ retries=0
+ try:
+ proto, user_name, hostname, port = re.split(
+ r"(rsync|ssh)://([^:/]+@)?(\[[:\da-fA-F]*\]|[^:/]*)(:[0-9]+)?",
+ syncuri, maxsplit=4)[1:5]
+ except ValueError:
+ writemsg_level("!!! SYNC is invalid: %s\n" % syncuri,
+ noiselevel=-1, level=logging.ERROR)
+ return 1
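+ # Worked example (hypothetical URI): the pattern above splits
+ # "rsync://user@rsync.example.org:873/gentoo-portage" into
+ # proto="rsync", user_name="user@", hostname="rsync.example.org"
+ # and port=":873".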
+ if port is None:
+ port=""
+ if user_name is None:
+ user_name=""
+ if re.match(r"^\[[:\da-fA-F]*\]$", hostname) is None:
+ getaddrinfo_host = hostname
+ else:
+ # getaddrinfo needs the brackets stripped
+ getaddrinfo_host = hostname[1:-1]
+ updatecache_flg=True
+ all_rsync_opts = set(rsync_opts)
+ extra_rsync_opts = portage.util.shlex_split(
+ settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
+ all_rsync_opts.update(extra_rsync_opts)
+
+ family = socket.AF_UNSPEC
+ if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
+ family = socket.AF_INET
+ elif socket.has_ipv6 and \
+ ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
+ family = socket.AF_INET6
+
+ addrinfos = None
+ uris = []
+
+ try:
+ addrinfos = getaddrinfo_validate(
+ socket.getaddrinfo(getaddrinfo_host, None,
+ family, socket.SOCK_STREAM))
+ except socket.error as e:
+ writemsg_level(
+ "!!! getaddrinfo failed for '%s': %s\n" % (hostname, e),
+ noiselevel=-1, level=logging.ERROR)
+
+ if addrinfos:
+
+ AF_INET = socket.AF_INET
+ AF_INET6 = None
+ if socket.has_ipv6:
+ AF_INET6 = socket.AF_INET6
+
+ ips_v4 = []
+ ips_v6 = []
+
+ for addrinfo in addrinfos:
+ if addrinfo[0] == AF_INET:
+ ips_v4.append("%s" % addrinfo[4][0])
+ elif AF_INET6 is not None and addrinfo[0] == AF_INET6:
+ # IPv6 addresses need to be enclosed in square brackets
+ ips_v6.append("[%s]" % addrinfo[4][0])
+
+ random.shuffle(ips_v4)
+ random.shuffle(ips_v6)
+
+ # Give priority to the address family that
+ # getaddrinfo() returned first.
+ if AF_INET6 is not None and addrinfos and \
+ addrinfos[0][0] == AF_INET6:
+ ips = ips_v6 + ips_v4
+ else:
+ ips = ips_v4 + ips_v6
+
+ for ip in ips:
+ uris.append(syncuri.replace(
+ "//" + user_name + hostname + port + "/",
+ "//" + user_name + ip + port + "/", 1))
+
+ if not uris:
+ # With some configurations we need to use the plain hostname
+ # rather than try to resolve the ip addresses (bug #340817).
+ uris.append(syncuri)
+
+ # reverse, for use with pop()
+ uris.reverse()
+
+ effective_maxretries = maxretries
+ if effective_maxretries < 0:
+ effective_maxretries = len(uris) - 1
+
+ SERVER_OUT_OF_DATE = -1
+ EXCEEDED_MAX_RETRIES = -2
+ while True:
+ if uris:
+ dosyncuri = uris.pop()
+ else:
+ writemsg("!!! Exhausted addresses for %s\n" % \
+ hostname, noiselevel=-1)
+ return 1
+
+ if (retries==0):
+ if "--ask" in myopts:
+ if userquery("Do you want to sync your Portage tree " + \
+ "with the mirror at\n" + blue(dosyncuri) + bold("?"),
+ enter_invalid) == "No":
+ print()
+ print("Quitting.")
+ print()
+ sys.exit(0)
+ emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
+ if "--quiet" not in myopts:
+ print(">>> Starting rsync with "+dosyncuri+"...")
+ else:
+ emergelog(xterm_titles,
+ ">>> Starting retry %d of %d with %s" % \
+ (retries, effective_maxretries, dosyncuri))
+ writemsg_stdout(
+ "\n\n>>> Starting retry %d of %d with %s\n" % \
+ (retries, effective_maxretries, dosyncuri), noiselevel=-1)
+
+ if dosyncuri.startswith('ssh://'):
+ dosyncuri = dosyncuri[6:].replace('/', ':/', 1)
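+ # e.g. a hypothetical "ssh://user@host/usr/portage" becomes
+ # "user@host:/usr/portage", the scp-style syntax that rsync expects.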
+
+ if mytimestamp != 0 and "--quiet" not in myopts:
+ print(">>> Checking server timestamp ...")
+
+ rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
+
+ if "--debug" in myopts:
+ print(rsynccommand)
+
+ exitcode = os.EX_OK
+ servertimestamp = 0
+ # Even if there's no timestamp available locally, fetch the
+ # timestamp anyway as an initial probe to verify that the server is
+ # responsive. This protects us from hanging indefinitely on a
+ # connection attempt to an unresponsive server which rsync's
+ # --timeout option does not prevent.
+ if True:
+ # Temporary file for remote server timestamp comparison.
+ # NOTE: If FEATURES=usersync is enabled then the tempfile
+ # needs to be in a directory that's readable by the usersync
+ # user. We assume that PORTAGE_TMPDIR will satisfy this
+ # requirement, since that's not necessarily true for the
+ # default directory used by the tempfile module.
+ if usersync_uid is not None:
+ tmpdir = settings['PORTAGE_TMPDIR']
+ else:
+ # use default dir from tempfile module
+ tmpdir = None
+ fd, tmpservertimestampfile = \
+ tempfile.mkstemp(dir=tmpdir)
+ os.close(fd)
+ if usersync_uid is not None:
+ portage.util.apply_permissions(tmpservertimestampfile,
+ uid=usersync_uid)
+ mycommand = rsynccommand[:]
+ mycommand.append(dosyncuri.rstrip("/") + \
+ "/metadata/timestamp.chk")
+ mycommand.append(tmpservertimestampfile)
+ content = None
+ mypids = []
+ try:
+ # Timeout here in case the server is unresponsive. The
+ # --timeout rsync option doesn't apply to the initial
+ # connection attempt.
+ try:
+ if rsync_initial_timeout:
+ portage.exception.AlarmSignal.register(
+ rsync_initial_timeout)
+
+ mypids.extend(portage.process.spawn(
+ mycommand, returnpid=True, **spawn_kwargs))
+ exitcode = os.waitpid(mypids[0], 0)[1]
+ if usersync_uid is not None:
+ portage.util.apply_permissions(tmpservertimestampfile,
+ uid=os.getuid())
+ content = portage.grabfile(tmpservertimestampfile)
+ finally:
+ if rsync_initial_timeout:
+ portage.exception.AlarmSignal.unregister()
+ try:
+ os.unlink(tmpservertimestampfile)
+ except OSError:
+ pass
+ except portage.exception.AlarmSignal:
+ # timed out
+ print('timed out')
+ # With waitpid and WNOHANG, only check the
+ # first element of the tuple since the second
+ # element may vary (bug #337465).
+ if mypids and os.waitpid(mypids[0], os.WNOHANG)[0] == 0:
+ os.kill(mypids[0], signal.SIGTERM)
+ os.waitpid(mypids[0], 0)
+ # This is the same code rsync uses for timeout.
+ exitcode = 30
+ else:
+ if exitcode != os.EX_OK:
+ if exitcode & 0xff:
+ exitcode = (exitcode & 0xff) << 8
+ else:
+ exitcode = exitcode >> 8
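+ # e.g. a raw waitpid status of 0x1700 (high byte 23) decodes to
+ # rsync exit code 23, while a nonzero low byte indicates the
+ # signal that terminated the process.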
+ if mypids:
+ portage.process.spawned_pids.remove(mypids[0])
+ if content:
+ try:
+ servertimestamp = time.mktime(time.strptime(
+ content[0], "%a, %d %b %Y %H:%M:%S +0000"))
+ except (OverflowError, ValueError):
+ pass
+ del mycommand, mypids, content
+ if exitcode == os.EX_OK:
+ if (servertimestamp != 0) and (servertimestamp == mytimestamp):
+ emergelog(xterm_titles,
+ ">>> Cancelling sync -- Already current.")
+ print()
+ print(">>>")
+ print(">>> Timestamps on the server and in the local repository are the same.")
+ print(">>> Cancelling all further sync action. You are already up to date.")
+ print(">>>")
+ print(">>> In order to force sync, remove '%s'." % servertimestampfile)
+ print(">>>")
+ print()
+ sys.exit(0)
+ elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
+ emergelog(xterm_titles,
+ ">>> Server out of date: %s" % dosyncuri)
+ print()
+ print(">>>")
+ print(">>> SERVER OUT OF DATE: %s" % dosyncuri)
+ print(">>>")
+ print(">>> In order to force sync, remove '%s'." % servertimestampfile)
+ print(">>>")
+ print()
+ exitcode = SERVER_OUT_OF_DATE
+ elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
+ # actual sync
+ mycommand = rsynccommand + [dosyncuri+"/", myportdir]
+ exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
+ if exitcode in [0,1,3,4,11,14,20,21]:
+ break
+ elif exitcode in [1,3,4,11,14,20,21]:
+ break
+ else:
+ # Code 2 indicates protocol incompatibility, which is expected
+ # for servers with protocol < 29 that don't support
+ # --prune-empty-directories. Retry for a server that supports
+ # at least rsync protocol version 29 (>=rsync-2.6.4).
+ pass
+
+ retries=retries+1
+
+ if maxretries < 0 or retries <= maxretries:
+ print(">>> Retrying...")
+ else:
+ # over retries
+ # exit loop
+ updatecache_flg=False
+ exitcode = EXCEEDED_MAX_RETRIES
+ break
+
+ if (exitcode==0):
+ emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
+ elif exitcode == SERVER_OUT_OF_DATE:
+ sys.exit(1)
+ elif exitcode == EXCEEDED_MAX_RETRIES:
+ sys.stderr.write(
+ ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
+ sys.exit(1)
+ elif (exitcode>0):
+ msg = []
+ if exitcode==1:
+ msg.append("Rsync has reported that there is a syntax error. Please ensure")
+ msg.append("that your SYNC statement is proper.")
+ msg.append("SYNC=" + settings["SYNC"])
+ elif exitcode==11:
+ msg.append("Rsync has reported that there is a File IO error. Normally")
+ msg.append("this means your disk is full, but can be caused by corruption")
+ msg.append("on the filesystem that contains PORTDIR. Please investigate")
+ msg.append("and try again after the problem has been fixed.")
+ msg.append("PORTDIR=" + settings["PORTDIR"])
+ elif exitcode==20:
+ msg.append("Rsync was killed before it finished.")
+ else:
+ msg.append("Rsync has not successfully finished. It is recommended that you keep")
+ msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
+ msg.append("to use rsync due to firewall or other restrictions. This should be a")
+ msg.append("temporary problem unless complications exist with your network")
+ msg.append("(and possibly your system's filesystem) configuration.")
+ for line in msg:
+ out.eerror(line)
+ sys.exit(exitcode)
+ elif syncuri.startswith("cvs://"):
+ if not os.path.exists("/usr/bin/cvs"):
+ print("!!! /usr/bin/cvs does not exist, so CVS support is disabled.")
+ print("!!! Type \"emerge dev-vcs/cvs\" to enable CVS support.")
+ sys.exit(1)
+ cvsroot=syncuri[6:]
+ cvsdir=os.path.dirname(myportdir)
+ if not os.path.exists(myportdir+"/CVS"):
+ #initial checkout
+ print(">>> Starting initial cvs checkout with "+syncuri+"...")
+ if os.path.exists(cvsdir+"/gentoo-x86"):
+ print("!!! existing",cvsdir+"/gentoo-x86 directory; exiting.")
+ sys.exit(1)
+ try:
+ os.rmdir(myportdir)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ sys.stderr.write(
+ "!!! existing '%s' directory; exiting.\n" % myportdir)
+ sys.exit(1)
+ del e
+ if portage.process.spawn_bash(
+ "cd %s; exec cvs -z0 -d %s co -P gentoo-x86" % \
+ (portage._shell_quote(cvsdir), portage._shell_quote(cvsroot)),
+ **spawn_kwargs) != os.EX_OK:
+ print("!!! cvs checkout error; exiting.")
+ sys.exit(1)
+ os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
+ else:
+ #cvs update
+ print(">>> Starting cvs update with "+syncuri+"...")
+ retval = portage.process.spawn_bash(
+ "cd %s; exec cvs -z0 -q update -dP" % \
+ (portage._shell_quote(myportdir),), **spawn_kwargs)
+ if retval != os.EX_OK:
+ writemsg_level("!!! cvs update error; exiting.\n",
+ noiselevel=-1, level=logging.ERROR)
+ sys.exit(retval)
+ dosyncuri = syncuri
+ else:
+ writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
+ noiselevel=-1, level=logging.ERROR)
+ return 1
+
+ if updatecache_flg and \
+ myaction != "metadata" and \
+ "metadata-transfer" not in settings.features:
+ updatecache_flg = False
+
+ # Reload the whole config from scratch.
+ settings, trees, mtimedb = load_emerge_config(trees=trees)
+ adjust_configs(myopts, trees)
+ root_config = trees[settings["ROOT"]]["root_config"]
+ portdb = trees[settings["ROOT"]]["porttree"].dbapi
+
+ if updatecache_flg and \
+ os.path.exists(os.path.join(myportdir, 'metadata', 'cache')):
+
+ # Only update cache for myportdir since that's
+ # the only one that's been synced here.
+ action_metadata(settings, portdb, myopts, porttrees=[myportdir])
+
+ if myopts.get('--package-moves') != 'n' and \
+ _global_updates(trees, mtimedb["updates"], quiet=("--quiet" in myopts)):
+ mtimedb.commit()
+ # Reload the whole config from scratch.
+ settings, trees, mtimedb = load_emerge_config(trees=trees)
+ adjust_configs(myopts, trees)
+ portdb = trees[settings["ROOT"]]["porttree"].dbapi
+ root_config = trees[settings["ROOT"]]["root_config"]
+
+ mybestpv = portdb.xmatch("bestmatch-visible",
+ portage.const.PORTAGE_PACKAGE_ATOM)
+ mypvs = portage.best(
+ trees[settings["ROOT"]]["vartree"].dbapi.match(
+ portage.const.PORTAGE_PACKAGE_ATOM))
+
+ chk_updated_cfg_files(settings["EROOT"],
+ portage.util.shlex_split(settings.get("CONFIG_PROTECT", "")))
+
+ if myaction != "metadata":
+ postsync = os.path.join(settings["PORTAGE_CONFIGROOT"],
+ portage.USER_CONFIG_PATH, "bin", "post_sync")
+ if os.access(postsync, os.X_OK):
+ retval = portage.process.spawn(
+ [postsync, dosyncuri], env=settings.environ())
+ if retval != os.EX_OK:
+ writemsg_level(
+ " %s spawn failed of %s\n" % (bad("*"), postsync,),
+ level=logging.ERROR, noiselevel=-1)
+
+ if mybestpv != mypvs and "--quiet" not in myopts:
+ print()
+ print(red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended")
+ print(red(" * ")+"that you update portage now, before any other packages are updated.")
+ print()
+ print(red(" * ")+"To update portage, run 'emerge portage' now.")
+ print()
+
+ display_news_notification(root_config, myopts)
+ return os.EX_OK
+
+def action_uninstall(settings, trees, ldpath_mtimes,
+ opts, action, files, spinner):
+ # For backward compat, some actions do not require leading '='.
+ ignore_missing_eq = action in ('clean', 'unmerge')
+ root = settings['ROOT']
+ vardb = trees[root]['vartree'].dbapi
+ valid_atoms = []
+ lookup_owners = []
+
+ # Ensure atoms are valid before calling unmerge().
+ # For backward compat, leading '=' is not required.
+ for x in files:
+ if is_valid_package_atom(x, allow_repo=True) or \
+ (ignore_missing_eq and is_valid_package_atom('=' + x)):
+
+ try:
+ atom = dep_expand(x, mydb=vardb, settings=settings)
+ except portage.exception.AmbiguousPackageName as e:
+ msg = "The short ebuild name \"" + x + \
+ "\" is ambiguous. Please specify " + \
+ "one of the following " + \
+ "fully-qualified ebuild names instead:"
+ for line in textwrap.wrap(msg, 70):
+ writemsg_level("!!! %s\n" % (line,),
+ level=logging.ERROR, noiselevel=-1)
+ for i in e.args[0]:
+ writemsg_level(" %s\n" % colorize("INFORM", i),
+ level=logging.ERROR, noiselevel=-1)
+ writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
+ return 1
+ else:
+ if atom.use and atom.use.conditional:
+ writemsg_level(
+ ("\n\n!!! '%s' contains a conditional " + \
+ "which is not allowed.\n") % (x,),
+ level=logging.ERROR, noiselevel=-1)
+ writemsg_level(
+ "!!! Please check ebuild(5) for full details.\n",
+ level=logging.ERROR)
+ return 1
+ valid_atoms.append(atom)
+
+ elif x.startswith(os.sep):
+ if not x.startswith(root):
+ writemsg_level(("!!! '%s' does not start with" + \
+ " $ROOT.\n") % x, level=logging.ERROR, noiselevel=-1)
+ return 1
+ # Queue these up since it's most efficient to handle
+ # multiple files in a single iter_owners() call.
+ lookup_owners.append(x)
+
+ elif x.startswith(SETPREFIX) and action == "deselect":
+ valid_atoms.append(x)
+
+ elif "*" in x:
+ try:
+ ext_atom = Atom(x, allow_repo=True, allow_wildcard=True)
+ except InvalidAtom:
+ msg = []
+ msg.append("'%s' is not a valid package atom." % (x,))
+ msg.append("Please check ebuild(5) for full details.")
+ writemsg_level("".join("!!! %s\n" % line for line in msg),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ for cp in vardb.cp_all():
+ if extended_cp_match(ext_atom.cp, cp):
+ atom = cp
+ if ext_atom.slot:
+ atom += ":" + ext_atom.slot
+ if ext_atom.repo:
+ atom += "::" + ext_atom.repo
+
+ if vardb.match(atom):
+ valid_atoms.append(Atom(atom, allow_repo=True))
+
+ else:
+ msg = []
+ msg.append("'%s' is not a valid package atom." % (x,))
+ msg.append("Please check ebuild(5) for full details.")
+ writemsg_level("".join("!!! %s\n" % line for line in msg),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ if lookup_owners:
+ relative_paths = []
+ search_for_multiple = False
+ if len(lookup_owners) > 1:
+ search_for_multiple = True
+
+ for x in lookup_owners:
+ if not search_for_multiple and os.path.isdir(x):
+ search_for_multiple = True
+ relative_paths.append(x[len(root)-1:])
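+ # e.g. with a hypothetical root of "/chroot/", the path
+ # "/chroot/usr/bin/foo" is reduced to "/usr/bin/foo", the
+ # root-relative form that iter_owners() operates on.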
+
+ owners = set()
+ for pkg, relative_path in \
+ vardb._owners.iter_owners(relative_paths):
+ owners.add(pkg.mycpv)
+ if not search_for_multiple:
+ break
+
+ if owners:
+ for cpv in owners:
+ slot = vardb.aux_get(cpv, ['SLOT'])[0]
+ if not slot:
+ # portage now masks packages with missing slot, but it's
+ # possible that one was installed by an older version
+ atom = portage.cpv_getkey(cpv)
+ else:
+ atom = '%s:%s' % (portage.cpv_getkey(cpv), slot)
+ valid_atoms.append(portage.dep.Atom(atom))
+ else:
+ writemsg_level(("!!! '%s' is not claimed " + \
+ "by any package.\n") % lookup_owners[0],
+ level=logging.WARNING, noiselevel=-1)
+
+ if files and not valid_atoms:
+ return 1
+
+ if action == 'unmerge' and \
+ '--quiet' not in opts and \
+ '--quiet-unmerge-warn' not in opts:
+ msg = "This action can remove important packages! " + \
+ "In order to be safer, use " + \
+ "`emerge -pv --depclean <atom>` to check for " + \
+ "reverse dependencies before removing packages."
+ out = portage.output.EOutput()
+ for line in textwrap.wrap(msg, 72):
+ out.ewarn(line)
+
+ if action == 'deselect':
+ return action_deselect(settings, trees, opts, valid_atoms)
+
+ # Create a Scheduler for calls to unmerge(), in order to cause
+ # redirection of ebuild phase output to logs as required for
+ # options such as --quiet.
+ sched = Scheduler(settings, trees, None, opts,
+ spinner)
+ sched._background = sched._background_mode()
+ sched._status_display.quiet = True
+
+ if sched._background:
+ sched.settings.unlock()
+ sched.settings["PORTAGE_BACKGROUND"] = "1"
+ sched.settings.backup_changes("PORTAGE_BACKGROUND")
+ sched.settings.lock()
+ sched.pkgsettings[root] = portage.config(clone=sched.settings)
+
+ if action in ('clean', 'unmerge') or \
+ (action == 'prune' and "--nodeps" in opts):
+ # When given a list of atoms, unmerge them in the order given.
+ ordered = action == 'unmerge'
+ unmerge(trees[settings["ROOT"]]['root_config'], opts, action,
+ valid_atoms, ldpath_mtimes, ordered=ordered,
+ scheduler=sched._sched_iface)
+ rval = os.EX_OK
+ else:
+ rval = action_depclean(settings, trees, ldpath_mtimes,
+ opts, action, valid_atoms, spinner, scheduler=sched._sched_iface)
+
+ return rval
+
+def adjust_configs(myopts, trees):
+ for myroot in trees:
+ mysettings = trees[myroot]["vartree"].settings
+ mysettings.unlock()
+ adjust_config(myopts, mysettings)
+ mysettings.lock()
+
+def adjust_config(myopts, settings):
+ """Make emerge specific adjustments to the config."""
+
+ # Kill noauto as it will break merges otherwise.
+ if "noauto" in settings.features:
+ settings.features.remove('noauto')
+
+ fail_clean = myopts.get('--fail-clean')
+ if fail_clean is not None:
+ if fail_clean is True and \
+ 'fail-clean' not in settings.features:
+ settings.features.add('fail-clean')
+ elif fail_clean == 'n' and \
+ 'fail-clean' in settings.features:
+ settings.features.remove('fail-clean')
+
+ CLEAN_DELAY = 5
+ try:
+ CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
+ except ValueError as e:
+ portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
+ settings["CLEAN_DELAY"], noiselevel=-1)
+ settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
+ settings.backup_changes("CLEAN_DELAY")
+
+ EMERGE_WARNING_DELAY = 10
+ try:
+ EMERGE_WARNING_DELAY = int(settings.get(
+ "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
+ except ValueError as e:
+ portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
+ settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
+ settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
+ settings.backup_changes("EMERGE_WARNING_DELAY")
+
+ if "--quiet" in myopts or "--quiet-build" in myopts:
+ settings["PORTAGE_QUIET"]="1"
+ settings.backup_changes("PORTAGE_QUIET")
+
+ if "--verbose" in myopts:
+ settings["PORTAGE_VERBOSE"] = "1"
+ settings.backup_changes("PORTAGE_VERBOSE")
+
+ # Set so that configs will be merged regardless of remembered status
+ if ("--noconfmem" in myopts):
+ settings["NOCONFMEM"]="1"
+ settings.backup_changes("NOCONFMEM")
+
+ # Set various debug markers... They should be merged somehow.
+ PORTAGE_DEBUG = 0
+ try:
+ PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
+ if PORTAGE_DEBUG not in (0, 1):
+ portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
+ PORTAGE_DEBUG, noiselevel=-1)
+ portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
+ noiselevel=-1)
+ PORTAGE_DEBUG = 0
+ except ValueError as e:
+ portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
+ settings["PORTAGE_DEBUG"], noiselevel=-1)
+ del e
+ if "--debug" in myopts:
+ PORTAGE_DEBUG = 1
+ settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
+ settings.backup_changes("PORTAGE_DEBUG")
+
+ if settings.get("NOCOLOR") not in ("yes","true"):
+ portage.output.havecolor = 1
+
+ """The explicit --color < y | n > option overrides the NOCOLOR environment
+ variable and stdout auto-detection."""
+ if "--color" in myopts:
+ if "y" == myopts["--color"]:
+ portage.output.havecolor = 1
+ settings["NOCOLOR"] = "false"
+ else:
+ portage.output.havecolor = 0
+ settings["NOCOLOR"] = "true"
+ settings.backup_changes("NOCOLOR")
+ elif settings.get('TERM') == 'dumb' or \
+ not sys.stdout.isatty():
+ portage.output.havecolor = 0
+ settings["NOCOLOR"] = "true"
+ settings.backup_changes("NOCOLOR")
+
+def display_missing_pkg_set(root_config, set_name):
+
+ msg = []
+ msg.append(("emerge: There are no sets to satisfy '%s'. " + \
+ "The following sets exist:") % \
+ colorize("INFORM", set_name))
+ msg.append("")
+
+ for s in sorted(root_config.sets):
+ msg.append(" %s" % s)
+ msg.append("")
+
+ writemsg_level("".join("%s\n" % l for l in msg),
+ level=logging.ERROR, noiselevel=-1)
+
+def relative_profile_path(portdir, abs_profile):
+ realpath = os.path.realpath(abs_profile)
+ basepath = os.path.realpath(os.path.join(portdir, "profiles"))
+ if realpath.startswith(basepath):
+ profilever = realpath[1 + len(basepath):]
+ else:
+ profilever = None
+ return profilever
+
+def getportageversion(portdir, target_root, profile, chost, vardb):
+ profilever = None
+ if profile:
+ profilever = relative_profile_path(portdir, profile)
+ if profilever is None:
+ try:
+ for parent in portage.grabfile(
+ os.path.join(profile, 'parent')):
+ profilever = relative_profile_path(portdir,
+ os.path.join(profile, parent))
+ if profilever is not None:
+ break
+ except portage.exception.PortageException:
+ pass
+
+ if profilever is None:
+ try:
+ profilever = "!" + os.readlink(profile)
+ except OSError:
+ pass
+
+ if profilever is None:
+ profilever = "unavailable"
+
+ libcver = []
+ libclist = set()
+ for atom in expand_new_virt(vardb, portage.const.LIBC_PACKAGE_ATOM):
+ if not atom.blocker:
+ libclist.update(vardb.match(atom))
+ if libclist:
+ for cpv in sorted(libclist):
+ libc_split = portage.catpkgsplit(cpv)[1:]
+ if libc_split[-1] == "r0":
+ libc_split = libc_split[:-1]
+ libcver.append("-".join(libc_split))
+ else:
+ libcver = ["unavailable"]
+
+ gccver = getgccversion(chost)
+ unameout=platform.release()+" "+platform.machine()
+
+ return "Portage %s (%s, %s, %s, %s)" % \
+ (portage.VERSION, profilever, gccver, ",".join(libcver), unameout)
+
+def git_sync_timestamps(settings, portdir):
+ """
+ Since git doesn't preserve timestamps, synchronize timestamps between
+ cache entries and ebuilds/eclasses. Assume the cache has the correct timestamp
+ for a given file as long as the file in the working tree is not modified
+ (relative to HEAD).
+ """
+ cache_dir = os.path.join(portdir, "metadata", "cache")
+ if not os.path.isdir(cache_dir):
+ return os.EX_OK
+ writemsg_level(">>> Synchronizing timestamps...\n")
+
+ from portage.cache.cache_errors import CacheError
+ try:
+ cache_db = settings.load_best_module("portdbapi.metadbmodule")(
+ portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
+ except CacheError as e:
+ writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ ec_dir = os.path.join(portdir, "eclass")
+ try:
+ ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
+ if f.endswith(".eclass"))
+ except OSError as e:
+ writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ args = [portage.const.BASH_BINARY, "-c",
+ "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
+ portage._shell_quote(portdir)]
+ import subprocess
+ proc = subprocess.Popen(args, stdout=subprocess.PIPE)
+ modified_files = set(_unicode_decode(l).rstrip("\n") for l in proc.stdout)
+ rval = proc.wait()
+ if rval != os.EX_OK:
+ return rval
+
+ modified_eclasses = set(ec for ec in ec_names \
+ if os.path.join("eclass", ec + ".eclass") in modified_files)
+
+ updated_ec_mtimes = {}
+
+ for cpv in cache_db:
+ cpv_split = portage.catpkgsplit(cpv)
+ if cpv_split is None:
+ writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
+ level=logging.ERROR, noiselevel=-1)
+ continue
+
+ cat, pn, ver, rev = cpv_split
+ cat, pf = portage.catsplit(cpv)
+ relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
+ if relative_eb_path in modified_files:
+ continue
+
+ try:
+ cache_entry = cache_db[cpv]
+ eb_mtime = cache_entry.get("_mtime_")
+ ec_mtimes = cache_entry.get("_eclasses_")
+ except KeyError:
+ writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
+ level=logging.ERROR, noiselevel=-1)
+ continue
+ except CacheError as e:
+ writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
+ (cpv, e), level=logging.ERROR, noiselevel=-1)
+ continue
+
+ if eb_mtime is None:
+ writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
+ level=logging.ERROR, noiselevel=-1)
+ continue
+
+ try:
+ eb_mtime = long(eb_mtime)
+ except ValueError:
+ writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
+ (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
+ continue
+
+ if ec_mtimes is None:
+ writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
+ level=logging.ERROR, noiselevel=-1)
+ continue
+
+ if modified_eclasses.intersection(ec_mtimes):
+ continue
+
+ missing_eclasses = set(ec_mtimes).difference(ec_names)
+ if missing_eclasses:
+ writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
+ (cpv, sorted(missing_eclasses)), level=logging.ERROR,
+ noiselevel=-1)
+ continue
+
+ eb_path = os.path.join(portdir, relative_eb_path)
+ try:
+ current_eb_mtime = os.stat(eb_path)[stat.ST_MTIME]
+ except OSError:
+ writemsg_level("!!! Missing ebuild: %s\n" % \
+ (cpv,), level=logging.ERROR, noiselevel=-1)
+ continue
+
+ inconsistent = False
+ for ec, (ec_path, ec_mtime) in ec_mtimes.items():
+ updated_mtime = updated_ec_mtimes.get(ec)
+ if updated_mtime is not None and updated_mtime != ec_mtime:
+ writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
+ (cpv, ec), level=logging.ERROR, noiselevel=-1)
+ inconsistent = True
+ break
+
+ if inconsistent:
+ continue
+
+ if current_eb_mtime != eb_mtime:
+ os.utime(eb_path, (eb_mtime, eb_mtime))
+
+ for ec, (ec_path, ec_mtime) in ec_mtimes.items():
+ if ec in updated_ec_mtimes:
+ continue
+ ec_path = os.path.join(ec_dir, ec + ".eclass")
+ current_mtime = os.stat(ec_path)[stat.ST_MTIME]
+ if current_mtime != ec_mtime:
+ os.utime(ec_path, (ec_mtime, ec_mtime))
+ updated_ec_mtimes[ec] = ec_mtime
+
+ return os.EX_OK
+
+def load_emerge_config(trees=None):
+ kwargs = {}
+ for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
+ v = os.environ.get(envvar, None)
+ if v and v.strip():
+ kwargs[k] = v
+ trees = portage.create_trees(trees=trees, **kwargs)
+
+ for root, root_trees in trees.items():
+ settings = root_trees["vartree"].settings
+ settings._init_dirs()
+ setconfig = load_default_config(settings, root_trees)
+ root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
+
+ settings = trees["/"]["vartree"].settings
+
+ for myroot in trees:
+ if myroot != "/":
+ settings = trees[myroot]["vartree"].settings
+ break
+
+ mtimedbfile = os.path.join(settings['EROOT'], portage.CACHE_PATH, "mtimedb")
+ mtimedb = portage.MtimeDB(mtimedbfile)
+ portage.output._init(config_root=settings['PORTAGE_CONFIGROOT'])
+ QueryCommand._db = trees
+ return settings, trees, mtimedb
+
+def chk_updated_cfg_files(eroot, config_protect):
+ target_root = eroot
+ result = list(
+ portage.util.find_updated_config_files(target_root, config_protect))
+
+ for x in result:
+ writemsg_level("\n %s " % (colorize("WARN", "* IMPORTANT:"),),
+ level=logging.INFO, noiselevel=-1)
+ if not x[1]: # it's a protected file
+ writemsg_level("config file '%s' needs updating.\n" % x[0],
+ level=logging.INFO, noiselevel=-1)
+ else: # it's a protected dir
+ if len(x[1]) == 1:
+ head, tail = os.path.split(x[1][0])
+ tail = tail[len("._cfg0000_"):]
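+ # e.g. "._cfg0000_make.conf" is reduced to "make.conf"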
+ fpath = os.path.join(head, tail)
+ writemsg_level("config file '%s' needs updating.\n" % fpath,
+ level=logging.INFO, noiselevel=-1)
+ else:
+ writemsg_level("%d config files in '%s' need updating.\n" % \
+ (len(x[1]), x[0]), level=logging.INFO, noiselevel=-1)
+
+ if result:
+ print(" "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")\
+ + " section of the " + bold("emerge"))
+ print(" "+yellow("*")+" man page to learn how to update config files.")
+
+def display_news_notification(root_config, myopts):
+ target_root = root_config.settings['EROOT']
+ trees = root_config.trees
+ settings = trees["vartree"].settings
+ portdb = trees["porttree"].dbapi
+ vardb = trees["vartree"].dbapi
+ NEWS_PATH = os.path.join("metadata", "news")
+ UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
+ newsReaderDisplay = False
+ update = "--pretend" not in myopts
+ if "news" not in settings.features:
+ return
+
+ for repo in portdb.getRepositories():
+ unreadItems = checkUpdatedNewsItems(
+ portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
+ if unreadItems:
+ if not newsReaderDisplay:
+ newsReaderDisplay = True
+ print()
+ print(colorize("WARN", " * IMPORTANT:"), end=' ')
+ print("%s news items need reading for repository '%s'." % (unreadItems, repo))
+
+
+ if newsReaderDisplay:
+ print(colorize("WARN", " *"), end=' ')
+ print("Use " + colorize("GOOD", "eselect news") + " to read news items.")
+ print()
+
+def getgccversion(chost):
+ """
+ @rtype: C{str}
+ @return: the current in-use gcc version
+ """
+
+ gcc_ver_command = 'gcc -dumpversion'
+ gcc_ver_prefix = 'gcc-'
+
+ gcc_not_found_error = red(
+ "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
+ "!!! to update the environment of this terminal and possibly\n" +
+ "!!! other terminals also.\n"
+ )
+
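+ # e.g. if `gcc-config -c` prints a hypothetical
+ # "x86_64-pc-linux-gnu-4.5.3" for that chost, the replace()
+ # below yields "gcc-4.5.3".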
+ mystatus, myoutput = subprocess_getstatusoutput("gcc-config -c")
+ if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
+ return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
+
+ mystatus, myoutput = subprocess_getstatusoutput(
+ chost + "-" + gcc_ver_command)
+ if mystatus == os.EX_OK:
+ return gcc_ver_prefix + myoutput
+
+ mystatus, myoutput = subprocess_getstatusoutput(gcc_ver_command)
+ if mystatus == os.EX_OK:
+ return gcc_ver_prefix + myoutput
+
+ portage.writemsg(gcc_not_found_error, noiselevel=-1)
+ return "[unavailable]"
+
+def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
+ update=False):
+ """
+ Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items.
+ Returns the number of unread (yet relevant) items.
+
+ @param portdb: a portage tree database
+ @type portdb: portdbapi
+ @param vardb: an installed package database
+ @type vardb: vardbapi
+ @param NEWS_PATH: path to the news directory, relative to the repository root
+ @type NEWS_PATH: str
+ @param UNREAD_PATH: path to the directory where unread news items are tracked
+ @type UNREAD_PATH: str
+ @param repo_id: identifier of the repository to check
+ @type repo_id: str
+ @rtype: Integer
+ @return: the number of unread but relevant news items
+
+ """
+ from portage.news import NewsManager
+ manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
+ return manager.getUnreadItems(repo_id, update=update)
+
diff --git a/portage_with_autodep/pym/_emerge/clear_caches.py b/portage_with_autodep/pym/_emerge/clear_caches.py
new file mode 100644
index 0000000..7b7c5ec
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/clear_caches.py
@@ -0,0 +1,19 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import gc
+from portage.util.listdir import dircache
+
+def clear_caches(trees):
+ for d in trees.values():
+ d["porttree"].dbapi.melt()
+ d["porttree"].dbapi._aux_cache.clear()
+ d["bintree"].dbapi._aux_cache.clear()
+ d["bintree"].dbapi._clear_cache()
+ if d["vartree"].dbapi._linkmap is None:
+ # preserve-libs is entirely disabled
+ pass
+ else:
+ d["vartree"].dbapi._linkmap._clear_cache()
+ dircache.clear()
+ gc.collect()
diff --git a/portage_with_autodep/pym/_emerge/countdown.py b/portage_with_autodep/pym/_emerge/countdown.py
new file mode 100644
index 0000000..5abdc8a
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/countdown.py
@@ -0,0 +1,22 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import sys
+import time
+
+from portage.output import colorize
+
+def countdown(secs=5, doing="Starting"):
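+ # For example, countdown(3, "Unmerging") warns the user, then
+ # prints "3 2 1 " at one tick per second before returning.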
+ if secs:
+ print(">>> Waiting",secs,"seconds before starting...")
+ print(">>> (Control-C to abort)...\n"+doing+" in: ", end=' ')
+ ticks=list(range(secs))
+ ticks.reverse()
+ for sec in ticks:
+ sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
+ sys.stdout.flush()
+ time.sleep(1)
+ print()
+
diff --git a/portage_with_autodep/pym/_emerge/create_depgraph_params.py b/portage_with_autodep/pym/_emerge/create_depgraph_params.py
new file mode 100644
index 0000000..44dceda
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/create_depgraph_params.py
@@ -0,0 +1,72 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+from portage.util import writemsg_level
+
+def create_depgraph_params(myopts, myaction):
+ # Configure emerge engine parameters.
+ #
+ # self: include _this_ package regardless of whether it is merged.
+ # selective: exclude the package if it is merged
+ # recurse: go into the dependencies
+ # deep: go into the dependencies of already merged packages
+ # empty: pretend nothing is merged
+ # complete: completely account for all known dependencies
+ # remove: build graph for use in removing packages
+ # rebuilt_binaries: replace installed packages with rebuilt binaries
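+ #
+ # For example, hypothetical options "--update --deep" (with --deep
+ # stored as True) would yield:
+ # {"recurse": True, "selective": True, "deep": True}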
+ myparams = {"recurse" : True}
+
+ bdeps = myopts.get("--with-bdeps")
+ if bdeps is not None:
+ myparams["bdeps"] = bdeps
+
+ if myaction == "remove":
+ myparams["remove"] = True
+ myparams["complete"] = True
+ myparams["selective"] = True
+ return myparams
+
+ if "--update" in myopts or \
+ "--newuse" in myopts or \
+ "--reinstall" in myopts or \
+ "--noreplace" in myopts or \
+ myopts.get("--selective", "n") != "n":
+ myparams["selective"] = True
+
+ deep = myopts.get("--deep")
+ if deep is not None and deep != 0:
+ myparams["deep"] = deep
+ if ("--complete-graph" in myopts or "--rebuild-if-new-rev" in myopts or
+ "--rebuild-if-new-ver" in myopts or "--rebuild-if-unbuilt" in myopts):
+ myparams["complete"] = True
+ if "--emptytree" in myopts:
+ myparams["empty"] = True
+ myparams["deep"] = True
+ myparams.pop("selective", None)
+
+ if "--nodeps" in myopts:
+ myparams.pop("recurse", None)
+ myparams.pop("deep", None)
+ myparams.pop("complete", None)
+
+ rebuilt_binaries = myopts.get('--rebuilt-binaries')
+ if rebuilt_binaries is True or \
+ rebuilt_binaries != 'n' and \
+ '--usepkgonly' in myopts and \
+ myopts.get('--deep') is True and \
+ '--update' in myopts:
+ myparams['rebuilt_binaries'] = True
+
+ if myopts.get("--selective") == "n":
+ # --selective=n can be used to remove selective
+ # behavior that may have been implied by some
+ # other option like --update.
+ myparams.pop("selective", None)
+
+ if '--debug' in myopts:
+ writemsg_level('\n\nmyparams %s\n\n' % myparams,
+ noiselevel=-1, level=logging.DEBUG)
+
+ return myparams
+
diff --git a/portage_with_autodep/pym/_emerge/create_world_atom.py b/portage_with_autodep/pym/_emerge/create_world_atom.py
new file mode 100644
index 0000000..fa7cffc
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/create_world_atom.py
@@ -0,0 +1,92 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.dep import _repo_separator
+
+def create_world_atom(pkg, args_set, root_config):
+ """Create a new atom for the world file if one does not exist. If the
+ argument atom is precise enough to identify a specific slot then a slot
+ atom will be returned. Atoms that are in the system set may also be stored
+ in world since system atoms can only match one slot while world atoms can
+ be greedy with respect to slots. Unslotted system packages will not be
+ stored in world."""
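+ # Hypothetical example: an argument atom "=x11-libs/gtk+-2.24.4"
+ # matching a package in SLOT "2", with other slots available,
+ # would be stored as the slot atom "x11-libs/gtk+:2".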
+
+ arg_atom = args_set.findAtomForPackage(pkg)
+ if not arg_atom:
+ return None
+ cp = arg_atom.cp
+ new_world_atom = cp
+ if arg_atom.repo:
+ new_world_atom += _repo_separator + arg_atom.repo
+ sets = root_config.sets
+ portdb = root_config.trees["porttree"].dbapi
+ vardb = root_config.trees["vartree"].dbapi
+ available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
+ for cpv in portdb.match(cp))
+ slotted = len(available_slots) > 1 or \
+ (len(available_slots) == 1 and "0" not in available_slots)
+ if not slotted:
+ # check the vdb in case this is multislot
+ available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
+ for cpv in vardb.match(cp))
+ slotted = len(available_slots) > 1 or \
+ (len(available_slots) == 1 and "0" not in available_slots)
+ if slotted and arg_atom.without_repo != cp:
+ # If the user gave a specific atom, store it as a
+ # slot atom in the world file.
+ slot_atom = pkg.slot_atom
+
+ # For USE=multislot, there are a couple of cases to
+ # handle here:
+ #
+ # 1) SLOT="0", but the real SLOT spontaneously changed to some
+ # unknown value, so just record an unslotted atom.
+ #
+ # 2) SLOT comes from an installed package and there is no
+ # matching SLOT in the portage tree.
+ #
+ # Make sure that the slot atom is available in either the
+ # portdb or the vardb, since otherwise the user certainly
+ # doesn't want the SLOT atom recorded in the world file
+ # (case 1 above). If it's only available in the vardb,
+ # the user may be trying to prevent a USE=multislot
+ # package from being removed by --depclean (case 2 above).
+
+ mydb = portdb
+ if not portdb.match(slot_atom):
+ # SLOT seems to come from an installed multislot package
+ mydb = vardb
+ # If there is no installed package matching the SLOT atom,
+ # it probably changed SLOT spontaneously due to USE=multislot,
+ # so just record an unslotted atom.
+ if vardb.match(slot_atom):
+ # Now verify that the argument is precise
+ # enough to identify a specific slot.
+ matches = mydb.match(arg_atom)
+ matched_slots = set()
+ for cpv in matches:
+ matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
+ if len(matched_slots) == 1:
+ new_world_atom = slot_atom
+ if arg_atom.repo:
+ new_world_atom += _repo_separator + arg_atom.repo
+
+ if new_world_atom == sets["selected"].findAtomForPackage(pkg):
+ # Both atoms would be identical, so there's nothing to add.
+ return None
+ if not slotted and not arg_atom.repo:
+ # Unlike world atoms, system atoms are not greedy for slots, so they
+ # can't be safely excluded from world if they are slotted.
+ system_atom = sets["system"].findAtomForPackage(pkg)
+ if system_atom:
+ if not system_atom.cp.startswith("virtual/"):
+ return None
+ # System virtuals aren't safe to exclude from world since they can
+ # match multiple old-style virtuals but only one of them will be
+ # pulled in by update or depclean.
+ providers = portdb.settings.getvirtuals().get(system_atom.cp)
+ if providers and len(providers) == 1 and \
+ providers[0].cp == arg_atom.cp:
+ return None
+ return new_world_atom
+
diff --git a/portage_with_autodep/pym/_emerge/depgraph.py b/portage_with_autodep/pym/_emerge/depgraph.py
new file mode 100644
index 0000000..5b48aca
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/depgraph.py
@@ -0,0 +1,7029 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import difflib
+import errno
+import io
+import logging
+import stat
+import sys
+import textwrap
+from collections import deque
+from itertools import chain
+
+import portage
+from portage import os, OrderedDict
+from portage import _unicode_decode, _unicode_encode, _encodings
+from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH
+from portage.dbapi import dbapi
+from portage.dep import Atom, extract_affecting_use, check_required_use, human_readable_required_use, _repo_separator
+from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use
+from portage.exception import InvalidAtom, InvalidDependString, PortageException
+from portage.output import colorize, create_color_func, \
+ darkgreen, green
+bad = create_color_func("BAD")
+from portage.package.ebuild.getmaskingstatus import \
+ _getmaskingstatus, _MaskReason
+from portage._sets import SETPREFIX
+from portage._sets.base import InternalPackageSet
+from portage.util import ConfigProtect, shlex_split, new_protect_filename
+from portage.util import cmp_sort_key, writemsg, writemsg_stdout
+from portage.util import ensure_dirs
+from portage.util import writemsg_level, write_atomic
+from portage.util.digraph import digraph
+from portage.util.listdir import _ignorecvs_dirs
+from portage.versions import catpkgsplit
+
+from _emerge.AtomArg import AtomArg
+from _emerge.Blocker import Blocker
+from _emerge.BlockerCache import BlockerCache
+from _emerge.BlockerDepPriority import BlockerDepPriority
+from _emerge.countdown import countdown
+from _emerge.create_world_atom import create_world_atom
+from _emerge.Dependency import Dependency
+from _emerge.DependencyArg import DependencyArg
+from _emerge.DepPriority import DepPriority
+from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
+from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
+from _emerge.FakeVartree import FakeVartree
+from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
+from _emerge.is_valid_package_atom import insert_category_into_atom, \
+ is_valid_package_atom
+from _emerge.Package import Package
+from _emerge.PackageArg import PackageArg
+from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
+from _emerge.RootConfig import RootConfig
+from _emerge.search import search
+from _emerge.SetArg import SetArg
+from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
+from _emerge.UnmergeDepPriority import UnmergeDepPriority
+from _emerge.UseFlagDisplay import pkg_use_display
+from _emerge.userquery import userquery
+
+from _emerge.resolver.backtracking import Backtracker, BacktrackParameter
+from _emerge.resolver.slot_collision import slot_conflict_handler
+from _emerge.resolver.circular_dependency import circular_dependency_handler
+from _emerge.resolver.output import Display
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+ long = int
+
+class _scheduler_graph_config(object):
+ def __init__(self, trees, pkg_cache, graph, mergelist):
+ self.trees = trees
+ self.pkg_cache = pkg_cache
+ self.graph = graph
+ self.mergelist = mergelist
+
+def _wildcard_set(atoms):
+ pkgs = InternalPackageSet(allow_wildcard=True)
+ for x in atoms:
+ try:
+ x = Atom(x, allow_wildcard=True)
+ except portage.exception.InvalidAtom:
+ x = Atom("*/" + x, allow_wildcard=True)
+ pkgs.add(x)
+ return pkgs
+
+class _frozen_depgraph_config(object):
+
+ def __init__(self, settings, trees, myopts, spinner):
+ self.settings = settings
+ self.target_root = settings["ROOT"]
+ self.myopts = myopts
+ self.edebug = 0
+ if settings.get("PORTAGE_DEBUG", "") == "1":
+ self.edebug = 1
+ self.spinner = spinner
+ self._running_root = trees["/"]["root_config"]
+ self._opts_no_restart = frozenset(["--buildpkgonly",
+ "--fetchonly", "--fetch-all-uri", "--pretend"])
+ self.pkgsettings = {}
+ self.trees = {}
+ self._trees_orig = trees
+ self.roots = {}
+ # All Package instances
+ self._pkg_cache = {}
+ self._highest_license_masked = {}
+ for myroot in trees:
+ self.trees[myroot] = {}
+ # Create a RootConfig instance that references
+ # the FakeVartree instead of the real one.
+ self.roots[myroot] = RootConfig(
+ trees[myroot]["vartree"].settings,
+ self.trees[myroot],
+ trees[myroot]["root_config"].setconfig)
+ for tree in ("porttree", "bintree"):
+ self.trees[myroot][tree] = trees[myroot][tree]
+ self.trees[myroot]["vartree"] = \
+ FakeVartree(trees[myroot]["root_config"],
+ pkg_cache=self._pkg_cache,
+ pkg_root_config=self.roots[myroot])
+ self.pkgsettings[myroot] = portage.config(
+ clone=self.trees[myroot]["vartree"].settings)
+
+ self._required_set_names = set(["world"])
+
+ atoms = ' '.join(myopts.get("--exclude", [])).split()
+ self.excluded_pkgs = _wildcard_set(atoms)
+ atoms = ' '.join(myopts.get("--reinstall-atoms", [])).split()
+ self.reinstall_atoms = _wildcard_set(atoms)
+ atoms = ' '.join(myopts.get("--usepkg-exclude", [])).split()
+ self.usepkg_exclude = _wildcard_set(atoms)
+ atoms = ' '.join(myopts.get("--useoldpkg-atoms", [])).split()
+ self.useoldpkg_atoms = _wildcard_set(atoms)
+ atoms = ' '.join(myopts.get("--rebuild-exclude", [])).split()
+ self.rebuild_exclude = _wildcard_set(atoms)
+ atoms = ' '.join(myopts.get("--rebuild-ignore", [])).split()
+ self.rebuild_ignore = _wildcard_set(atoms)
+
+ self.rebuild_if_new_rev = "--rebuild-if-new-rev" in myopts
+ self.rebuild_if_new_ver = "--rebuild-if-new-ver" in myopts
+ self.rebuild_if_unbuilt = "--rebuild-if-unbuilt" in myopts
+
+class _depgraph_sets(object):
+ def __init__(self):
+ # contains all sets added to the graph
+ self.sets = {}
+ # contains non-set atoms given as arguments
+ self.sets['__non_set_args__'] = InternalPackageSet(allow_repo=True)
+ # contains all atoms from all sets added to the graph, including
+ # atoms given as arguments
+ self.atoms = InternalPackageSet(allow_repo=True)
+ self.atom_arg_map = {}
+
+class _rebuild_config(object):
+ def __init__(self, frozen_config, backtrack_parameters):
+ self._graph = digraph()
+ self._frozen_config = frozen_config
+ self.rebuild_list = backtrack_parameters.rebuild_list.copy()
+ self.orig_rebuild_list = self.rebuild_list.copy()
+ self.reinstall_list = backtrack_parameters.reinstall_list.copy()
+ self.rebuild_if_new_rev = frozen_config.rebuild_if_new_rev
+ self.rebuild_if_new_ver = frozen_config.rebuild_if_new_ver
+ self.rebuild_if_unbuilt = frozen_config.rebuild_if_unbuilt
+ self.rebuild = (self.rebuild_if_new_rev or self.rebuild_if_new_ver or
+ self.rebuild_if_unbuilt)
+
+ def add(self, dep_pkg, dep):
+ parent = dep.collapsed_parent
+ priority = dep.collapsed_priority
+ rebuild_exclude = self._frozen_config.rebuild_exclude
+ rebuild_ignore = self._frozen_config.rebuild_ignore
+ if (self.rebuild and isinstance(parent, Package) and
+ parent.built and (priority.buildtime or priority.runtime) and
+ isinstance(dep_pkg, Package) and
+ not rebuild_exclude.findAtomForPackage(parent) and
+ not rebuild_ignore.findAtomForPackage(dep_pkg)):
+ self._graph.add(dep_pkg, parent, priority)
+
+ def _needs_rebuild(self, dep_pkg):
+ """Check whether packages that depend on dep_pkg need to be rebuilt."""
+ dep_root_slot = (dep_pkg.root, dep_pkg.slot_atom)
+ if dep_pkg.built or dep_root_slot in self.orig_rebuild_list:
+ return False
+
+ if self.rebuild_if_unbuilt:
+ # dep_pkg is being installed from source, so binary
+ # packages for parents are invalid. Force rebuild
+ return True
+
+ trees = self._frozen_config.trees
+ vardb = trees[dep_pkg.root]["vartree"].dbapi
+ if self.rebuild_if_new_rev:
+ # Parent packages are valid if a package with the same
+ # cpv is already installed.
+ return dep_pkg.cpv not in vardb.match(dep_pkg.slot_atom)
+
+ # Otherwise, parent packages are valid if a package with the same
+ # version (excluding revision) is already installed.
+ assert self.rebuild_if_new_ver
+ cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
+ for inst_cpv in vardb.match(dep_pkg.slot_atom):
+ inst_cpv_norev = catpkgsplit(inst_cpv)[:-1]
+ if inst_cpv_norev == cpv_norev:
+ return False
+
+ return True
+
+ def _trigger_rebuild(self, parent, build_deps, runtime_deps):
+ root_slot = (parent.root, parent.slot_atom)
+ if root_slot in self.rebuild_list:
+ return False
+ trees = self._frozen_config.trees
+ children = set(build_deps).intersection(runtime_deps)
+ reinstall = False
+ for slot_atom in children:
+ kids = set([build_deps[slot_atom], runtime_deps[slot_atom]])
+ for dep_pkg in kids:
+ dep_root_slot = (dep_pkg.root, slot_atom)
+ if self._needs_rebuild(dep_pkg):
+ self.rebuild_list.add(root_slot)
+ return True
+ elif ("--usepkg" in self._frozen_config.myopts and
+ (dep_root_slot in self.reinstall_list or
+ dep_root_slot in self.rebuild_list or
+ not dep_pkg.installed)):
+
+ # A direct rebuild dependency is being installed. We
+ # should update the parent as well to the latest binary,
+ # if that binary is valid.
+ #
+ # To validate the binary, we check whether all of the
+ # rebuild dependencies are present on the same binhost.
+ #
+ # 1) If parent is present on the binhost, but one of its
+ # rebuild dependencies is not, then the parent should
+ # be rebuilt from source.
+ # 2) Otherwise, the parent binary is assumed to be valid,
+ # because all of its rebuild dependencies are
+ # consistent.
+ bintree = trees[parent.root]["bintree"]
+ uri = bintree.get_pkgindex_uri(parent.cpv)
+ dep_uri = bintree.get_pkgindex_uri(dep_pkg.cpv)
+ bindb = bintree.dbapi
+ if self.rebuild_if_new_ver and uri and uri != dep_uri:
+ cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
+ for cpv in bindb.match(dep_pkg.slot_atom):
+ if cpv_norev == catpkgsplit(cpv)[:-1]:
+ dep_uri = bintree.get_pkgindex_uri(cpv)
+ if uri == dep_uri:
+ break
+ if uri and uri != dep_uri:
+ # 1) Remote binary package is invalid because it was
+ # built without dep_pkg. Force rebuild.
+ self.rebuild_list.add(root_slot)
+ return True
+ elif (parent.installed and
+ root_slot not in self.reinstall_list):
+ inst_build_time = parent.metadata.get("BUILD_TIME")
+ try:
+ bin_build_time, = bindb.aux_get(parent.cpv,
+ ["BUILD_TIME"])
+ except KeyError:
+ continue
+ if bin_build_time != inst_build_time:
+ # 2) Remote binary package is valid, and local package
+ # is not up to date. Force reinstall.
+ reinstall = True
+ if reinstall:
+ self.reinstall_list.add(root_slot)
+ return reinstall
+
+ def trigger_rebuilds(self):
+ """
+ Trigger rebuilds where necessary. If pkgA has been updated, and pkgB
+ depends on pkgA at both build-time and run-time, pkgB needs to be
+ rebuilt.
+ """
+ need_restart = False
+ graph = self._graph
+ build_deps = {}
+ runtime_deps = {}
+ leaf_nodes = deque(graph.leaf_nodes())
+
+ def ignore_non_runtime(priority):
+ return not priority.runtime
+
+ def ignore_non_buildtime(priority):
+ return not priority.buildtime
+
+ # Trigger rebuilds bottom-up (starting with the leaves) so that parents
+ # will always know which children are being rebuilt.
+ while graph:
+ if not leaf_nodes:
+ # We're interested in intersection of buildtime and runtime,
+ # so ignore edges that do not contain both.
+ leaf_nodes.extend(graph.leaf_nodes(
+ ignore_priority=ignore_non_runtime))
+ if not leaf_nodes:
+ leaf_nodes.extend(graph.leaf_nodes(
+ ignore_priority=ignore_non_buildtime))
+ if not leaf_nodes:
+ # We'll have to drop an edge that is both
+ # buildtime and runtime. This should be
+ # quite rare.
+ leaf_nodes.append(graph.order[-1])
+
+ node = leaf_nodes.popleft()
+ if node not in graph:
+ # This can be triggered by circular dependencies.
+ continue
+ slot_atom = node.slot_atom
+
+ # Remove our leaf node from the graph, keeping track of deps.
+ parents = graph.nodes[node][1].items()
+ graph.remove(node)
+ node_build_deps = build_deps.get(node, {})
+ node_runtime_deps = runtime_deps.get(node, {})
+ for parent, priorities in parents:
+ if parent == node:
+ # Ignore a direct cycle.
+ continue
+ parent_bdeps = build_deps.setdefault(parent, {})
+ parent_rdeps = runtime_deps.setdefault(parent, {})
+ for priority in priorities:
+ if priority.buildtime:
+ parent_bdeps[slot_atom] = node
+ if priority.runtime:
+ parent_rdeps[slot_atom] = node
+ if slot_atom in parent_bdeps and slot_atom in parent_rdeps:
+ parent_rdeps.update(node_runtime_deps)
+ if not graph.child_nodes(parent):
+ leaf_nodes.append(parent)
+
+ # Trigger rebuilds for our leaf node. Because all of our children
+ # have been processed, build_deps and runtime_deps will be
+ # completely filled in, and self.rebuild_list / self.reinstall_list
+ # will tell us whether any of our children need to be rebuilt or
+ # reinstalled.
+ if self._trigger_rebuild(node, node_build_deps, node_runtime_deps):
+ need_restart = True
+
+ return need_restart
+
+
+class _dynamic_depgraph_config(object):
+
+ def __init__(self, depgraph, myparams, allow_backtracking, backtrack_parameters):
+ self.myparams = myparams.copy()
+ self._vdb_loaded = False
+ self._allow_backtracking = allow_backtracking
+ # Maps slot atom to package for each Package added to the graph.
+ self._slot_pkg_map = {}
+ # Maps nodes to the reasons they were selected for reinstallation.
+ self._reinstall_nodes = {}
+ self.mydbapi = {}
+ # Contains a filtered view of preferred packages that are selected
+ # from available repositories.
+ self._filtered_trees = {}
+ # Contains installed packages and new packages that have been added
+ # to the graph.
+ self._graph_trees = {}
+ # Caches visible packages returned from _select_package, for use in
+ # depgraph._iter_atoms_for_pkg() SLOT logic.
+ self._visible_pkgs = {}
+ #contains the args created by select_files
+ self._initial_arg_list = []
+ self.digraph = portage.digraph()
+ # manages sets added to the graph
+ self.sets = {}
+ # contains all nodes pulled in by self.sets
+ self._set_nodes = set()
+ # Contains only Blocker -> Uninstall edges
+ self._blocker_uninstalls = digraph()
+ # Contains only Package -> Blocker edges
+ self._blocker_parents = digraph()
+ # Contains only irrelevant Package -> Blocker edges
+ self._irrelevant_blockers = digraph()
+ # Contains only unsolvable Package -> Blocker edges
+ self._unsolvable_blockers = digraph()
+ # Contains all Blocker -> Blocked Package edges
+ self._blocked_pkgs = digraph()
+ # Contains world packages that have been protected from
+ # uninstallation but may not have been added to the graph
+ # if the graph is not complete yet.
+ self._blocked_world_pkgs = {}
+ # Contains packages whose dependencies have been traversed.
+ # This is used to check whether we have accounted for blockers
+ # relevant to a package.
+ self._traversed_pkg_deps = set()
+ self._slot_collision_info = {}
+ # Slot collision nodes are not allowed to block other packages since
+ # blocker validation is only able to account for one package per slot.
+ self._slot_collision_nodes = set()
+ self._parent_atoms = {}
+ self._slot_conflict_parent_atoms = set()
+ self._slot_conflict_handler = None
+ self._circular_dependency_handler = None
+ self._serialized_tasks_cache = None
+ self._scheduler_graph = None
+ self._displayed_list = None
+ self._pprovided_args = []
+ self._missing_args = []
+ self._masked_installed = set()
+ self._masked_license_updates = set()
+ self._unsatisfied_deps_for_display = []
+ self._unsatisfied_blockers_for_display = None
+ self._circular_deps_for_display = None
+ self._dep_stack = []
+ self._dep_disjunctive_stack = []
+ self._unsatisfied_deps = []
+ self._initially_unsatisfied_deps = []
+ self._ignored_deps = []
+ self._highest_pkg_cache = {}
+
+ self._needed_unstable_keywords = backtrack_parameters.needed_unstable_keywords
+ self._needed_p_mask_changes = backtrack_parameters.needed_p_mask_changes
+ self._needed_license_changes = backtrack_parameters.needed_license_changes
+ self._needed_use_config_changes = backtrack_parameters.needed_use_config_changes
+ self._runtime_pkg_mask = backtrack_parameters.runtime_pkg_mask
+ self._need_restart = False
+ # For conditions that always require user intervention, such as
+ # unsatisfied REQUIRED_USE (currently has no autounmask support).
+ self._skip_restart = False
+ self._backtrack_infos = {}
+
+ self._autounmask = depgraph._frozen_config.myopts.get('--autounmask') != 'n'
+ self._success_without_autounmask = False
+ self._traverse_ignored_deps = False
+
+ for myroot in depgraph._frozen_config.trees:
+ self.sets[myroot] = _depgraph_sets()
+ self._slot_pkg_map[myroot] = {}
+ vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
+ # This dbapi instance will model the state that the vdb will
+ # have after new packages have been installed.
+ fakedb = PackageVirtualDbapi(vardb.settings)
+
+ self.mydbapi[myroot] = fakedb
+ def graph_tree():
+ pass
+ graph_tree.dbapi = fakedb
+ self._graph_trees[myroot] = {}
+ self._filtered_trees[myroot] = {}
+ # Substitute the graph tree for the vartree in dep_check() since we
+ # want atom selections to be consistent with package selections
+ # that have already been made.
+ self._graph_trees[myroot]["porttree"] = graph_tree
+ self._graph_trees[myroot]["vartree"] = graph_tree
+ self._graph_trees[myroot]["graph_db"] = graph_tree.dbapi
+ self._graph_trees[myroot]["graph"] = self.digraph
+ def filtered_tree():
+ pass
+ filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
+ self._filtered_trees[myroot]["porttree"] = filtered_tree
+ self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)
+
+ # Passing in graph_tree as the vartree here could lead to better
+ # atom selections in some cases by causing atoms for packages that
+ # have been added to the graph to be preferred over other choices.
+ # However, it can trigger atom selections that result in
+ # unresolvable direct circular dependencies. For example, this
+ # happens with gwydion-dylan which depends on either itself or
+ # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
+ # gwydion-dylan-bin needs to be selected in order to avoid
+ # an unresolvable direct circular dependency.
+ #
+ # To solve the problem described above, pass in "graph_db" so that
+ # packages that have been added to the graph are distinguishable
+ # from other available packages and installed packages. Also, pass
+ # the parent package into self._select_atoms() calls so that
+ # unresolvable direct circular dependencies can be detected and
+ # avoided when possible.
+ self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
+ self._filtered_trees[myroot]["graph"] = self.digraph
+ self._filtered_trees[myroot]["vartree"] = \
+ depgraph._frozen_config.trees[myroot]["vartree"]
+
+ dbs = []
+ # (db, pkg_type, built, installed, db_keys)
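+ # The append order below encodes selection preference: ebuild
+ # repositories first (skipped with --usepkgonly), then binary
+ # packages (only with --usepkg), and installed packages last.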
+ if "remove" in self.myparams:
+ # For removal operations, use _dep_check_composite_db
+ # for availability and visibility checks. This provides
+ # consistency with install operations, so we don't
+ # get install/uninstall cycles like in bug #332719.
+ self._graph_trees[myroot]["porttree"] = filtered_tree
+ else:
+ if "--usepkgonly" not in depgraph._frozen_config.myopts:
+ portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
+ db_keys = list(portdb._aux_cache_keys)
+ dbs.append((portdb, "ebuild", False, False, db_keys))
+
+ if "--usepkg" in depgraph._frozen_config.myopts:
+ bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
+ db_keys = list(bindb._aux_cache_keys)
+ dbs.append((bindb, "binary", True, False, db_keys))
+
+ vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
+ db_keys = list(depgraph._frozen_config._trees_orig[myroot
+ ]["vartree"].dbapi._aux_cache_keys)
+ dbs.append((vardb, "installed", True, True, db_keys))
+ self._filtered_trees[myroot]["dbs"] = dbs
+
+class depgraph(object):
+
+ pkg_tree_map = RootConfig.pkg_tree_map
+
+ _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
+
+ def __init__(self, settings, trees, myopts, myparams, spinner,
+ frozen_config=None, backtrack_parameters=BacktrackParameter(), allow_backtracking=False):
+ if frozen_config is None:
+ frozen_config = _frozen_depgraph_config(settings, trees,
+ myopts, spinner)
+ self._frozen_config = frozen_config
+ self._dynamic_config = _dynamic_depgraph_config(self, myparams,
+ allow_backtracking, backtrack_parameters)
+ self._rebuild = _rebuild_config(frozen_config, backtrack_parameters)
+
+ self._select_atoms = self._select_atoms_highest_available
+ self._select_package = self._select_pkg_highest_available
+
+ def _load_vdb(self):
+ """
+ Load installed package metadata if appropriate. This used to be called
+ from the constructor, but that wasn't very nice since this procedure
+ is slow and it generates spinner output. So, now it's called on-demand
+ by various methods when necessary.
+ """
+
+ if self._dynamic_config._vdb_loaded:
+ return
+
+ for myroot in self._frozen_config.trees:
+
+ preload_installed_pkgs = \
+ "--nodeps" not in self._frozen_config.myopts
+
+ fake_vartree = self._frozen_config.trees[myroot]["vartree"]
+ if not fake_vartree.dbapi:
+ # This needs to be called for the first depgraph, but not for
+ # backtracking depgraphs that share the same frozen_config.
+ fake_vartree.sync()
+
+ # FakeVartree.sync() populates virtuals, and we want
+ # self.pkgsettings to have them populated too.
+ self._frozen_config.pkgsettings[myroot] = \
+ portage.config(clone=fake_vartree.settings)
+
+ if preload_installed_pkgs:
+ vardb = fake_vartree.dbapi
+ fakedb = self._dynamic_config._graph_trees[
+ myroot]["vartree"].dbapi
+
+ for pkg in vardb:
+ self._spinner_update()
+ # This triggers metadata updates via FakeVartree.
+ vardb.aux_get(pkg.cpv, [])
+ fakedb.cpv_inject(pkg)
+
+ self._dynamic_config._vdb_loaded = True
+
+ def _spinner_update(self):
+ if self._frozen_config.spinner:
+ self._frozen_config.spinner.update()
+
+ def _show_missed_update(self):
+
+ # In order to minimize noise, show only the highest
+ # missed update from each SLOT.
+ missed_updates = {}
+ for pkg, mask_reasons in \
+ self._dynamic_config._runtime_pkg_mask.items():
+ if pkg.installed:
+ # Exclude installed here since we only
+ # want to show available updates.
+ continue
+ k = (pkg.root, pkg.slot_atom)
+ if k in missed_updates:
+ other_pkg, mask_type, parent_atoms = missed_updates[k]
+ if other_pkg > pkg:
+ continue
+ for mask_type, parent_atoms in mask_reasons.items():
+ if not parent_atoms:
+ continue
+ missed_updates[k] = (pkg, mask_type, parent_atoms)
+ break
+
+ if not missed_updates:
+ return
+
+ missed_update_types = {}
+ for pkg, mask_type, parent_atoms in missed_updates.values():
+ missed_update_types.setdefault(mask_type,
+ []).append((pkg, parent_atoms))
+
+ if '--quiet' in self._frozen_config.myopts and \
+ '--debug' not in self._frozen_config.myopts:
+ missed_update_types.pop("slot conflict", None)
+ missed_update_types.pop("missing dependency", None)
+
+ self._show_missed_update_slot_conflicts(
+ missed_update_types.get("slot conflict"))
+
+ self._show_missed_update_unsatisfied_dep(
+ missed_update_types.get("missing dependency"))
+
+ def _show_missed_update_unsatisfied_dep(self, missed_updates):
+
+ if not missed_updates:
+ return
+
+ backtrack_masked = []
+
+ for pkg, parent_atoms in missed_updates:
+
+ try:
+ for parent, root, atom in parent_atoms:
+ self._show_unsatisfied_dep(root, atom, myparent=parent,
+ check_backtrack=True)
+ except self._backtrack_mask:
+ # This is displayed below in abbreviated form.
+ backtrack_masked.append((pkg, parent_atoms))
+ continue
+
+ writemsg("\n!!! The following update has been skipped " + \
+ "due to unsatisfied dependencies:\n\n", noiselevel=-1)
+
+ writemsg(str(pkg.slot_atom), noiselevel=-1)
+ if pkg.root != '/':
+ writemsg(" for %s" % (pkg.root,), noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+
+ for parent, root, atom in parent_atoms:
+ self._show_unsatisfied_dep(root, atom, myparent=parent)
+ writemsg("\n", noiselevel=-1)
+
+ if backtrack_masked:
+ # These are shown in abbreviated form, in order to avoid terminal
+ # flooding from mask messages as reported in bug #285832.
+ writemsg("\n!!! The following update(s) have been skipped " + \
+ "due to unsatisfied dependencies\n" + \
+ "!!! triggered by backtracking:\n\n", noiselevel=-1)
+ for pkg, parent_atoms in backtrack_masked:
+ writemsg(str(pkg.slot_atom), noiselevel=-1)
+ if pkg.root != '/':
+ writemsg(" for %s" % (pkg.root,), noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+
+ def _show_missed_update_slot_conflicts(self, missed_updates):
+
+ if not missed_updates:
+ return
+
+ msg = []
+ msg.append("\nWARNING: One or more updates have been " + \
+ "skipped due to a dependency conflict:\n\n")
+
+ indent = " "
+ for pkg, parent_atoms in missed_updates:
+ msg.append(str(pkg.slot_atom))
+ if pkg.root != '/':
+ msg.append(" for %s" % (pkg.root,))
+ msg.append("\n\n")
+
+ for parent, atom in parent_atoms:
+ msg.append(indent)
+ msg.append(str(pkg))
+
+ msg.append(" conflicts with\n")
+ msg.append(2*indent)
+ if isinstance(parent,
+ (PackageArg, AtomArg)):
+ # For PackageArg and AtomArg types, it's
+ # redundant to display the atom attribute.
+ msg.append(str(parent))
+ else:
+ # Display the specific atom from SetArg or
+ # Package types.
+ msg.append("%s required by %s" % (atom, parent))
+ msg.append("\n")
+ msg.append("\n")
+
+ writemsg("".join(msg), noiselevel=-1)
+
+ def _show_slot_collision_notice(self):
+ """Show an informational message advising the user to mask one of the
+ the packages. In some cases it may be possible to resolve this
+ automatically, but support for backtracking (removal nodes that have
+ already been selected) will be required in order to handle all possible
+ cases.
+ """
+
+ if not self._dynamic_config._slot_collision_info:
+ return
+
+ self._show_merge_list()
+
+ self._dynamic_config._slot_conflict_handler = slot_conflict_handler(self)
+ handler = self._dynamic_config._slot_conflict_handler
+
+ conflict = handler.get_conflict()
+ writemsg(conflict, noiselevel=-1)
+
+ explanation = handler.get_explanation()
+ if explanation:
+ writemsg(explanation, noiselevel=-1)
+ return
+
+ if "--quiet" in self._frozen_config.myopts:
+ return
+
+ msg = []
+ msg.append("It may be possible to solve this problem ")
+ msg.append("by using package.mask to prevent one of ")
+ msg.append("those packages from being selected. ")
+ msg.append("However, it is also possible that conflicting ")
+ msg.append("dependencies exist such that they are impossible to ")
+ msg.append("satisfy simultaneously. If such a conflict exists in ")
+ msg.append("the dependencies of two different packages, then those ")
+ msg.append("packages can not be installed simultaneously.")
+ backtrack_opt = self._frozen_config.myopts.get('--backtrack')
+ if not self._dynamic_config._allow_backtracking and \
+ (backtrack_opt is None or \
+ (backtrack_opt > 0 and backtrack_opt < 30)):
+ msg.append(" You may want to try a larger value of the ")
+ msg.append("--backtrack option, such as --backtrack=30, ")
+ msg.append("in order to see if that will solve this conflict ")
+ msg.append("automatically.")
+
+ for line in textwrap.wrap(''.join(msg), 70):
+ writemsg(line + '\n', noiselevel=-1)
+ writemsg('\n', noiselevel=-1)
+
+ msg = []
+ msg.append("For more information, see MASKED PACKAGES ")
+ msg.append("section in the emerge man page or refer ")
+ msg.append("to the Gentoo Handbook.")
+ for line in textwrap.wrap(''.join(msg), 70):
+ writemsg(line + '\n', noiselevel=-1)
+ writemsg('\n', noiselevel=-1)
+
+ def _process_slot_conflicts(self):
+ """
+ Process slot conflict data to identify specific atoms which
+ lead to conflict. These atoms only match a subset of the
+ packages that have been pulled into a given slot.
+ """
+ for (slot_atom, root), slot_nodes \
+ in self._dynamic_config._slot_collision_info.items():
+
+ all_parent_atoms = set()
+ for pkg in slot_nodes:
+ parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
+ if not parent_atoms:
+ continue
+ all_parent_atoms.update(parent_atoms)
+
+ for pkg in slot_nodes:
+ parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
+ if parent_atoms is None:
+ parent_atoms = set()
+ self._dynamic_config._parent_atoms[pkg] = parent_atoms
+ for parent_atom in all_parent_atoms:
+ if parent_atom in parent_atoms:
+ continue
+ # Use package set for matching since it will match via
+ # PROVIDE when necessary, while match_from_list does not.
+ parent, atom = parent_atom
+ atom_set = InternalPackageSet(
+ initial_atoms=(atom,), allow_repo=True)
+ if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
+ parent_atoms.add(parent_atom)
+ else:
+ self._dynamic_config._slot_conflict_parent_atoms.add(parent_atom)
+
+ def _reinstall_for_flags(self, forced_flags,
+ orig_use, orig_iuse, cur_use, cur_iuse):
+ """Return a set of flags that trigger reinstallation, or None if there
+ are no such flags."""
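+ # Illustrative (hypothetical flag names): if the candidate's IUSE gains
+ # a non-forced flag such as "ssl", or the enabled state of a shared
+ # IUSE flag differs between the installed and candidate package, that
+ # flag is returned and triggers a reinstall.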
+ if "--newuse" in self._frozen_config.myopts or \
+ "--binpkg-respect-use" in self._frozen_config.myopts:
+ flags = set(orig_iuse.symmetric_difference(
+ cur_iuse).difference(forced_flags))
+ flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
+ cur_iuse.intersection(cur_use)))
+ if flags:
+ return flags
+ elif "changed-use" == self._frozen_config.myopts.get("--reinstall"):
+ flags = orig_iuse.intersection(orig_use).symmetric_difference(
+ cur_iuse.intersection(cur_use))
+ if flags:
+ return flags
+ return None
+
+ def _create_graph(self, allow_unsatisfied=False):
+ dep_stack = self._dynamic_config._dep_stack
+ dep_disjunctive_stack = self._dynamic_config._dep_disjunctive_stack
+ while dep_stack or dep_disjunctive_stack:
+ self._spinner_update()
+ while dep_stack:
+ dep = dep_stack.pop()
+ if isinstance(dep, Package):
+ if not self._add_pkg_deps(dep,
+ allow_unsatisfied=allow_unsatisfied):
+ return 0
+ continue
+ if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
+ return 0
+ if dep_disjunctive_stack:
+ if not self._pop_disjunction(allow_unsatisfied):
+ return 0
+ return 1
+
+ def _expand_set_args(self, input_args, add_to_digraph=False):
+ """
+ Iterate over a list of DependencyArg instances and yield all
+ instances given in the input together with additional SetArg
+ instances that are generated from nested sets.
+ @param input_args: An iterable of DependencyArg instances
+ @type input_args: Iterable
+ @param add_to_digraph: If True then add SetArg instances
+ to the digraph, in order to record parent -> child
+ relationships from nested sets
+ @type add_to_digraph: Boolean
+ @rtype: Iterable
+ @returns: All args given in the input together with additional
+ SetArg instances that are generated from nested sets
+ """
+
+ traversed_set_args = set()
+
+ for arg in input_args:
+ if not isinstance(arg, SetArg):
+ yield arg
+ continue
+
+ root_config = arg.root_config
+ depgraph_sets = self._dynamic_config.sets[root_config.root]
+ arg_stack = [arg]
+ while arg_stack:
+ arg = arg_stack.pop()
+ if arg in traversed_set_args:
+ continue
+ traversed_set_args.add(arg)
+
+ if add_to_digraph:
+ self._dynamic_config.digraph.add(arg, None,
+ priority=BlockerDepPriority.instance)
+
+ yield arg
+
+ # Traverse nested sets and add them to the stack
+ # if they're not already in the graph. Also, graph
+ # edges between parent and nested sets.
+ for token in arg.pset.getNonAtoms():
+ if not token.startswith(SETPREFIX):
+ continue
+ s = token[len(SETPREFIX):]
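+ # e.g. a token "@system" yields s == "system" (SETPREFIX is "@")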
+ nested_set = depgraph_sets.sets.get(s)
+ if nested_set is None:
+ nested_set = root_config.sets.get(s)
+ if nested_set is not None:
+ nested_arg = SetArg(arg=token, pset=nested_set,
+ root_config=root_config)
+ arg_stack.append(nested_arg)
+ if add_to_digraph:
+ self._dynamic_config.digraph.add(nested_arg, arg,
+ priority=BlockerDepPriority.instance)
+ depgraph_sets.sets[nested_arg.name] = nested_arg.pset
+
+ def _add_dep(self, dep, allow_unsatisfied=False):
+ debug = "--debug" in self._frozen_config.myopts
+ buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
+ nodeps = "--nodeps" in self._frozen_config.myopts
+ deep = self._dynamic_config.myparams.get("deep", 0)
+ recurse = deep is True or dep.depth <= deep
+ if dep.blocker:
+ if not buildpkgonly and \
+ not nodeps and \
+ not dep.collapsed_priority.ignored and \
+ not dep.collapsed_priority.optional and \
+ dep.parent not in self._dynamic_config._slot_collision_nodes:
+ if dep.parent.onlydeps:
+ # It's safe to ignore blockers if the
+ # parent is an --onlydeps node.
+ return 1
+ # The blocker applies to the root where
+ # the parent is or will be installed.
+ blocker = Blocker(atom=dep.atom,
+ eapi=dep.parent.metadata["EAPI"],
+ priority=dep.priority, root=dep.parent.root)
+ self._dynamic_config._blocker_parents.add(blocker, dep.parent)
+ return 1
+
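+ # No blocker: choose a package to satisfy this dep, unless the
+ # caller already selected one (see the _minimize_packages() note below).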
+ if dep.child is None:
+ dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
+ onlydeps=dep.onlydeps)
+ else:
+ # The caller has selected a specific package
+ # via self._minimize_packages().
+ dep_pkg = dep.child
+ existing_node = self._dynamic_config._slot_pkg_map[
+ dep.root].get(dep_pkg.slot_atom)
+
+ if not dep_pkg:
+ if (dep.collapsed_priority.optional or
+ dep.collapsed_priority.ignored):
+ # This is an unnecessary build-time dep.
+ return 1
+ if allow_unsatisfied:
+ self._dynamic_config._unsatisfied_deps.append(dep)
+ return 1
+ self._dynamic_config._unsatisfied_deps_for_display.append(
+ ((dep.root, dep.atom), {"myparent":dep.parent}))
+
+ # The parent node should not already be in
+ # runtime_pkg_mask, since that would trigger an
+ # infinite backtracking loop.
+ if self._dynamic_config._allow_backtracking:
+ if dep.parent in self._dynamic_config._runtime_pkg_mask:
+ if "--debug" in self._frozen_config.myopts:
+ writemsg(
+ "!!! backtracking loop detected: %s %s\n" % \
+ (dep.parent,
+ self._dynamic_config._runtime_pkg_mask[
+ dep.parent]), noiselevel=-1)
+ elif not self.need_restart():
+ # Do not backtrack if only USE has to be changed in
+ # order to satisfy the dependency.
+ dep_pkg, existing_node = \
+ self._select_package(dep.root, dep.atom.without_use,
+ onlydeps=dep.onlydeps)
+ if dep_pkg is None:
+ self._dynamic_config._backtrack_infos["missing dependency"] = dep
+ self._dynamic_config._need_restart = True
+ if "--debug" in self._frozen_config.myopts:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("backtracking due to unsatisfied dep:")
+ msg.append(" parent: %s" % dep.parent)
+ msg.append(" priority: %s" % dep.priority)
+ msg.append(" root: %s" % dep.root)
+ msg.append(" atom: %s" % dep.atom)
+ msg.append("")
+ writemsg_level("".join("%s\n" % l for l in msg),
+ noiselevel=-1, level=logging.DEBUG)
+
+ return 0
+
+ self._rebuild.add(dep_pkg, dep)
+
+ ignore = dep.collapsed_priority.ignored and \
+ not self._dynamic_config._traverse_ignored_deps
+ if not ignore and not self._add_pkg(dep_pkg, dep):
+ return 0
+ return 1
+
+ def _check_slot_conflict(self, pkg, atom):
+ existing_node = self._dynamic_config._slot_pkg_map[pkg.root].get(pkg.slot_atom)
+ matches = None
+ if existing_node:
+ matches = pkg.cpv == existing_node.cpv
+ if pkg != existing_node and \
+ atom is not None:
+ # Use package set for matching since it will match via
+ # PROVIDE when necessary, while match_from_list does not.
+ matches = bool(InternalPackageSet(initial_atoms=(atom,),
+ allow_repo=True).findAtomForPackage(existing_node,
+ modified_use=self._pkg_use_enabled(existing_node)))
+
+ return (existing_node, matches)
+
+ def _add_pkg(self, pkg, dep):
+ """
+ Adds a package to the depgraph, queues dependencies, and handles
+ slot conflicts.
+ """
+ debug = "--debug" in self._frozen_config.myopts
+ myparent = None
+ priority = None
+ depth = 0
+ if dep is None:
+ dep = Dependency()
+ else:
+ myparent = dep.parent
+ priority = dep.priority
+ depth = dep.depth
+ if priority is None:
+ priority = DepPriority()
+
+ if debug:
+ writemsg_level(
+ "\n%s%s %s\n" % ("Child:".ljust(15), pkg,
+ pkg_use_display(pkg, self._frozen_config.myopts,
+ modified_use=self._pkg_use_enabled(pkg))),
+ level=logging.DEBUG, noiselevel=-1)
+ if isinstance(myparent,
+ (PackageArg, AtomArg)):
+ # For PackageArg and AtomArg types, it's
+ # redundant to display the atom attribute.
+ writemsg_level(
+ "%s%s\n" % ("Parent Dep:".ljust(15), myparent),
+ level=logging.DEBUG, noiselevel=-1)
+ else:
+ # Display the specific atom from SetArg or
+ # Package types.
+ writemsg_level(
+ "%s%s required by %s\n" %
+ ("Parent Dep:".ljust(15), dep.atom, myparent),
+ level=logging.DEBUG, noiselevel=-1)
+
+ # Ensure that the dependencies of the same package
+ # are never processed more than once.
+ previously_added = pkg in self._dynamic_config.digraph
+
+ # select the correct /var database that we'll be checking against
+ vardbapi = self._frozen_config.trees[pkg.root]["vartree"].dbapi
+ pkgsettings = self._frozen_config.pkgsettings[pkg.root]
+
+ arg_atoms = None
+ try:
+ arg_atoms = list(self._iter_atoms_for_pkg(pkg))
+ except portage.exception.InvalidDependString as e:
+ if not pkg.installed:
+ # should have been masked before it was selected
+ raise
+ del e
+
+ # NOTE: REQUIRED_USE checks are delayed until after
+ # package selection, since we want to prompt the user
+ # for USE adjustment rather than have REQUIRED_USE
+ # affect package selection and || dep choices.
+ if not pkg.built and pkg.metadata["REQUIRED_USE"] and \
+ eapi_has_required_use(pkg.metadata["EAPI"]):
+ required_use_is_sat = check_required_use(
+ pkg.metadata["REQUIRED_USE"],
+ self._pkg_use_enabled(pkg),
+ pkg.iuse.is_valid_flag)
+ if not required_use_is_sat:
+ if dep.atom is not None and dep.parent is not None:
+ self._add_parent_atom(pkg, (dep.parent, dep.atom))
+
+ if arg_atoms:
+ for parent_atom in arg_atoms:
+ parent, atom = parent_atom
+ self._add_parent_atom(pkg, parent_atom)
+
+ atom = dep.atom
+ if atom is None:
+ atom = Atom("=" + pkg.cpv)
+ self._dynamic_config._unsatisfied_deps_for_display.append(
+ ((pkg.root, atom), {"myparent":dep.parent}))
+ self._dynamic_config._skip_restart = True
+ return 0
+
+ if not pkg.onlydeps:
+
+ existing_node, existing_node_matches = \
+ self._check_slot_conflict(pkg, dep.atom)
+ slot_collision = False
+ if existing_node:
+ if existing_node_matches:
+ # The existing node can be reused.
+ if arg_atoms:
+ for parent_atom in arg_atoms:
+ parent, atom = parent_atom
+ self._dynamic_config.digraph.add(existing_node, parent,
+ priority=priority)
+ self._add_parent_atom(existing_node, parent_atom)
+ # If a direct circular dependency is not an unsatisfied
+ # buildtime dependency then drop it here since otherwise
+ # it can skew the merge order calculation in an unwanted
+ # way.
+ if existing_node != myparent or \
+ (priority.buildtime and not priority.satisfied):
+ self._dynamic_config.digraph.addnode(existing_node, myparent,
+ priority=priority)
+ if dep.atom is not None and dep.parent is not None:
+ self._add_parent_atom(existing_node,
+ (dep.parent, dep.atom))
+ return 1
+ else:
+ # A slot conflict has occurred.
+ # The existing node should not already be in
+ # runtime_pkg_mask, since that would trigger an
+ # infinite backtracking loop.
+ if self._dynamic_config._allow_backtracking and \
+ existing_node in \
+ self._dynamic_config._runtime_pkg_mask:
+ if "--debug" in self._frozen_config.myopts:
+ writemsg(
+ "!!! backtracking loop detected: %s %s\n" % \
+ (existing_node,
+ self._dynamic_config._runtime_pkg_mask[
+ existing_node]), noiselevel=-1)
+ elif self._dynamic_config._allow_backtracking and \
+ not self._accept_blocker_conflicts() and \
+ not self.need_restart():
+
+ self._add_slot_conflict(pkg)
+ if dep.atom is not None and dep.parent is not None:
+ self._add_parent_atom(pkg, (dep.parent, dep.atom))
+
+ if arg_atoms:
+ for parent_atom in arg_atoms:
+ parent, atom = parent_atom
+ self._add_parent_atom(pkg, parent_atom)
+ self._process_slot_conflicts()
+
+ backtrack_data = []
+ fallback_data = []
+ all_parents = set()
+ # The ordering of backtrack_data can make
+ # a difference here, because both mask actions may lead
+ # to valid, but different, solutions and the one with
+ # 'existing_node' masked is usually the better one. Because
+ # of that, we choose an order such that
+ # the backtracker will first explore the choice with
+ # existing_node masked. The backtracker reverses the
+ # order, so the order it uses is the reverse of the
+ # order shown here. See bug #339606.
+ for to_be_selected, to_be_masked in (existing_node, pkg), (pkg, existing_node):
+ # For missed update messages, find out which
+ # atoms matched to_be_selected that did not
+ # match to_be_masked.
+ parent_atoms = \
+ self._dynamic_config._parent_atoms.get(to_be_selected, set())
+ if parent_atoms:
+ conflict_atoms = self._dynamic_config._slot_conflict_parent_atoms.intersection(parent_atoms)
+ if conflict_atoms:
+ parent_atoms = conflict_atoms
+
+ all_parents.update(parent_atoms)
+
+ all_match = True
+ for parent, atom in parent_atoms:
+ i = InternalPackageSet(initial_atoms=(atom,),
+ allow_repo=True)
+ if not i.findAtomForPackage(to_be_masked):
+ all_match = False
+ break
+
+ if to_be_selected >= to_be_masked:
+ # We only care about the parent atoms
+ # when they trigger a downgrade.
+ parent_atoms = set()
+
+ fallback_data.append((to_be_masked, parent_atoms))
+
+ if all_match:
+ # 'to_be_masked' does not violate any parent atom, which means
+ # there is no point in masking it.
+ pass
+ else:
+ backtrack_data.append((to_be_masked, parent_atoms))
+
+ if not backtrack_data:
+ # This shouldn't happen, but fall back to the old
+ # behavior if this gets triggered somehow.
+ backtrack_data = fallback_data
+
+ if len(backtrack_data) > 1:
+ # NOTE: Generally, we prefer to mask the higher
+ # version since this solves common cases in which a
+ # lower version is needed so that all dependencies
+ # will be satisfied (bug #337178). However, if
+ # existing_node happens to be installed then we
+ # mask that since this is a common case that is
+ # triggered when --update is not enabled.
+ if existing_node.installed:
+ pass
+ elif pkg > existing_node:
+ backtrack_data.reverse()
+
+ to_be_masked = backtrack_data[-1][0]
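+ # backtrack_data[-1] is the mask the backtracker will try first,
+ # since it processes the entries in reverse order (see note above).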
+
+ self._dynamic_config._backtrack_infos["slot conflict"] = backtrack_data
+ self._dynamic_config._need_restart = True
+ if "--debug" in self._frozen_config.myopts:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("backtracking due to slot conflict:")
+ if backtrack_data is fallback_data:
+ msg.append("!!! backtrack_data fallback")
+ msg.append(" first package: %s" % existing_node)
+ msg.append(" second package: %s" % pkg)
+ msg.append(" package to mask: %s" % to_be_masked)
+ msg.append(" slot: %s" % pkg.slot_atom)
+ msg.append(" parents: %s" % ", ".join( \
+ "(%s, '%s')" % (ppkg, atom) for ppkg, atom in all_parents))
+ msg.append("")
+ writemsg_level("".join("%s\n" % l for l in msg),
+ noiselevel=-1, level=logging.DEBUG)
+ return 0
+
+ # A slot collision has occurred. Sometimes this coincides
+ # with unresolvable blockers, so the slot collision will be
+ # shown later if there are no unresolvable blockers.
+ self._add_slot_conflict(pkg)
+ slot_collision = True
+
+ if debug:
+ writemsg_level(
+ "%s%s %s\n" % ("Slot Conflict:".ljust(15),
+ existing_node, pkg_use_display(existing_node,
+ self._frozen_config.myopts,
+ modified_use=self._pkg_use_enabled(existing_node))),
+ level=logging.DEBUG, noiselevel=-1)
+
+ if slot_collision:
+ # Now add this node to the graph so that self.display()
+ # can show use flags and --tree portage.output. This node is
+ # only being partially added to the graph. It must not be
+ # allowed to interfere with the other nodes that have been
+ # added. Do not overwrite data for existing nodes in
+ # self._dynamic_config.mydbapi since that data will be used for blocker
+ # validation.
+ # Even though the graph is now invalid, continue to process
+ # dependencies so that things like --fetchonly can still
+ # function despite collisions.
+ pass
+ elif not previously_added:
+ self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
+ self._dynamic_config.mydbapi[pkg.root].cpv_inject(pkg)
+ self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
+ self._dynamic_config._highest_pkg_cache.clear()
+ self._check_masks(pkg)
+
+ if not pkg.installed:
+ # Allow this package to satisfy old-style virtuals in case it
+ # doesn't already. Any pre-existing providers will be preferred
+ # over this one.
+ try:
+ pkgsettings.setinst(pkg.cpv, pkg.metadata)
+ # For consistency, also update the global virtuals.
+ settings = self._frozen_config.roots[pkg.root].settings
+ settings.unlock()
+ settings.setinst(pkg.cpv, pkg.metadata)
+ settings.lock()
+ except portage.exception.InvalidDependString as e:
+ if not pkg.installed:
+ # should have been masked before it was selected
+ raise
+
+ if arg_atoms:
+ self._dynamic_config._set_nodes.add(pkg)
+
+ # Do this even when addme is False (--onlydeps) so that the
+ # parent/child relationship is always known in case
+ # self._show_slot_collision_notice() needs to be called later.
+ self._dynamic_config.digraph.add(pkg, myparent, priority=priority)
+ if dep.atom is not None and dep.parent is not None:
+ self._add_parent_atom(pkg, (dep.parent, dep.atom))
+
+ if arg_atoms:
+ for parent_atom in arg_atoms:
+ parent, atom = parent_atom
+ self._dynamic_config.digraph.add(pkg, parent, priority=priority)
+ self._add_parent_atom(pkg, parent_atom)
+
+ """ This section determines whether we go deeper into dependencies or not.
+ We want to go deeper on a few occasions:
+ Installing package A, we need to make sure package A's deps are met.
+ emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
+ If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
+ """
+ if arg_atoms:
+ depth = 0
+ pkg.depth = depth
+ deep = self._dynamic_config.myparams.get("deep", 0)
+ recurse = deep is True or depth + 1 <= deep
+ dep_stack = self._dynamic_config._dep_stack
+ if "recurse" not in self._dynamic_config.myparams:
+ return 1
+ elif pkg.installed and not recurse:
+ dep_stack = self._dynamic_config._ignored_deps
+
+ self._spinner_update()
+
+ if not previously_added:
+ dep_stack.append(pkg)
+ return 1
+
+ def _check_masks(self, pkg):
+
+ slot_key = (pkg.root, pkg.slot_atom)
+
+ # Check for upgrades in the same slot that are
+ # masked due to a LICENSE change in a newer
+ # version that is not masked for any other reason.
+ other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
+ if other_pkg is not None and pkg < other_pkg:
+ self._dynamic_config._masked_license_updates.add(other_pkg)
+
+ def _add_parent_atom(self, pkg, parent_atom):
+ parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
+ if parent_atoms is None:
+ parent_atoms = set()
+ self._dynamic_config._parent_atoms[pkg] = parent_atoms
+ parent_atoms.add(parent_atom)
+
+ def _add_slot_conflict(self, pkg):
+ self._dynamic_config._slot_collision_nodes.add(pkg)
+ slot_key = (pkg.slot_atom, pkg.root)
+ slot_nodes = self._dynamic_config._slot_collision_info.get(slot_key)
+ if slot_nodes is None:
+ slot_nodes = set()
+ slot_nodes.add(self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom])
+ self._dynamic_config._slot_collision_info[slot_key] = slot_nodes
+ slot_nodes.add(pkg)
+
+ def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
+
+ mytype = pkg.type_name
+ myroot = pkg.root
+ mykey = pkg.cpv
+ metadata = pkg.metadata
+ myuse = self._pkg_use_enabled(pkg)
+ jbigkey = pkg
+ depth = pkg.depth + 1
+ removal_action = "remove" in self._dynamic_config.myparams
+
+ edepend={}
+ depkeys = ["DEPEND","RDEPEND","PDEPEND"]
+ for k in depkeys:
+ edepend[k] = metadata[k]
+
+ if not pkg.built and \
+ "--buildpkgonly" in self._frozen_config.myopts and \
+ "deep" not in self._dynamic_config.myparams:
+ edepend["RDEPEND"] = ""
+ edepend["PDEPEND"] = ""
+
+ ignore_build_time_deps = False
+ if pkg.built and not removal_action:
+ if self._dynamic_config.myparams.get("bdeps", "n") == "y":
+ # Pull in build time deps as requested, but mark them as
+ # "optional" since they are not strictly required. This allows
+ # more freedom in the merge order calculation for solving
+ # circular dependencies. Don't convert to PDEPEND since that
+ # could make --with-bdeps=y less effective if it is used to
+ # adjust merge order to prevent built_with_use() calls from
+ # failing.
+ pass
+ else:
+ ignore_build_time_deps = True
+
+ if removal_action and self._dynamic_config.myparams.get("bdeps", "y") == "n":
+ # Removal actions never traverse ignored buildtime
+ # dependencies, so it's safe to discard them early.
+ edepend["DEPEND"] = ""
+ ignore_build_time_deps = True
+
+ if removal_action:
+ depend_root = myroot
+ else:
+ depend_root = "/"
+ root_deps = self._frozen_config.myopts.get("--root-deps")
+ if root_deps is not None:
+ if root_deps is True:
+ depend_root = myroot
+ elif root_deps == "rdeps":
+ ignore_build_time_deps = True
+
+ # If rebuild mode is not enabled, it's safe to discard ignored
+ # build-time dependencies. If you want these deps to be traversed
+ # in "complete" mode then you need to specify --with-bdeps=y.
+ if ignore_build_time_deps and \
+ not self._rebuild.rebuild:
+ edepend["DEPEND"] = ""
+
+ deps = (
+ (depend_root, edepend["DEPEND"],
+ self._priority(buildtime=True,
+ optional=(pkg.built or ignore_build_time_deps),
+ ignored=ignore_build_time_deps)),
+ (myroot, edepend["RDEPEND"],
+ self._priority(runtime=True)),
+ (myroot, edepend["PDEPEND"],
+ self._priority(runtime_post=True))
+ )
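+ # Summary of the tuple above: DEPEND is resolved against depend_root
+ # (the build host "/" unless --root-deps or a removal action applies),
+ # while RDEPEND and PDEPEND are resolved against the package's root.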
+
+ debug = "--debug" in self._frozen_config.myopts
+ strict = mytype != "installed"
+
+ for dep_root, dep_string, dep_priority in deps:
+ if not dep_string:
+ continue
+ if debug:
+ writemsg_level("\nParent: %s\n" % (pkg,),
+ noiselevel=-1, level=logging.DEBUG)
+ writemsg_level("Depstring: %s\n" % (dep_string,),
+ noiselevel=-1, level=logging.DEBUG)
+ writemsg_level("Priority: %s\n" % (dep_priority,),
+ noiselevel=-1, level=logging.DEBUG)
+
+ try:
+ dep_string = portage.dep.use_reduce(dep_string,
+ uselist=self._pkg_use_enabled(pkg), is_valid_flag=pkg.iuse.is_valid_flag)
+ except portage.exception.InvalidDependString as e:
+ if not pkg.installed:
+ # should have been masked before it was selected
+ raise
+ del e
+
+ # Try again, but omit the is_valid_flag argument, since
+ # invalid USE conditionals are a common problem and it's
+ # practical to ignore this issue for installed packages.
+ try:
+ dep_string = portage.dep.use_reduce(dep_string,
+ uselist=self._pkg_use_enabled(pkg))
+ except portage.exception.InvalidDependString as e:
+ self._dynamic_config._masked_installed.add(pkg)
+ del e
+ continue
+
+ try:
+ dep_string = list(self._queue_disjunctive_deps(
+ pkg, dep_root, dep_priority, dep_string))
+ except portage.exception.InvalidDependString as e:
+ if pkg.installed:
+ self._dynamic_config._masked_installed.add(pkg)
+ del e
+ continue
+
+ # should have been masked before it was selected
+ raise
+
+ if not dep_string:
+ continue
+
+ dep_string = portage.dep.paren_enclose(dep_string,
+ unevaluated_atom=True)
+
+ if not self._add_pkg_dep_string(
+ pkg, dep_root, dep_priority, dep_string,
+ allow_unsatisfied):
+ return 0
+
+ self._dynamic_config._traversed_pkg_deps.add(pkg)
+ return 1
+
+ def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
+ allow_unsatisfied):
+ _autounmask_backup = self._dynamic_config._autounmask
+ if dep_priority.optional or dep_priority.ignored:
+ # Temporarily disable autounmask for deps that
+ # don't necessarily need to be satisfied.
+ self._dynamic_config._autounmask = False
+ try:
+ return self._wrapped_add_pkg_dep_string(
+ pkg, dep_root, dep_priority, dep_string,
+ allow_unsatisfied)
+ finally:
+ self._dynamic_config._autounmask = _autounmask_backup
+
+ def _wrapped_add_pkg_dep_string(self, pkg, dep_root, dep_priority,
+ dep_string, allow_unsatisfied):
+ depth = pkg.depth + 1
+ deep = self._dynamic_config.myparams.get("deep", 0)
+ recurse_satisfied = deep is True or depth <= deep
+ debug = "--debug" in self._frozen_config.myopts
+ strict = pkg.type_name != "installed"
+
+ if debug:
+ writemsg_level("\nParent: %s\n" % (pkg,),
+ noiselevel=-1, level=logging.DEBUG)
+ writemsg_level("Depstring: %s\n" % (dep_string,),
+ noiselevel=-1, level=logging.DEBUG)
+ writemsg_level("Priority: %s\n" % (dep_priority,),
+ noiselevel=-1, level=logging.DEBUG)
+
+ try:
+ selected_atoms = self._select_atoms(dep_root,
+ dep_string, myuse=self._pkg_use_enabled(pkg), parent=pkg,
+ strict=strict, priority=dep_priority)
+ except portage.exception.InvalidDependString as e:
+ if pkg.installed:
+ self._dynamic_config._masked_installed.add(pkg)
+ return 1
+
+ # should have been masked before it was selected
+ raise
+
+ if debug:
+ writemsg_level("Candidates: %s\n" % \
+ ([str(x) for x in selected_atoms[pkg]],),
+ noiselevel=-1, level=logging.DEBUG)
+
+ root_config = self._frozen_config.roots[dep_root]
+ vardb = root_config.trees["vartree"].dbapi
+ traversed_virt_pkgs = set()
+
+ reinstall_atoms = self._frozen_config.reinstall_atoms
+ for atom, child in self._minimize_children(
+ pkg, dep_priority, root_config, selected_atoms[pkg]):
+
+ # If this was a specially generated virtual atom
+ # from dep_check, map it back to the original, in
+ # order to avoid distortion in places like display
+ # or conflict resolution code.
+ is_virt = hasattr(atom, '_orig_atom')
+ atom = getattr(atom, '_orig_atom', atom)
+
+ if atom.blocker and \
+ (dep_priority.optional or dep_priority.ignored):
+ # For --with-bdeps, ignore build-time only blockers
+ # that originate from built packages.
+ continue
+
+ mypriority = dep_priority.copy()
+ if not atom.blocker:
+ inst_pkgs = [inst_pkg for inst_pkg in
+ reversed(vardb.match_pkgs(atom))
+ if not reinstall_atoms.findAtomForPackage(inst_pkg,
+ modified_use=self._pkg_use_enabled(inst_pkg))]
+ if inst_pkgs:
+ for inst_pkg in inst_pkgs:
+ if self._pkg_visibility_check(inst_pkg):
+ # highest visible
+ mypriority.satisfied = inst_pkg
+ break
+ if not mypriority.satisfied:
+ # none visible, so use highest
+ mypriority.satisfied = inst_pkgs[0]
+
+ dep = Dependency(atom=atom,
+ blocker=atom.blocker, child=child, depth=depth, parent=pkg,
+ priority=mypriority, root=dep_root)
+
+ # In some cases, dep_check will return deps that shouldn't
+ # be processed any further, so they are identified and
+ # discarded here. Try to discard as few as possible since
+ # discarded dependencies reduce the amount of information
+ # available for optimization of merge order.
+ ignored = False
+ if not atom.blocker and \
+ not recurse_satisfied and \
+ mypriority.satisfied and \
+ mypriority.satisfied.visible and \
+ dep.child is not None and \
+ not dep.child.installed and \
+ self._dynamic_config._slot_pkg_map[dep.child.root].get(
+ dep.child.slot_atom) is None:
+ myarg = None
+ if dep.root == self._frozen_config.target_root:
+ try:
+ myarg = next(self._iter_atoms_for_pkg(dep.child))
+ except StopIteration:
+ pass
+ except InvalidDependString:
+ if not dep.child.installed:
+ # This shouldn't happen since the package
+ # should have been masked.
+ raise
+
+ if myarg is None:
+ # Existing child selection may not be valid unless
+ # it's added to the graph immediately, since "complete"
+ # mode may select a different child later.
+ ignored = True
+ dep.child = None
+ self._dynamic_config._ignored_deps.append(dep)
+
+ if not ignored:
+ if dep_priority.ignored and \
+ not self._dynamic_config._traverse_ignored_deps:
+ if is_virt and dep.child is not None:
+ traversed_virt_pkgs.add(dep.child)
+ dep.child = None
+ self._dynamic_config._ignored_deps.append(dep)
+ else:
+ if not self._add_dep(dep,
+ allow_unsatisfied=allow_unsatisfied):
+ return 0
+ if is_virt and dep.child is not None:
+ traversed_virt_pkgs.add(dep.child)
+
+ selected_atoms.pop(pkg)
+
+ # Add selected indirect virtual deps to the graph. This
+ # takes advantage of circular dependency avoidance that's done
+ # by dep_zapdeps. We preserve actual parent/child relationships
+ # here in order to avoid distorting the dependency graph like
+ # <=portage-2.1.6.x did.
+ for virt_dep, atoms in selected_atoms.items():
+
+ virt_pkg = virt_dep.child
+ if virt_pkg not in traversed_virt_pkgs:
+ continue
+
+ if debug:
+ writemsg_level("\nCandidates: %s: %s\n" % \
+ (virt_pkg.cpv, [str(x) for x in atoms]),
+ noiselevel=-1, level=logging.DEBUG)
+
+ if not dep_priority.ignored or \
+ self._dynamic_config._traverse_ignored_deps:
+
+ inst_pkgs = [inst_pkg for inst_pkg in
+ reversed(vardb.match_pkgs(virt_dep.atom))
+ if not reinstall_atoms.findAtomForPackage(inst_pkg,
+ modified_use=self._pkg_use_enabled(inst_pkg))]
+ if inst_pkgs:
+ for inst_pkg in inst_pkgs:
+ if self._pkg_visibility_check(inst_pkg):
+ # highest visible
+ virt_dep.priority.satisfied = inst_pkg
+ break
+ if not virt_dep.priority.satisfied:
+ # none visible, so use highest
+ virt_dep.priority.satisfied = inst_pkgs[0]
+
+ if not self._add_pkg(virt_pkg, virt_dep):
+ return 0
+
+ for atom, child in self._minimize_children(
+ pkg, self._priority(runtime=True), root_config, atoms):
+
+ # If this was a specially generated virtual atom
+ # from dep_check, map it back to the original, in
+ # order to avoid distortion in places like display
+ # or conflict resolution code.
+ is_virt = hasattr(atom, '_orig_atom')
+ atom = getattr(atom, '_orig_atom', atom)
+
+ # This is a GLEP 37 virtual, so its deps are all runtime.
+ mypriority = self._priority(runtime=True)
+ if not atom.blocker:
+ inst_pkgs = [inst_pkg for inst_pkg in
+ reversed(vardb.match_pkgs(atom))
+ if not reinstall_atoms.findAtomForPackage(inst_pkg,
+ modified_use=self._pkg_use_enabled(inst_pkg))]
+ if inst_pkgs:
+ for inst_pkg in inst_pkgs:
+ if self._pkg_visibility_check(inst_pkg):
+ # highest visible
+ mypriority.satisfied = inst_pkg
+ break
+ if not mypriority.satisfied:
+ # none visible, so use highest
+ mypriority.satisfied = inst_pkgs[0]
+
+ # Dependencies of virtuals are considered to have the
+ # same depth as the virtual itself.
+ dep = Dependency(atom=atom,
+ blocker=atom.blocker, child=child, depth=virt_dep.depth,
+ parent=virt_pkg, priority=mypriority, root=dep_root,
+ collapsed_parent=pkg, collapsed_priority=dep_priority)
+
+ ignored = False
+ if not atom.blocker and \
+ not recurse_satisfied and \
+ mypriority.satisfied and \
+ mypriority.satisfied.visible and \
+ dep.child is not None and \
+ not dep.child.installed and \
+ self._dynamic_config._slot_pkg_map[dep.child.root].get(
+ dep.child.slot_atom) is None:
+ myarg = None
+ if dep.root == self._frozen_config.target_root:
+ try:
+ myarg = next(self._iter_atoms_for_pkg(dep.child))
+ except StopIteration:
+ pass
+ except InvalidDependString:
+ if not dep.child.installed:
+ raise
+
+ if myarg is None:
+ ignored = True
+ dep.child = None
+ self._dynamic_config._ignored_deps.append(dep)
+
+ if not ignored:
+ if dep_priority.ignored and \
+ not self._dynamic_config._traverse_ignored_deps:
+ if is_virt and dep.child is not None:
+ traversed_virt_pkgs.add(dep.child)
+ dep.child = None
+ self._dynamic_config._ignored_deps.append(dep)
+ else:
+ if not self._add_dep(dep,
+ allow_unsatisfied=allow_unsatisfied):
+ return 0
+ if is_virt and dep.child is not None:
+ traversed_virt_pkgs.add(dep.child)
+
+ if debug:
+ writemsg_level("\nExiting... %s\n" % (pkg,),
+ noiselevel=-1, level=logging.DEBUG)
+
+ return 1
+
+ def _minimize_children(self, parent, priority, root_config, atoms):
+ """
+ Selects packages to satisfy the given atoms, and minimizes the
+ number of selected packages. This serves to identify and eliminate
+ redundant package selections when multiple atoms happen to specify
+ a version range.
+ """
+
+ atom_pkg_map = {}
+
+ for atom in atoms:
+ if atom.blocker:
+ yield (atom, None)
+ continue
+ dep_pkg, existing_node = self._select_package(
+ root_config.root, atom)
+ if dep_pkg is None:
+ yield (atom, None)
+ continue
+ atom_pkg_map[atom] = dep_pkg
+
+ if len(atom_pkg_map) < 2:
+ for item in atom_pkg_map.items():
+ yield item
+ return
+
+ cp_pkg_map = {}
+ pkg_atom_map = {}
+ for atom, pkg in atom_pkg_map.items():
+ pkg_atom_map.setdefault(pkg, set()).add(atom)
+ cp_pkg_map.setdefault(pkg.cp, set()).add(pkg)
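+ # Hypothetical example: atoms ">=x/y-1" and "<x/y-3" can both be
+ # satisfied by a single x/y package; grouping by cp makes such
+ # overlap detectable below.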
+
+ for cp, pkgs in cp_pkg_map.items():
+ if len(pkgs) < 2:
+ for pkg in pkgs:
+ for atom in pkg_atom_map[pkg]:
+ yield (atom, pkg)
+ continue
+
+ # Use a digraph to identify and eliminate any
+ # redundant package selections.
+ atom_pkg_graph = digraph()
+ cp_atoms = set()
+ for pkg1 in pkgs:
+ for atom in pkg_atom_map[pkg1]:
+ cp_atoms.add(atom)
+ atom_pkg_graph.add(pkg1, atom)
+ atom_set = InternalPackageSet(initial_atoms=(atom,),
+ allow_repo=True)
+ for pkg2 in pkgs:
+ if pkg2 is pkg1:
+ continue
+ if atom_set.findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)):
+ atom_pkg_graph.add(pkg2, atom)
+
+ for pkg in pkgs:
+ eliminate_pkg = True
+ for atom in atom_pkg_graph.parent_nodes(pkg):
+ if len(atom_pkg_graph.child_nodes(atom)) < 2:
+ eliminate_pkg = False
+ break
+ if eliminate_pkg:
+ atom_pkg_graph.remove(pkg)
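+ # A package survives this pass only if, when visited, some atom it
+ # satisfies had no other remaining provider in the graph.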
+
+ # Yield ~, =*, < and <= atoms first, since those are more likely to
+ # cause slot conflicts, and we want those atoms to be displayed
+ # in the resulting slot conflict message (see bug #291142).
+ conflict_atoms = []
+ normal_atoms = []
+ for atom in cp_atoms:
+ conflict = False
+ for child_pkg in atom_pkg_graph.child_nodes(atom):
+ existing_node, matches = \
+ self._check_slot_conflict(child_pkg, atom)
+ if existing_node and not matches:
+ conflict = True
+ break
+ if conflict:
+ conflict_atoms.append(atom)
+ else:
+ normal_atoms.append(atom)
+
+ for atom in chain(conflict_atoms, normal_atoms):
+ child_pkgs = atom_pkg_graph.child_nodes(atom)
+ # If more than one child remains, sort so that the highest
+ # version is yielded; the yield applies to every atom, sorted or not.
+ if len(child_pkgs) > 1:
+ child_pkgs.sort()
+ yield (atom, child_pkgs[-1])
+
+ def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
+ """
+ Queue disjunctive (virtual and ||) deps in self._dynamic_config._dep_disjunctive_stack.
+ Yields non-disjunctive deps. Raises InvalidDependString when
+ necessary.
+ """
+ i = 0
+ while i < len(dep_struct):
+ x = dep_struct[i]
+ if isinstance(x, list):
+ for y in self._queue_disjunctive_deps(
+ pkg, dep_root, dep_priority, x):
+ yield y
+ elif x == "||":
+ self._queue_disjunction(pkg, dep_root, dep_priority,
+ [ x, dep_struct[ i + 1 ] ] )
+ i += 1
+ else:
+ try:
+ x = portage.dep.Atom(x)
+ except portage.exception.InvalidAtom:
+ if not pkg.installed:
+ raise portage.exception.InvalidDependString(
+ "invalid atom: '%s'" % x)
+ else:
+ # Note: Eventually this will check for PROPERTIES=virtual
+ # or whatever other metadata gets implemented for this
+ # purpose.
+ if x.cp.startswith('virtual/'):
+ self._queue_disjunction( pkg, dep_root,
+ dep_priority, [ str(x) ] )
+ else:
+ yield str(x)
+ i += 1
+
+ def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
+ self._dynamic_config._dep_disjunctive_stack.append(
+ (pkg, dep_root, dep_priority, dep_struct))
+
+ def _pop_disjunction(self, allow_unsatisfied):
+ """
+ Pop one disjunctive dep from self._dynamic_config._dep_disjunctive_stack, and use it to
+ populate self._dynamic_config._dep_stack.
+ """
+ pkg, dep_root, dep_priority, dep_struct = \
+ self._dynamic_config._dep_disjunctive_stack.pop()
+ dep_string = portage.dep.paren_enclose(dep_struct,
+ unevaluated_atom=True)
+ if not self._add_pkg_dep_string(
+ pkg, dep_root, dep_priority, dep_string, allow_unsatisfied):
+ return 0
+ return 1
+
+ def _priority(self, **kwargs):
+ if "remove" in self._dynamic_config.myparams:
+ priority_constructor = UnmergeDepPriority
+ else:
+ priority_constructor = DepPriority
+ return priority_constructor(**kwargs)
+
+ def _dep_expand(self, root_config, atom_without_category):
+ """
+ @param root_config: a root config instance
+ @type root_config: RootConfig
+ @param atom_without_category: an atom without a category component
+ @type atom_without_category: String
+ @rtype: list
+ @returns: a list of atoms containing categories (possibly empty)
+ """
+ null_cp = portage.dep_getkey(insert_category_into_atom(
+ atom_without_category, "null"))
+ cat, atom_pn = portage.catsplit(null_cp)
+
+ dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
+ categories = set()
+ for db, pkg_type, built, installed, db_keys in dbs:
+ for cat in db.categories:
+ if db.cp_list("%s/%s" % (cat, atom_pn)):
+ categories.add(cat)
+
+ deps = []
+ for cat in categories:
+ deps.append(Atom(insert_category_into_atom(
+ atom_without_category, cat), allow_repo=True))
+ return deps
+
+ def _have_new_virt(self, root, atom_cp):
+ ret = False
+ for db, pkg_type, built, installed, db_keys in \
+ self._dynamic_config._filtered_trees[root]["dbs"]:
+ if db.cp_list(atom_cp):
+ ret = True
+ break
+ return ret
+
+ def _iter_atoms_for_pkg(self, pkg):
+ depgraph_sets = self._dynamic_config.sets[pkg.root]
+ atom_arg_map = depgraph_sets.atom_arg_map
+ root_config = self._frozen_config.roots[pkg.root]
+ for atom in depgraph_sets.atoms.iterAtomsForPackage(pkg):
+ if atom.cp != pkg.cp and \
+ self._have_new_virt(pkg.root, atom.cp):
+ continue
+ visible_pkgs = \
+ self._dynamic_config._visible_pkgs[pkg.root].match_pkgs(atom)
+ visible_pkgs.reverse() # descending order
+ higher_slot = None
+ for visible_pkg in visible_pkgs:
+ if visible_pkg.cp != atom.cp:
+ continue
+ if pkg >= visible_pkg:
+ # This is descending order, and we're not
+ # interested in any versions <= pkg given.
+ break
+ if pkg.slot_atom != visible_pkg.slot_atom:
+ higher_slot = visible_pkg
+ break
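+ # If a visible higher version exists in a different slot, the arg
+ # is taken to apply to that slot, so skip it for this package.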
+ if higher_slot is not None:
+ continue
+ for arg in atom_arg_map[(atom, pkg.root)]:
+ if isinstance(arg, PackageArg) and \
+ arg.package != pkg:
+ continue
+ yield arg, atom
+
+ def select_files(self, myfiles):
+ """Given a list of .tbz2s, .ebuilds sets, and deps, populate
+ self._dynamic_config._initial_arg_list and call self._resolve to create the
+ appropriate depgraph and return a favorite list."""
+ self._load_vdb()
+ debug = "--debug" in self._frozen_config.myopts
+ root_config = self._frozen_config.roots[self._frozen_config.target_root]
+ sets = root_config.sets
+ depgraph_sets = self._dynamic_config.sets[root_config.root]
+ myfavorites=[]
+ myroot = self._frozen_config.target_root
+ dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
+ vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
+ real_vardb = self._frozen_config._trees_orig[myroot]["vartree"].dbapi
+ portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
+ bindb = self._frozen_config.trees[myroot]["bintree"].dbapi
+ pkgsettings = self._frozen_config.pkgsettings[myroot]
+ args = []
+ onlydeps = "--onlydeps" in self._frozen_config.myopts
+ lookup_owners = []
+ for x in myfiles:
+ ext = os.path.splitext(x)[1]
+ if ext==".tbz2":
+ if not os.path.exists(x):
+ if os.path.exists(
+ os.path.join(pkgsettings["PKGDIR"], "All", x)):
+ x = os.path.join(pkgsettings["PKGDIR"], "All", x)
+ elif os.path.exists(
+ os.path.join(pkgsettings["PKGDIR"], x)):
+ x = os.path.join(pkgsettings["PKGDIR"], x)
+ else:
+ writemsg("\n\n!!! Binary package '"+str(x)+"' does not exist.\n", noiselevel=-1)
+ writemsg("!!! Please ensure the tbz2 exists as specified.\n\n", noiselevel=-1)
+ return 0, myfavorites
+ mytbz2=portage.xpak.tbz2(x)
+ mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
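+ # Hypothetical example: CATEGORY "app-misc" and file "foo-1.0.tbz2"
+ # give mykey "app-misc/foo-1.0".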
+ if os.path.realpath(x) != \
+ os.path.realpath(self._frozen_config.trees[myroot]["bintree"].getname(mykey)):
+ writemsg(colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n\n"), noiselevel=-1)
+ self._dynamic_config._skip_restart = True
+ return 0, myfavorites
+
+ pkg = self._pkg(mykey, "binary", root_config,
+ onlydeps=onlydeps)
+ args.append(PackageArg(arg=x, package=pkg,
+ root_config=root_config))
+ elif ext==".ebuild":
+ ebuild_path = portage.util.normalize_path(os.path.abspath(x))
+ pkgdir = os.path.dirname(ebuild_path)
+ tree_root = os.path.dirname(os.path.dirname(pkgdir))
+ cp = pkgdir[len(tree_root)+1:]
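+ # Hypothetical example: "/usr/portage/app-misc/foo/foo-1.0.ebuild"
+ # gives pkgdir ".../app-misc/foo" and cp "app-misc/foo".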
+ e = portage.exception.PackageNotFound(
+ ("%s is not in a valid portage tree " + \
+ "hierarchy or does not exist") % x)
+ if not portage.isvalidatom(cp):
+ raise e
+ cat = portage.catsplit(cp)[0]
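+ # ebuild_path[:-7] strips the 7-character ".ebuild" suffix.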
+ mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
+ if not portage.isvalidatom("="+mykey):
+ raise e
+ ebuild_path = portdb.findname(mykey)
+ if ebuild_path:
+ if ebuild_path != os.path.join(os.path.realpath(tree_root),
+ cp, os.path.basename(ebuild_path)):
+ writemsg(colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n\n"), noiselevel=-1)
+ self._dynamic_config._skip_restart = True
+ return 0, myfavorites
+ if mykey not in portdb.xmatch(
+ "match-visible", portage.cpv_getkey(mykey)):
+ writemsg(colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use\n"), noiselevel=-1)
+ writemsg(colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man\n"), noiselevel=-1)
+ writemsg(colorize("BAD", "*** page for details.\n"), noiselevel=-1)
+ countdown(int(self._frozen_config.settings["EMERGE_WARNING_DELAY"]),
+ "Continuing...")
+ else:
+ raise portage.exception.PackageNotFound(
+ "%s is not in a valid portage tree hierarchy or does not exist" % x)
+ pkg = self._pkg(mykey, "ebuild", root_config,
+ onlydeps=onlydeps, myrepo=portdb.getRepositoryName(
+ os.path.dirname(os.path.dirname(os.path.dirname(ebuild_path)))))
+ args.append(PackageArg(arg=x, package=pkg,
+ root_config=root_config))
+ elif x.startswith(os.path.sep):
+ if not x.startswith(myroot):
+ portage.writemsg(("\n\n!!! '%s' does not start with" + \
+ " $ROOT.\n") % x, noiselevel=-1)
+ self._dynamic_config._skip_restart = True
+ return 0, []
+ # Queue these up since it's most efficient to handle
+ # multiple files in a single iter_owners() call.
+ lookup_owners.append(x)
+ elif x.startswith("." + os.sep) or \
+ x.startswith(".." + os.sep):
+ f = os.path.abspath(x)
+ if not f.startswith(myroot):
+ portage.writemsg(("\n\n!!! '%s' (resolved from '%s') does not start with" + \
+ " $ROOT.\n") % (f, x), noiselevel=-1)
+ self._dynamic_config._skip_restart = True
+ return 0, []
+ lookup_owners.append(f)
+ else:
+ if x in ("system", "world"):
+ x = SETPREFIX + x
+ if x.startswith(SETPREFIX):
+ s = x[len(SETPREFIX):]
+ if s not in sets:
+ raise portage.exception.PackageSetNotFound(s)
+ if s in depgraph_sets.sets:
+ continue
+ pset = sets[s]
+ depgraph_sets.sets[s] = pset
+ args.append(SetArg(arg=x, pset=pset,
+ root_config=root_config))
+ continue
+ if not is_valid_package_atom(x, allow_repo=True):
+ portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
+ noiselevel=-1)
+ portage.writemsg("!!! Please check ebuild(5) for full details.\n")
+ portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
+ self._dynamic_config._skip_restart = True
+ return (0,[])
+ # Don't expand categories or old-style virtuals here unless
+ # necessary. Expansion of old-style virtuals here causes at
+ # least the following problems:
+ # 1) It's more difficult to determine which set(s) an atom
+ # came from, if any.
+ # 2) It takes away freedom from the resolver to choose other
+ # possible expansions when necessary.
+ if "/" in x:
+ args.append(AtomArg(arg=x, atom=Atom(x, allow_repo=True),
+ root_config=root_config))
+ continue
+ expanded_atoms = self._dep_expand(root_config, x)
+ installed_cp_set = set()
+ for atom in expanded_atoms:
+ if vardb.cp_list(atom.cp):
+ installed_cp_set.add(atom.cp)
+
+ if len(installed_cp_set) > 1:
+ non_virtual_cps = set()
+ for atom_cp in installed_cp_set:
+ if not atom_cp.startswith("virtual/"):
+ non_virtual_cps.add(atom_cp)
+ if len(non_virtual_cps) == 1:
+ installed_cp_set = non_virtual_cps
+
+ if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
+ installed_cp = next(iter(installed_cp_set))
+ for atom in expanded_atoms:
+ if atom.cp == installed_cp:
+ available = False
+ for pkg in self._iter_match_pkgs_any(
+ root_config, atom.without_use,
+ onlydeps=onlydeps):
+ if not pkg.installed:
+ available = True
+ break
+ if available:
+ expanded_atoms = [atom]
+ break
+
+ # If a non-virtual package and one or more virtual packages
+ # are in expanded_atoms, use the non-virtual package.
+ if len(expanded_atoms) > 1:
+ number_of_virtuals = 0
+ for expanded_atom in expanded_atoms:
+ if expanded_atom.cp.startswith("virtual/"):
+ number_of_virtuals += 1
+ else:
+ candidate = expanded_atom
+ if len(expanded_atoms) - number_of_virtuals == 1:
+ expanded_atoms = [ candidate ]
+
+ if len(expanded_atoms) > 1:
+ writemsg("\n\n", noiselevel=-1)
+ ambiguous_package_name(x, expanded_atoms, root_config,
+ self._frozen_config.spinner, self._frozen_config.myopts)
+ self._dynamic_config._skip_restart = True
+ return False, myfavorites
+ if expanded_atoms:
+ atom = expanded_atoms[0]
+ else:
+ null_atom = Atom(insert_category_into_atom(x, "null"),
+ allow_repo=True)
+ cat, atom_pn = portage.catsplit(null_atom.cp)
+ virts_p = root_config.settings.get_virts_p().get(atom_pn)
+ if virts_p:
+ # Allow the depgraph to choose which virtual.
+ atom = Atom(null_atom.replace('null/', 'virtual/', 1),
+ allow_repo=True)
+ else:
+ atom = null_atom
+
+ if atom.use and atom.use.conditional:
+ writemsg(
+ ("\n\n!!! '%s' contains a conditional " + \
+