author     Alexander Bersenev <bay@hackerdom.ru>  2011-08-21 17:35:50 +0000
committer  Alexander Bersenev <bay@hackerdom.ru>  2011-08-21 17:35:50 +0000
commit     91ffc6c50001d41fe1d16981baa32fb557463375 (patch)
tree       393551fe844a9c7ee030ad71efe03a92b76ac569 /portage_with_autodep/pym/portage
parent     portage integration patch is added (diff)
download   autodep-91ffc6c50001d41fe1d16981baa32fb557463375.tar.gz
           autodep-91ffc6c50001d41fe1d16981baa32fb557463375.tar.bz2
           autodep-91ffc6c50001d41fe1d16981baa32fb557463375.zip
add a patched version of portage
Diffstat (limited to 'portage_with_autodep/pym/portage')
-rw-r--r--  portage_with_autodep/pym/portage/__init__.py  610
-rw-r--r--  portage_with_autodep/pym/portage/_global_updates.py  250
-rw-r--r--  portage_with_autodep/pym/portage/_legacy_globals.py  81
-rw-r--r--  portage_with_autodep/pym/portage/_selinux.py  129
-rw-r--r--  portage_with_autodep/pym/portage/_sets/__init__.py  245
-rw-r--r--  portage_with_autodep/pym/portage/_sets/base.py  264
-rw-r--r--  portage_with_autodep/pym/portage/_sets/dbapi.py  383
-rw-r--r--  portage_with_autodep/pym/portage/_sets/files.py  341
-rw-r--r--  portage_with_autodep/pym/portage/_sets/libs.py  98
-rw-r--r--  portage_with_autodep/pym/portage/_sets/profiles.py  53
-rw-r--r--  portage_with_autodep/pym/portage/_sets/security.py  86
-rw-r--r--  portage_with_autodep/pym/portage/_sets/shell.py  44
-rw-r--r--  portage_with_autodep/pym/portage/cache/__init__.py  4
-rw-r--r--  portage_with_autodep/pym/portage/cache/anydbm.py  113
-rw-r--r--  portage_with_autodep/pym/portage/cache/cache_errors.py  62
-rw-r--r--  portage_with_autodep/pym/portage/cache/ebuild_xattr.py  171
-rw-r--r--  portage_with_autodep/pym/portage/cache/flat_hash.py  155
-rw-r--r--  portage_with_autodep/pym/portage/cache/flat_list.py  134
-rw-r--r--  portage_with_autodep/pym/portage/cache/fs_template.py  90
-rw-r--r--  portage_with_autodep/pym/portage/cache/mappings.py  485
-rw-r--r--  portage_with_autodep/pym/portage/cache/metadata.py  154
-rw-r--r--  portage_with_autodep/pym/portage/cache/metadata_overlay.py  105
-rw-r--r--  portage_with_autodep/pym/portage/cache/sql_template.py  301
-rw-r--r--  portage_with_autodep/pym/portage/cache/sqlite.py  245
-rw-r--r--  portage_with_autodep/pym/portage/cache/template.py  236
-rw-r--r--  portage_with_autodep/pym/portage/cache/util.py  170
-rw-r--r--  portage_with_autodep/pym/portage/cache/volatile.py  25
-rw-r--r--  portage_with_autodep/pym/portage/checksum.py  291
-rw-r--r--  portage_with_autodep/pym/portage/const.py  143
-rw-r--r--  portage_with_autodep/pym/portage/cvstree.py  293
-rw-r--r--  portage_with_autodep/pym/portage/data.py  122
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/_MergeProcess.py  282
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/__init__.py  302
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/_expand_new_virt.py  72
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/bintree.py  1366
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/cpv_expand.py  106
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/dep_expand.py  56
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/porttree.py  1168
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/vartree.py  4527
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/virtual.py  131
-rw-r--r--  portage_with_autodep/pym/portage/debug.py  120
-rw-r--r--  portage_with_autodep/pym/portage/dep/__init__.py  2432
-rw-r--r--  portage_with_autodep/pym/portage/dep/dep_check.py  679
-rw-r--r--  portage_with_autodep/pym/portage/dispatch_conf.py  188
-rw-r--r--  portage_with_autodep/pym/portage/eapi.py  50
-rw-r--r--  portage_with_autodep/pym/portage/eclass_cache.py  123
-rw-r--r--  portage_with_autodep/pym/portage/elog/__init__.py  182
-rw-r--r--  portage_with_autodep/pym/portage/elog/filtering.py  15
-rw-r--r--  portage_with_autodep/pym/portage/elog/messages.py  172
-rw-r--r--  portage_with_autodep/pym/portage/elog/mod_custom.py  19
-rw-r--r--  portage_with_autodep/pym/portage/elog/mod_echo.py  46
-rw-r--r--  portage_with_autodep/pym/portage/elog/mod_mail.py  43
-rw-r--r--  portage_with_autodep/pym/portage/elog/mod_mail_summary.py  89
-rw-r--r--  portage_with_autodep/pym/portage/elog/mod_save.py  51
-rw-r--r--  portage_with_autodep/pym/portage/elog/mod_save_summary.py  59
-rw-r--r--  portage_with_autodep/pym/portage/elog/mod_syslog.py  32
-rw-r--r--  portage_with_autodep/pym/portage/env/__init__.py  3
-rw-r--r--  portage_with_autodep/pym/portage/env/config.py  105
-rw-r--r--  portage_with_autodep/pym/portage/env/loaders.py  319
-rw-r--r--  portage_with_autodep/pym/portage/env/validators.py  20
-rw-r--r--  portage_with_autodep/pym/portage/exception.py  186
-rw-r--r--  portage_with_autodep/pym/portage/getbinpkg.py  861
-rw-r--r--  portage_with_autodep/pym/portage/glsa.py  699
-rw-r--r--  portage_with_autodep/pym/portage/localization.py  20
-rw-r--r--  portage_with_autodep/pym/portage/locks.py  395
-rw-r--r--  portage_with_autodep/pym/portage/mail.py  177
-rw-r--r--  portage_with_autodep/pym/portage/manifest.py  538
-rw-r--r--  portage_with_autodep/pym/portage/news.py  351
-rw-r--r--  portage_with_autodep/pym/portage/output.py  794
-rw-r--r--  portage_with_autodep/pym/portage/package/__init__.py  2
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/__init__.py  2
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_config/KeywordsManager.py  284
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_config/LicenseManager.py  236
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_config/LocationsManager.py  182
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_config/MaskManager.py  189
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_config/UseManager.py  235
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_config/VirtualsManager.py  233
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_config/__init__.py  2
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_config/env_var_validation.py  23
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_config/features_set.py  128
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_config/helper.py  64
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_config/special_env_vars.py  185
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_ipc/ExitCommand.py  27
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_ipc/IpcCommand.py  9
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_ipc/QueryCommand.py  98
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_ipc/__init__.py  2
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_spawn_nofetch.py  82
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/config.py  2224
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/deprecated_profile_check.py  42
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/digestcheck.py  167
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/digestgen.py  202
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/doebuild.py  1791
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/fetch.py  1129
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/getmaskingreason.py  124
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/getmaskingstatus.py  174
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/prepare_build_dirs.py  370
-rw-r--r--  portage_with_autodep/pym/portage/process.py  427
-rw-r--r--  portage_with_autodep/pym/portage/proxy/__init__.py  2
-rw-r--r--  portage_with_autodep/pym/portage/proxy/lazyimport.py  212
-rw-r--r--  portage_with_autodep/pym/portage/proxy/objectproxy.py  91
-rw-r--r--  portage_with_autodep/pym/portage/repository/__init__.py  2
-rw-r--r--  portage_with_autodep/pym/portage/repository/config.py  504
-rw-r--r--  portage_with_autodep/pym/portage/tests/__init__.py  244
-rw-r--r--  portage_with_autodep/pym/portage/tests/bin/__init__.py  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/bin/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/bin/setup_env.py  85
-rw-r--r--  portage_with_autodep/pym/portage/tests/bin/test_dobin.py  16
-rw-r--r--  portage_with_autodep/pym/portage/tests/bin/test_dodir.py  16
-rw-r--r--  portage_with_autodep/pym/portage/tests/dbapi/__init__.py  2
-rw-r--r--  portage_with_autodep/pym/portage/tests/dbapi/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/dbapi/test_fakedbapi.py  58
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/__init__.py  3
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/testAtom.py  315
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/testCheckRequiredUse.py  219
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/testExtendedAtomDict.py  18
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/testExtractAffectingUSE.py  75
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/testStandalone.py  36
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_best_match_to_list.py  43
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_dep_getcpv.py  35
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_dep_getrepo.py  29
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_dep_getslot.py  28
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_dep_getusedeps.py  35
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_get_operator.py  33
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_get_required_use_flags.py  42
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_isjustname.py  24
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_isvalidatom.py  146
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_match_from_list.py  108
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_paren_reduce.py  66
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_use_reduce.py  627
-rw-r--r--  portage_with_autodep/pym/portage/tests/ebuild/__init__.py  2
-rw-r--r--  portage_with_autodep/pym/portage/tests/ebuild/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/ebuild/test_array_fromfile_eof.py  43
-rw-r--r--  portage_with_autodep/pym/portage/tests/ebuild/test_config.py  198
-rw-r--r--  portage_with_autodep/pym/portage/tests/ebuild/test_doebuild_spawn.py  82
-rw-r--r--  portage_with_autodep/pym/portage/tests/ebuild/test_ipc_daemon.py  124
-rw-r--r--  portage_with_autodep/pym/portage/tests/ebuild/test_pty_eof.py  32
-rw-r--r--  portage_with_autodep/pym/portage/tests/ebuild/test_spawn.py  52
-rw-r--r--  portage_with_autodep/pym/portage/tests/env/__init__.py  4
-rw-r--r--  portage_with_autodep/pym/portage/tests/env/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/env/config/__init__.py  4
-rw-r--r--  portage_with_autodep/pym/portage/tests/env/config/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/env/config/test_PackageKeywordsFile.py  40
-rw-r--r--  portage_with_autodep/pym/portage/tests/env/config/test_PackageMaskFile.py  29
-rw-r--r--  portage_with_autodep/pym/portage/tests/env/config/test_PackageUseFile.py  37
-rw-r--r--  portage_with_autodep/pym/portage/tests/env/config/test_PortageModulesFile.py  39
-rw-r--r--  portage_with_autodep/pym/portage/tests/lafilefixer/__init__.py  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/lafilefixer/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/lafilefixer/test_lafilefixer.py  145
-rw-r--r--  portage_with_autodep/pym/portage/tests/lazyimport/__init__.py  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/lazyimport/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/lazyimport/test_lazy_import_portage_baseline.py  81
-rw-r--r--  portage_with_autodep/pym/portage/tests/lazyimport/test_preload_portage_submodules.py  16
-rw-r--r--  portage_with_autodep/pym/portage/tests/lint/__init__.py  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/lint/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/lint/test_bash_syntax.py  42
-rw-r--r--  portage_with_autodep/pym/portage/tests/lint/test_compile_modules.py  46
-rw-r--r--  portage_with_autodep/pym/portage/tests/lint/test_import_modules.py  40
-rw-r--r--  portage_with_autodep/pym/portage/tests/locks/__init__.py  2
-rw-r--r--  portage_with_autodep/pym/portage/tests/locks/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/locks/test_asynchronous_lock.py  124
-rw-r--r--  portage_with_autodep/pym/portage/tests/locks/test_lock_nonblock.py  46
-rw-r--r--  portage_with_autodep/pym/portage/tests/news/__init__.py  3
-rw-r--r--  portage_with_autodep/pym/portage/tests/news/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/news/test_NewsItem.py  95
-rw-r--r--  portage_with_autodep/pym/portage/tests/process/__init__.py  2
-rw-r--r--  portage_with_autodep/pym/portage/tests/process/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/process/test_poll.py  39
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/ResolverPlayground.py  690
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/__init__.py  2
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_autounmask.py  326
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_backtracking.py  169
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_circular_dependencies.py  84
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_depclean.py  285
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_depth.py  252
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_eapi.py  115
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_merge_order.py  453
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_missing_iuse_and_evaluated_atoms.py  31
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_multirepo.py  318
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_multislot.py  40
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_old_dep_chain_display.py  35
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_output.py  88
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_rebuild.py  138
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_required_use.py  114
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_simple.py  57
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_slot_collisions.py  143
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_use_dep_defaults.py  40
-rwxr-xr-x  portage_with_autodep/pym/portage/tests/runTests  46
-rw-r--r--  portage_with_autodep/pym/portage/tests/sets/__init__.py  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/sets/base/__init__.py  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/sets/base/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/sets/base/testInternalPackageSet.py  61
-rw-r--r--  portage_with_autodep/pym/portage/tests/sets/files/__init__.py  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/sets/files/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/sets/files/testConfigFileSet.py  32
-rw-r--r--  portage_with_autodep/pym/portage/tests/sets/files/testStaticFileSet.py  27
-rw-r--r--  portage_with_autodep/pym/portage/tests/sets/shell/__init__.py  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/sets/shell/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/sets/shell/testShell.py  28
-rw-r--r--  portage_with_autodep/pym/portage/tests/unicode/__init__.py  2
-rw-r--r--  portage_with_autodep/pym/portage/tests/unicode/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/unicode/test_string_format.py  108
-rw-r--r--  portage_with_autodep/pym/portage/tests/util/__init__.py  4
-rw-r--r--  portage_with_autodep/pym/portage/tests/util/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/util/test_digraph.py  201
-rw-r--r--  portage_with_autodep/pym/portage/tests/util/test_getconfig.py  29
-rw-r--r--  portage_with_autodep/pym/portage/tests/util/test_grabdict.py  11
-rw-r--r--  portage_with_autodep/pym/portage/tests/util/test_normalizedPath.py  14
-rw-r--r--  portage_with_autodep/pym/portage/tests/util/test_stackDictList.py  17
-rw-r--r--  portage_with_autodep/pym/portage/tests/util/test_stackDicts.py  36
-rw-r--r--  portage_with_autodep/pym/portage/tests/util/test_stackLists.py  19
-rw-r--r--  portage_with_autodep/pym/portage/tests/util/test_uniqueArray.py  24
-rw-r--r--  portage_with_autodep/pym/portage/tests/util/test_varExpand.py  92
-rw-r--r--  portage_with_autodep/pym/portage/tests/versions/__init__.py  3
-rw-r--r--  portage_with_autodep/pym/portage/tests/versions/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/versions/test_cpv_sort_key.py  16
-rw-r--r--  portage_with_autodep/pym/portage/tests/versions/test_vercmp.py  80
-rw-r--r--  portage_with_autodep/pym/portage/tests/xpak/__init__.py  3
-rw-r--r--  portage_with_autodep/pym/portage/tests/xpak/__test__  0
-rw-r--r--  portage_with_autodep/pym/portage/tests/xpak/test_decodeint.py  16
-rw-r--r--  portage_with_autodep/pym/portage/update.py  320
-rw-r--r--  portage_with_autodep/pym/portage/util/ExtractKernelVersion.py  76
-rw-r--r--  portage_with_autodep/pym/portage/util/__init__.py  1602
-rw-r--r--  portage_with_autodep/pym/portage/util/_dyn_libs/LinkageMapELF.py  805
-rw-r--r--  portage_with_autodep/pym/portage/util/_dyn_libs/PreservedLibsRegistry.py  172
-rw-r--r--  portage_with_autodep/pym/portage/util/_dyn_libs/__init__.py  2
-rw-r--r--  portage_with_autodep/pym/portage/util/_pty.py  212
-rw-r--r--  portage_with_autodep/pym/portage/util/digraph.py  342
-rw-r--r--  portage_with_autodep/pym/portage/util/env_update.py  293
-rw-r--r--  portage_with_autodep/pym/portage/util/lafilefixer.py  185
-rw-r--r--  portage_with_autodep/pym/portage/util/listdir.py  151
-rw-r--r--  portage_with_autodep/pym/portage/util/movefile.py  242
-rw-r--r--  portage_with_autodep/pym/portage/util/mtimedb.py  81
-rw-r--r--  portage_with_autodep/pym/portage/versions.py  403
-rw-r--r--  portage_with_autodep/pym/portage/xml/__init__.py  2
-rw-r--r--  portage_with_autodep/pym/portage/xml/metadata.py  376
-rw-r--r--  portage_with_autodep/pym/portage/xpak.py  497
238 files changed, 45785 insertions, 0 deletions
diff --git a/portage_with_autodep/pym/portage/__init__.py b/portage_with_autodep/pym/portage/__init__.py
new file mode 100644
index 0000000..2a2eb99
--- /dev/null
+++ b/portage_with_autodep/pym/portage/__init__.py
@@ -0,0 +1,610 @@
+# portage.py -- core Portage functionality
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+VERSION="HEAD"
+
+# ===========================================================================
+# START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
+# ===========================================================================
+
+try:
+ import sys
+ import errno
+ if not hasattr(errno, 'ESTALE'):
+ # ESTALE may not be defined on some systems, such as interix.
+ errno.ESTALE = -1
+ import re
+ import types
+
+ # Try the commands module first, since this allows us to eliminate
+ # the subprocess module from the baseline imports under python2.
+ try:
+ from commands import getstatusoutput as subprocess_getstatusoutput
+ except ImportError:
+ from subprocess import getstatusoutput as subprocess_getstatusoutput
+
+ import platform
+
+ # Temporarily delete these imports, to ensure that only the
+ # wrapped versions are imported by portage internals.
+ import os
+ del os
+ import shutil
+ del shutil
+
+except ImportError as e:
+ sys.stderr.write("\n\n")
+ sys.stderr.write("!!! Failed to complete python imports. These are internal modules for\n")
+ sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
+ sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
+
+ sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
+ sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
+ sys.stderr.write(" "+str(e)+"\n\n");
+ raise
+
+try:
+
+ import portage.proxy.lazyimport
+ import portage.proxy as proxy
+ proxy.lazyimport.lazyimport(globals(),
+ 'portage.cache.cache_errors:CacheError',
+ 'portage.checksum',
+ 'portage.checksum:perform_checksum,perform_md5,prelink_capable',
+ 'portage.cvstree',
+ 'portage.data',
+ 'portage.data:lchown,ostype,portage_gid,portage_uid,secpass,' + \
+ 'uid,userland,userpriv_groups,wheelgid',
+ 'portage.dbapi',
+ 'portage.dbapi.bintree:bindbapi,binarytree',
+ 'portage.dbapi.cpv_expand:cpv_expand',
+ 'portage.dbapi.dep_expand:dep_expand',
+ 'portage.dbapi.porttree:close_portdbapi_caches,FetchlistDict,' + \
+ 'portagetree,portdbapi',
+ 'portage.dbapi.vartree:dblink,merge,unmerge,vardbapi,vartree',
+ 'portage.dbapi.virtual:fakedbapi',
+ 'portage.dep',
+ 'portage.dep:best_match_to_list,dep_getcpv,dep_getkey,' + \
+ 'flatten,get_operator,isjustname,isspecific,isvalidatom,' + \
+ 'match_from_list,match_to_list',
+ 'portage.dep.dep_check:dep_check,dep_eval,dep_wordreduce,dep_zapdeps',
+ 'portage.eclass_cache',
+ 'portage.exception',
+ 'portage.getbinpkg',
+ 'portage.locks',
+ 'portage.locks:lockdir,lockfile,unlockdir,unlockfile',
+ 'portage.mail',
+ 'portage.manifest:Manifest',
+ 'portage.output',
+ 'portage.output:bold,colorize',
+ 'portage.package.ebuild.doebuild:doebuild,' + \
+ 'doebuild_environment,spawn,spawnebuild',
+ 'portage.package.ebuild.config:autouse,best_from_dict,' + \
+ 'check_config_instance,config',
+ 'portage.package.ebuild.deprecated_profile_check:' + \
+ 'deprecated_profile_check',
+ 'portage.package.ebuild.digestcheck:digestcheck',
+ 'portage.package.ebuild.digestgen:digestgen',
+ 'portage.package.ebuild.fetch:fetch',
+ 'portage.package.ebuild.getmaskingreason:getmaskingreason',
+ 'portage.package.ebuild.getmaskingstatus:getmaskingstatus',
+ 'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
+ 'portage.process',
+ 'portage.process:atexit_register,run_exitfuncs',
+ 'portage.update:dep_transform,fixdbentries,grab_updates,' + \
+ 'parse_updates,update_config_files,update_dbentries,' + \
+ 'update_dbentry',
+ 'portage.util',
+ 'portage.util:atomic_ofstream,apply_secpass_permissions,' + \
+ 'apply_recursive_permissions,dump_traceback,getconfig,' + \
+ 'grabdict,grabdict_package,grabfile,grabfile_package,' + \
+ 'map_dictlist_vals,new_protect_filename,normalize_path,' + \
+ 'pickle_read,pickle_write,stack_dictlist,stack_dicts,' + \
+ 'stack_lists,unique_array,varexpand,writedict,writemsg,' + \
+ 'writemsg_stdout,write_atomic',
+ 'portage.util.digraph:digraph',
+ 'portage.util.env_update:env_update',
+ 'portage.util.ExtractKernelVersion:ExtractKernelVersion',
+ 'portage.util.listdir:cacheddir,listdir',
+ 'portage.util.movefile:movefile',
+ 'portage.util.mtimedb:MtimeDB',
+ 'portage.versions',
+ 'portage.versions:best,catpkgsplit,catsplit,cpv_getkey,' + \
+ 'cpv_getkey@getCPFromCPV,endversion_keys,' + \
+ 'suffix_value@endversion,pkgcmp,pkgsplit,vercmp,ververify',
+ 'portage.xpak',
+ 'time',
+ )
+
+ try:
+ from collections import OrderedDict
+ except ImportError:
+ proxy.lazyimport.lazyimport(globals(),
+ 'portage.cache.mappings:OrderedDict')
+
+ import portage.const
+ from portage.const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
+ USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
+ PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
+ EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \
+ MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
+ DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
+ INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE,\
+ INCREMENTALS, EAPI, MISC_SH_BINARY, REPO_NAME_LOC, REPO_NAME_FILE
+
+except ImportError as e:
+ sys.stderr.write("\n\n")
+ sys.stderr.write("!!! Failed to complete portage imports. There are internal modules for\n")
+ sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n")
+ sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the\n")
+ sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n")
+ sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n")
+ sys.stderr.write("!!! a recovery of portage.\n")
+ sys.stderr.write(" "+str(e)+"\n\n")
+ raise
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+ long = int
+
+# Assume utf_8 fs encoding everywhere except in merge code, where the
+# user's locale is respected.
+_encodings = {
+ 'content' : 'utf_8',
+ 'fs' : 'utf_8',
+ 'merge' : sys.getfilesystemencoding(),
+ 'repo.content' : 'utf_8',
+ 'stdio' : 'utf_8',
+}
+
+# This can happen if python is built with USE=build (stage 1).
+if _encodings['merge'] is None:
+ _encodings['merge'] = 'ascii'
+
+if sys.hexversion >= 0x3000000:
+ def _unicode_encode(s, encoding=_encodings['content'], errors='backslashreplace'):
+ if isinstance(s, str):
+ s = s.encode(encoding, errors)
+ return s
+
+ def _unicode_decode(s, encoding=_encodings['content'], errors='replace'):
+ if isinstance(s, bytes):
+ s = str(s, encoding=encoding, errors=errors)
+ return s
+else:
+ def _unicode_encode(s, encoding=_encodings['content'], errors='backslashreplace'):
+ if isinstance(s, unicode):
+ s = s.encode(encoding, errors)
+ return s
+
+ def _unicode_decode(s, encoding=_encodings['content'], errors='replace'):
+ if isinstance(s, bytes):
+ s = unicode(s, encoding=encoding, errors=errors)
+ return s
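+
+# Illustrative behavior of the two helpers above (python3 branch; examples
+# are a sketch, not part of the original code):
+#   _unicode_encode('foo.ebuild')  -> b'foo.ebuild'
+#   _unicode_decode(b'foo.ebuild') -> 'foo.ebuild'
+#   _unicode_decode(b'\xff')       -> '\ufffd', since errors='replace'
+#   substitutes undecodable bytes instead of raising UnicodeDecodeError.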
+
+class _unicode_func_wrapper(object):
+ """
+ Wraps a function, converts arguments from unicode to bytes,
+ and return values to unicode from bytes. Function calls
+ will raise UnicodeEncodeError if an argument fails to be
+ encoded with the required encoding. Return values that
+ are single strings are decoded with errors='replace'. Return
+ values that are lists of strings are decoded with errors='strict'
+ and elements that fail to be decoded are omitted from the returned
+ list.
+ """
+ __slots__ = ('_func', '_encoding')
+
+ def __init__(self, func, encoding=_encodings['fs']):
+ self._func = func
+ self._encoding = encoding
+
+ def __call__(self, *args, **kwargs):
+
+ encoding = self._encoding
+ wrapped_args = [_unicode_encode(x, encoding=encoding, errors='strict')
+ for x in args]
+ if kwargs:
+ wrapped_kwargs = dict(
+ (k, _unicode_encode(v, encoding=encoding, errors='strict'))
+ for k, v in kwargs.items())
+ else:
+ wrapped_kwargs = {}
+
+ rval = self._func(*wrapped_args, **wrapped_kwargs)
+
+ # Don't use isinstance() since we don't want to convert subclasses
+ # of tuple such as posix.stat_result in python-3.2.
+ if rval.__class__ in (list, tuple):
+ decoded_rval = []
+ for x in rval:
+ try:
+ x = _unicode_decode(x, encoding=encoding, errors='strict')
+ except UnicodeDecodeError:
+ pass
+ else:
+ decoded_rval.append(x)
+
+ if isinstance(rval, tuple):
+ rval = tuple(decoded_rval)
+ else:
+ rval = decoded_rval
+ else:
+ rval = _unicode_decode(rval, encoding=encoding, errors='replace')
+
+ return rval
+
+class _unicode_module_wrapper(object):
+ """
+ Wraps a module and wraps all functions with _unicode_func_wrapper.
+ """
+ __slots__ = ('_mod', '_encoding', '_overrides', '_cache')
+
+ def __init__(self, mod, encoding=_encodings['fs'], overrides=None, cache=True):
+ object.__setattr__(self, '_mod', mod)
+ object.__setattr__(self, '_encoding', encoding)
+ object.__setattr__(self, '_overrides', overrides)
+ if cache:
+ cache = {}
+ else:
+ cache = None
+ object.__setattr__(self, '_cache', cache)
+
+ def __getattribute__(self, attr):
+ cache = object.__getattribute__(self, '_cache')
+ if cache is not None:
+ result = cache.get(attr)
+ if result is not None:
+ return result
+ result = getattr(object.__getattribute__(self, '_mod'), attr)
+ encoding = object.__getattribute__(self, '_encoding')
+ overrides = object.__getattribute__(self, '_overrides')
+ override = None
+ if overrides is not None:
+ override = overrides.get(id(result))
+ if override is not None:
+ result = override
+ elif isinstance(result, type):
+ pass
+ elif type(result) is types.ModuleType:
+ result = _unicode_module_wrapper(result,
+ encoding=encoding, overrides=overrides)
+ elif hasattr(result, '__call__'):
+ result = _unicode_func_wrapper(result, encoding=encoding)
+ if cache is not None:
+ cache[attr] = result
+ return result
+
+import os as _os
+_os_overrides = {
+ id(_os.fdopen) : _os.fdopen,
+ id(_os.mkfifo) : _os.mkfifo,
+ id(_os.popen) : _os.popen,
+ id(_os.read) : _os.read,
+ id(_os.system) : _os.system,
+}
+
+if hasattr(_os, 'statvfs'):
+ _os_overrides[id(_os.statvfs)] = _os.statvfs
+
+os = _unicode_module_wrapper(_os, overrides=_os_overrides,
+ encoding=_encodings['fs'])
+_os_merge = _unicode_module_wrapper(_os,
+ encoding=_encodings['merge'], overrides=_os_overrides)
+
+import shutil as _shutil
+shutil = _unicode_module_wrapper(_shutil, encoding=_encodings['fs'])
+
+# Imports below this point rely on the above unicode wrapper definitions.
+try:
+ __import__('selinux')
+ import portage._selinux
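+ # Since this file is the portage package itself, importing the submodule
+ # also binds it to the module-level name _selinux used below.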
+ selinux = _unicode_module_wrapper(_selinux,
+ encoding=_encodings['fs'])
+ _selinux_merge = _unicode_module_wrapper(_selinux,
+ encoding=_encodings['merge'])
+except (ImportError, OSError) as e:
+ if isinstance(e, OSError):
+ sys.stderr.write("!!! SELinux not loaded: %s\n" % str(e))
+ del e
+ _selinux = None
+ selinux = None
+ _selinux_merge = None
+
+# ===========================================================================
+# END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END
+# ===========================================================================
+
+_python_interpreter = os.path.realpath(sys.executable)
+_bin_path = PORTAGE_BIN_PATH
+_pym_path = PORTAGE_PYM_PATH
+
+def _shell_quote(s):
+ """
+ Quote a string in double-quotes and use backslashes to
+ escape any backslashes, double-quotes, dollar signs, or
+ backquotes in the string.
+ """
+ for letter in "\\\"$`":
+ if letter in s:
+ s = s.replace(letter, "\\" + letter)
+ return "\"%s\"" % s
+
+bsd_chflags = None
+
+if platform.system() in ('FreeBSD',):
+
+ class bsd_chflags(object):
+
+ @classmethod
+ def chflags(cls, path, flags, opts=""):
+ cmd = 'chflags %s %o %s' % (opts, flags, _shell_quote(path))
+ status, output = subprocess_getstatusoutput(cmd)
+ if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
+ return
+ # Try to generate an ENOENT error if appropriate.
+ if 'h' in opts:
+ _os_merge.lstat(path)
+ else:
+ _os_merge.stat(path)
+ # Make sure the binary exists.
+ if not portage.process.find_binary('chflags'):
+ raise portage.exception.CommandNotFound('chflags')
+ # Now we're not sure exactly why it failed or what
+ # the real errno was, so just report EPERM.
+ e = OSError(errno.EPERM, output)
+ e.errno = errno.EPERM
+ e.filename = path
+ e.message = output
+ raise e
+
+ @classmethod
+ def lchflags(cls, path, flags):
+ return cls.chflags(path, flags, opts='-h')
+
+def load_mod(name):
+ modname = ".".join(name.split(".")[:-1])
+ mod = __import__(modname)
+ components = name.split('.')
+ for comp in components[1:]:
+ mod = getattr(mod, comp)
+ return mod
+
+def getcwd():
+ "this fixes situations where the current directory doesn't exist"
+ try:
+ return os.getcwd()
+ except OSError: #dir doesn't exist
+ os.chdir("/")
+ return "/"
+getcwd()
+
+def abssymlink(symlink):
+ "This reads symlinks, resolving the relative symlinks, and returning the absolute."
+ mylink=os.readlink(symlink)
+ if mylink[0] != '/':
+ mydir=os.path.dirname(symlink)
+ mylink=mydir+"/"+mylink
+ return os.path.normpath(mylink)
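+
+# Illustrative example: for a symlink /usr/bin/vi whose target reads
+# ../share/vim/vi, abssymlink returns /usr/share/vim/vi.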
+
+_doebuild_manifest_exempt_depend = 0
+
+_testing_eapis = frozenset(["4-python"])
+_deprecated_eapis = frozenset(["4_pre1", "3_pre2", "3_pre1"])
+
+def _eapi_is_deprecated(eapi):
+ return eapi in _deprecated_eapis
+
+def eapi_is_supported(eapi):
+ if not isinstance(eapi, basestring):
+ # Only call str() when necessary since with python2 it
+ # can trigger UnicodeEncodeError if EAPI is corrupt.
+ eapi = str(eapi)
+ eapi = eapi.strip()
+
+ if _eapi_is_deprecated(eapi):
+ return True
+
+ if eapi in _testing_eapis:
+ return True
+
+ try:
+ eapi = int(eapi)
+ except ValueError:
+ eapi = -1
+ if eapi < 0:
+ return False
+ return eapi <= portage.const.EAPI
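+
+# Illustrative (assuming portage.const.EAPI == 4 in this snapshot):
+# eapi_is_supported('3') and eapi_is_supported('4-python') are True, while
+# eapi_is_supported('5') is False.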
+
+# Generally, it's best not to assume that cache entries for unsupported EAPIs
+# can be validated. However, the current package manager specification does not
+# guarantee that the EAPI can be parsed without sourcing the ebuild, so
+# it's too costly to discard existing cache entries for unsupported EAPIs.
+# Therefore, by default, assume that cache entries for unsupported EAPIs can be
+# validated. If FEATURES=parse-eapi-* is enabled, this assumption is discarded
+# since the EAPI can be determined without incurring the cost of sourcing
+# the ebuild.
+_validate_cache_for_unsupported_eapis = True
+
+_parse_eapi_ebuild_head_re = re.compile(r'^EAPI=[\'"]?([^\'"#]*)')
+_parse_eapi_ebuild_head_max_lines = 30
+
+def _parse_eapi_ebuild_head(f):
+ count = 0
+ for line in f:
+ m = _parse_eapi_ebuild_head_re.match(line)
+ if m is not None:
+ return m.group(1).strip()
+ count += 1
+ if count >= _parse_eapi_ebuild_head_max_lines:
+ break
+ return '0'
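+
+# Illustrative: for an ebuild beginning with 'EAPI="4"  # comment', the
+# regex above captures '4'; if no EAPI assignment appears in the first 30
+# lines, the default '0' is returned.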
+
+def _movefile(src, dest, **kwargs):
+ """Calls movefile and raises a PortageException if an error occurs."""
+ if movefile(src, dest, **kwargs) is None:
+ raise portage.exception.PortageException(
+ "mv '%s' '%s'" % (src, dest))
+
+auxdbkeys = (
+ 'DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
+ 'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
+ 'KEYWORDS', 'INHERITED', 'IUSE', 'REQUIRED_USE',
+ 'PDEPEND', 'PROVIDE', 'EAPI',
+ 'PROPERTIES', 'DEFINED_PHASES', 'UNUSED_05', 'UNUSED_04',
+ 'UNUSED_03', 'UNUSED_02', 'UNUSED_01',
+)
+auxdbkeylen=len(auxdbkeys)
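+
+# The order of the 22 keys above is significant: the flat metadata cache
+# stores one value per line in this exact order, with the UNUSED_* entries
+# reserving positions for future keys.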
+
+def portageexit():
+ if data.secpass > 1 and os.environ.get("SANDBOX_ON") != "1":
+ close_portdbapi_caches()
+
+def create_trees(config_root=None, target_root=None, trees=None):
+ if trees is None:
+ trees = {}
+ else:
+ # clean up any existing portdbapi instances
+ for myroot in trees:
+ portdb = trees[myroot]["porttree"].dbapi
+ portdb.close_caches()
+ portdbapi.portdbapi_instances.remove(portdb)
+ del trees[myroot]["porttree"], myroot, portdb
+
+ settings = config(config_root=config_root, target_root=target_root,
+ config_incrementals=portage.const.INCREMENTALS)
+ settings.lock()
+
+ myroots = [(settings["ROOT"], settings)]
+ if settings["ROOT"] != "/":
+
+ # When ROOT != "/" we only want overrides from the calling
+ # environment to apply to the config that's associated
+ # with ROOT != "/", so pass a nearly empty dict for the env parameter.
+ clean_env = {}
+ for k in ('PATH', 'PORTAGE_GRPNAME', 'PORTAGE_USERNAME',
+ 'SSH_AGENT_PID', 'SSH_AUTH_SOCK', 'TERM',
+ 'ftp_proxy', 'http_proxy', 'no_proxy'):
+ v = settings.get(k)
+ if v is not None:
+ clean_env[k] = v
+ settings = config(config_root=None, target_root="/", env=clean_env)
+ settings.lock()
+ myroots.append((settings["ROOT"], settings))
+
+ for myroot, mysettings in myroots:
+ trees[myroot] = portage.util.LazyItemsDict(trees.get(myroot, {}))
+ trees[myroot].addLazySingleton("virtuals", mysettings.getvirtuals)
+ trees[myroot].addLazySingleton(
+ "vartree", vartree, myroot, categories=mysettings.categories,
+ settings=mysettings)
+ trees[myroot].addLazySingleton("porttree",
+ portagetree, myroot, settings=mysettings)
+ trees[myroot].addLazySingleton("bintree",
+ binarytree, myroot, mysettings["PKGDIR"], settings=mysettings)
+ return trees
+
+if VERSION == 'HEAD':
+ class _LazyVersion(proxy.objectproxy.ObjectProxy):
+ def _get_target(self):
+ global VERSION
+ if VERSION is not self:
+ return VERSION
+ if os.path.isdir(os.path.join(PORTAGE_BASE_PATH, '.git')):
+ status, output = subprocess_getstatusoutput((
+ "cd %s ; git describe --tags || exit $? ; " + \
+ "if [ -n \"`git diff-index --name-only --diff-filter=M HEAD`\" ] ; " + \
+ "then echo modified ; git rev-list --format=%%ct -n 1 HEAD ; fi ; " + \
+ "exit 0") % _shell_quote(PORTAGE_BASE_PATH))
+ if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
+ output_lines = output.splitlines()
+ if output_lines:
+ version_split = output_lines[0].split('-')
+ if version_split:
+ VERSION = version_split[0].lstrip('v')
+ patchlevel = False
+ if len(version_split) > 1:
+ patchlevel = True
+ VERSION = "%s_p%s" %(VERSION, version_split[1])
+ if len(output_lines) > 1 and output_lines[1] == 'modified':
+ head_timestamp = None
+ if len(output_lines) > 3:
+ try:
+ head_timestamp = long(output_lines[3])
+ except ValueError:
+ pass
+ timestamp = long(time.time())
+ if head_timestamp is not None and timestamp > head_timestamp:
+ timestamp = timestamp - head_timestamp
+ if not patchlevel:
+ VERSION = "%s_p0" % (VERSION,)
+ VERSION = "%s_p%d" % (VERSION, timestamp)
+ return VERSION
+ VERSION = 'HEAD'
+ return VERSION
+ VERSION = _LazyVersion()
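+
+# Illustrative outcomes of the lazy version logic above: a tree checked out
+# at a tag yields e.g. '2.1.10'; a tree N commits past a tag yields
+# '2.1.10_pN'; a locally modified tree appends a further
+# _p<now minus HEAD commit time> suffix.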
+
+if "_legacy_globals_constructed" in globals():
+ # The module has been reloaded, so perform any relevant cleanup
+ # and prevent memory leaks.
+ if "db" in _legacy_globals_constructed:
+ try:
+ db
+ except NameError:
+ pass
+ else:
+ if isinstance(db, dict) and db:
+ for _x in db.values():
+ try:
+ if "porttree" in _x.lazy_items:
+ continue
+ except (AttributeError, TypeError):
+ continue
+ try:
+ _x = _x["porttree"].dbapi
+ except (AttributeError, KeyError):
+ continue
+ if not isinstance(_x, portdbapi):
+ continue
+ _x.close_caches()
+ try:
+ portdbapi.portdbapi_instances.remove(_x)
+ except ValueError:
+ pass
+ del _x
+
+class _LegacyGlobalProxy(proxy.objectproxy.ObjectProxy):
+
+ __slots__ = ('_name',)
+
+ def __init__(self, name):
+ proxy.objectproxy.ObjectProxy.__init__(self)
+ object.__setattr__(self, '_name', name)
+
+ def _get_target(self):
+ name = object.__getattribute__(self, '_name')
+ from portage._legacy_globals import _get_legacy_global
+ return _get_legacy_global(name)
+
+_legacy_global_var_names = ("archlist", "db", "features",
+ "groups", "mtimedb", "mtimedbfile", "pkglines",
+ "portdb", "profiledir", "root", "selinux_enabled",
+ "settings", "thirdpartymirrors")
+
+for k in _legacy_global_var_names:
+ globals()[k] = _LegacyGlobalProxy(k)
+del k
+
+_legacy_globals_constructed = set()
+
+def _disable_legacy_globals():
+ """
+ This deletes the ObjectProxy instances that are used
+ for lazy initialization of legacy global variables.
+ The purpose of deleting them is to prevent new code
+ from referencing these deprecated variables.
+ """
+ global _legacy_global_var_names
+ for k in _legacy_global_var_names:
+ globals().pop(k, None)
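
A hedged sketch of how the legacy global proxies defined above resolve (the
names are real, the scenario is illustrative): each name is bound to a
_LegacyGlobalProxy, and the real object is only constructed on first actual
use, then recorded in _legacy_globals_constructed.

    import portage

    # Naming the proxy constructs nothing yet.
    settings_proxy = portage.settings

    # A delegated operation (here __getitem__) triggers _get_target(),
    # which calls portage._legacy_globals._get_legacy_global('settings').
    root = portage.settings["ROOT"]
    assert "settings" in portage._legacy_globals_constructed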
diff --git a/portage_with_autodep/pym/portage/_global_updates.py b/portage_with_autodep/pym/portage/_global_updates.py
new file mode 100644
index 0000000..868d1ee
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_global_updates.py
@@ -0,0 +1,250 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import stat
+
+from portage import best, os
+from portage.const import WORLD_FILE
+from portage.data import secpass
+from portage.exception import DirectoryNotFound
+from portage.localization import _
+from portage.output import bold, colorize
+from portage.update import grab_updates, parse_updates, update_config_files, update_dbentry
+from portage.util import grabfile, shlex_split, \
+ writemsg, writemsg_stdout, write_atomic
+
+def _global_updates(trees, prev_mtimes, quiet=False):
+ """
+ Perform new global updates if they exist in 'profiles/updates/'
+ subdirectories of all active repositories (PORTDIR + PORTDIR_OVERLAY).
+ This simply returns if ROOT != "/" (when len(trees) != 1). If ROOT != "/"
+ then the user should instead use emaint --fix movebin and/or moveinst.
+
+ @param trees: A dictionary containing portage trees.
+ @type trees: dict
+ @param prev_mtimes: A dictionary containing mtimes of files located in
+ $PORTDIR/profiles/updates/.
+ @type prev_mtimes: dict
+ @rtype: bool
+ @return: True if update commands have been performed, otherwise False
+ """
+ # only do this if we're root and not running repoman/ebuild digest
+
+ retupd = False
+ if secpass < 2 or \
+ "SANDBOX_ACTIVE" in os.environ or \
+ len(trees) != 1:
+ return retupd
+ root = "/"
+ mysettings = trees[root]["vartree"].settings
+ portdb = trees[root]["porttree"].dbapi
+ vardb = trees[root]["vartree"].dbapi
+ bindb = trees[root]["bintree"].dbapi
+ if not os.access(bindb.bintree.pkgdir, os.W_OK):
+ bindb = None
+ else:
+ # Call binarytree.populate(), since we want to make sure it's
+ # only populated with local packages here (getbinpkgs=0).
+ bindb.bintree.populate()
+
+ world_file = os.path.join(mysettings['EROOT'], WORLD_FILE)
+ world_list = grabfile(world_file)
+ world_modified = False
+ world_warnings = set()
+ updpath_map = {}
+ # Maps repo_name to list of updates. If a given repo has no updates
+ # directory, it will be omitted. If a repo has an updates directory
+ # but none need to be applied (according to timestamp logic), the
+ # value in the dict will be an empty list.
+ repo_map = {}
+ timestamps = {}
+
+ update_notice_printed = False
+ for repo_name in portdb.getRepositories():
+ repo = portdb.getRepositoryPath(repo_name)
+ updpath = os.path.join(repo, "profiles", "updates")
+ if not os.path.isdir(updpath):
+ continue
+
+ if updpath in updpath_map:
+ repo_map[repo_name] = updpath_map[updpath]
+ continue
+
+ try:
+ if mysettings.get("PORTAGE_CALLER") == "fixpackages":
+ update_data = grab_updates(updpath)
+ else:
+ update_data = grab_updates(updpath, prev_mtimes)
+ except DirectoryNotFound:
+ continue
+ myupd = []
+ updpath_map[updpath] = myupd
+ repo_map[repo_name] = myupd
+ if len(update_data) > 0:
+ for mykey, mystat, mycontent in update_data:
+ if not update_notice_printed:
+ update_notice_printed = True
+ writemsg_stdout("\n")
+ if quiet:
+ writemsg_stdout(colorize("GOOD",
+ _("Performing Global Updates\n")))
+ writemsg_stdout(_("(Could take a couple of minutes if you have a lot of binary packages.)\n"))
+ else:
+ writemsg_stdout(colorize("GOOD",
+ _("Performing Global Updates:\n")))
+ writemsg_stdout(_("(Could take a couple of minutes if you have a lot of binary packages.)\n"))
+ writemsg_stdout(_(" %s='update pass' %s='binary update' "
+ "%s='/var/db update' %s='/var/db move'\n"
+ " %s='/var/db SLOT move' %s='binary move' "
+ "%s='binary SLOT move'\n %s='update /etc/portage/package.*'\n") % \
+ (bold("."), bold("*"), bold("#"), bold("@"), bold("s"), bold("%"), bold("S"), bold("p")))
+ valid_updates, errors = parse_updates(mycontent)
+ myupd.extend(valid_updates)
+ if not quiet:
+ writemsg_stdout(bold(mykey))
+ writemsg_stdout(len(valid_updates) * "." + "\n")
+ if len(errors) == 0:
+ # Update our internal mtime since we
+ # processed all of our directives.
+ timestamps[mykey] = mystat[stat.ST_MTIME]
+ else:
+ for msg in errors:
+ writemsg("%s\n" % msg, noiselevel=-1)
+ if myupd:
+ retupd = True
+
+ master_repo = portdb.getRepositoryName(portdb.porttree_root)
+ if master_repo in repo_map:
+ repo_map['DEFAULT'] = repo_map[master_repo]
+
+ for repo_name, myupd in repo_map.items():
+ if repo_name == 'DEFAULT':
+ continue
+ if not myupd:
+ continue
+
+ def repo_match(repository):
+ return repository == repo_name or \
+ (repo_name == master_repo and repository not in repo_map)
+
+ def _world_repo_match(atoma, atomb):
+ """
+ Check whether to perform a world change from atoma to atomb.
+ If best vardb match for atoma comes from the same repository
+ as the update file, allow that. Additionally, if portdb still
+ can find a match for old atom name, warn about that.
+ """
+ matches = vardb.match(atoma)
+ if not matches:
+ matches = vardb.match(atomb)
+ if matches and \
+ repo_match(vardb.aux_get(best(matches), ['repository'])[0]):
+ if portdb.match(atoma):
+ world_warnings.add((atoma, atomb))
+ return True
+ else:
+ return False
+
+ for update_cmd in myupd:
+ for pos, atom in enumerate(world_list):
+ new_atom = update_dbentry(update_cmd, atom)
+ if atom != new_atom:
+ if _world_repo_match(atom, new_atom):
+ world_list[pos] = new_atom
+ world_modified = True
+
+ for update_cmd in myupd:
+ if update_cmd[0] == "move":
+ moves = vardb.move_ent(update_cmd, repo_match=repo_match)
+ if moves:
+ writemsg_stdout(moves * "@")
+ if bindb:
+ moves = bindb.move_ent(update_cmd, repo_match=repo_match)
+ if moves:
+ writemsg_stdout(moves * "%")
+ elif update_cmd[0] == "slotmove":
+ moves = vardb.move_slot_ent(update_cmd, repo_match=repo_match)
+ if moves:
+ writemsg_stdout(moves * "s")
+ if bindb:
+ moves = bindb.move_slot_ent(update_cmd, repo_match=repo_match)
+ if moves:
+ writemsg_stdout(moves * "S")
+
+ if world_modified:
+ world_list.sort()
+ write_atomic(world_file,
+ "".join("%s\n" % (x,) for x in world_list))
+ if world_warnings:
+ # XXX: print warning that we've updated world entries
+ # and the old name still matches something (from an overlay)?
+ pass
+
+ if retupd:
+
+ def _config_repo_match(repo_name, atoma, atomb):
+ """
+ Check whether to perform a config file change from atoma to atomb.
+ If best vardb match for atoma comes from the same repository
+ as the update file, allow that. Additionally, if portdb still
+ can find a match for old atom name, warn about that.
+ """
+ matches = vardb.match(atoma)
+ if not matches:
+ matches = vardb.match(atomb)
+ if not matches:
+ return False
+ repository = vardb.aux_get(best(matches), ['repository'])[0]
+ return repository == repo_name or \
+ (repo_name == master_repo and repository not in repo_map)
+
+ update_config_files(root,
+ shlex_split(mysettings.get("CONFIG_PROTECT", "")),
+ shlex_split(mysettings.get("CONFIG_PROTECT_MASK", "")),
+ repo_map, match_callback=_config_repo_match)
+
+ # The above global updates proceed quickly, so they
+ # are considered a single mtimedb transaction.
+ if timestamps:
+ # We do not update the mtime in the mtimedb
+ # until after _all_ of the above updates have
+ # been processed because the mtimedb will
+ # automatically commit when killed by ctrl C.
+ for mykey, mtime in timestamps.items():
+ prev_mtimes[mykey] = mtime
+
+ do_upgrade_packagesmessage = False
+ # We have to do the brute force updates for these now.
+ if mysettings.get("PORTAGE_CALLER") == "fixpackages" or \
+ "fixpackages" in mysettings.features:
+ def onUpdate(maxval, curval):
+ if curval > 0:
+ writemsg_stdout("#")
+ if quiet:
+ onUpdate = None
+ vardb.update_ents(repo_map, onUpdate=onUpdate)
+ if bindb:
+ def onUpdate(maxval, curval):
+ if curval > 0:
+ writemsg_stdout("*")
+ if quiet:
+ onUpdate = None
+ bindb.update_ents(repo_map, onUpdate=onUpdate)
+ else:
+ do_upgrade_packagesmessage = 1
+
+ # Update progress above is indicated by characters written to stdout so
+ # we print a couple new lines here to separate the progress output from
+ # what follows.
+ print()
+ print()
+
+ if do_upgrade_packagesmessage and bindb and \
+ bindb.cpv_all():
+ writemsg_stdout(_(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the tbz2's in the packages directory.\n"))
+ writemsg_stdout(bold(_("Note: This can take a very long time.")))
+ writemsg_stdout("\n")
+
+ return retupd
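
For context (a hedged note, not part of the patch): the update files that
_global_updates() consumes live under profiles/updates/ in each repository
and contain one directive per line, for example (package names invented):

    # profiles/updates/3Q-2011
    move net-misc/oldname net-misc/newname
    slotmove =dev-libs/foo-1.4* 0 1.4

Each applied "move" is echoed as '@' ('%' for binary packages) and each
"slotmove" as 's' ('S' for binary packages), matching the legend printed by
the code above.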
diff --git a/portage_with_autodep/pym/portage/_legacy_globals.py b/portage_with_autodep/pym/portage/_legacy_globals.py
new file mode 100644
index 0000000..615591a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_legacy_globals.py
@@ -0,0 +1,81 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from portage.const import CACHE_PATH, PROFILE_PATH
+
+def _get_legacy_global(name):
+ constructed = portage._legacy_globals_constructed
+ if name in constructed:
+ return getattr(portage, name)
+
+ if name == 'portdb':
+ portage.portdb = portage.db[portage.root]["porttree"].dbapi
+ constructed.add(name)
+ return getattr(portage, name)
+
+ elif name in ('mtimedb', 'mtimedbfile'):
+ portage.mtimedbfile = os.path.join(portage.settings['EROOT'],
+ CACHE_PATH, "mtimedb")
+ constructed.add('mtimedbfile')
+ portage.mtimedb = portage.MtimeDB(portage.mtimedbfile)
+ constructed.add('mtimedb')
+ return getattr(portage, name)
+
+ # Portage needs to ensure a sane umask for the files it creates.
+ os.umask(0o22)
+
+ kwargs = {}
+ for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
+ kwargs[k] = os.environ.get(envvar)
+
+ portage._initializing_globals = True
+ portage.db = portage.create_trees(**kwargs)
+ constructed.add('db')
+ del portage._initializing_globals
+
+ settings = portage.db["/"]["vartree"].settings
+
+ for root in portage.db:
+ if root != "/":
+ settings = portage.db[root]["vartree"].settings
+ break
+
+ portage.output._init(config_root=settings['PORTAGE_CONFIGROOT'])
+
+ portage.settings = settings
+ constructed.add('settings')
+
+ portage.root = root
+ constructed.add('root')
+
+ # COMPATIBILITY
+ # These attributes should not be used within
+ # Portage under any circumstances.
+
+ portage.archlist = settings.archlist()
+ constructed.add('archlist')
+
+ portage.features = settings.features
+ constructed.add('features')
+
+ portage.groups = settings["ACCEPT_KEYWORDS"].split()
+ constructed.add('groups')
+
+ portage.pkglines = settings.packages
+ constructed.add('pkglines')
+
+ portage.selinux_enabled = settings.selinux_enabled()
+ constructed.add('selinux_enabled')
+
+ portage.thirdpartymirrors = settings.thirdpartymirrors()
+ constructed.add('thirdpartymirrors')
+
+ profiledir = os.path.join(settings["PORTAGE_CONFIGROOT"], PROFILE_PATH)
+ if not os.path.isdir(profiledir):
+ profiledir = None
+ portage.profiledir = profiledir
+ constructed.add('profiledir')
+
+ return getattr(portage, name)
diff --git a/portage_with_autodep/pym/portage/_selinux.py b/portage_with_autodep/pym/portage/_selinux.py
new file mode 100644
index 0000000..9470978
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_selinux.py
@@ -0,0 +1,129 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+# Don't use the unicode-wrapped os and shutil modules here since
+# the whole _selinux module itself will be wrapped.
+import os
+import shutil
+
+import portage
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.localization import _
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'selinux')
+
+def copyfile(src, dest):
+ src = _unicode_encode(src, encoding=_encodings['fs'], errors='strict')
+ dest = _unicode_encode(dest, encoding=_encodings['fs'], errors='strict')
+ (rc, ctx) = selinux.lgetfilecon(src)
+ if rc < 0:
+ src = _unicode_decode(src, encoding=_encodings['fs'], errors='replace')
+ raise OSError(_("copyfile: Failed getting context of \"%s\".") % src)
+
+ setfscreate(ctx)
+ try:
+ shutil.copyfile(src, dest)
+ finally:
+ setfscreate()
+
+def getcontext():
+ (rc, ctx) = selinux.getcon()
+ if rc < 0:
+ raise OSError(_("getcontext: Failed getting current process context."))
+
+ return ctx
+
+def is_selinux_enabled():
+ return selinux.is_selinux_enabled()
+
+def mkdir(target, refdir):
+ target = _unicode_encode(target, encoding=_encodings['fs'], errors='strict')
+ refdir = _unicode_encode(refdir, encoding=_encodings['fs'], errors='strict')
+ (rc, ctx) = selinux.getfilecon(refdir)
+ if rc < 0:
+ refdir = _unicode_decode(refdir, encoding=_encodings['fs'],
+ errors='replace')
+ raise OSError(
+ _("mkdir: Failed getting context of reference directory \"%s\".") \
+ % refdir)
+
+ setfscreate(ctx)
+ try:
+ os.mkdir(target)
+ finally:
+ setfscreate()
+
+def rename(src, dest):
+ src = _unicode_encode(src, encoding=_encodings['fs'], errors='strict')
+ dest = _unicode_encode(dest, encoding=_encodings['fs'], errors='strict')
+ (rc, ctx) = selinux.lgetfilecon(src)
+ if rc < 0:
+ src = _unicode_decode(src, encoding=_encodings['fs'], errors='replace')
+ raise OSError(_("rename: Failed getting context of \"%s\".") % src)
+
+ setfscreate(ctx)
+ try:
+ os.rename(src,dest)
+ finally:
+ setfscreate()
+
+def settype(newtype):
+ ret = getcontext().split(":")
+ ret[2] = newtype
+ return ":".join(ret)
+
+def setexec(ctx="\n"):
+ ctx = _unicode_encode(ctx, encoding=_encodings['content'], errors='strict')
+ if selinux.setexeccon(ctx) < 0:
+ ctx = _unicode_decode(ctx, encoding=_encodings['content'],
+ errors='replace')
+ if selinux.security_getenforce() == 1:
+ raise OSError(_("Failed setting exec() context \"%s\".") % ctx)
+ else:
+ portage.writemsg("!!! " + \
+ _("Failed setting exec() context \"%s\".") % ctx, \
+ noiselevel=-1)
+
+def setfscreate(ctx="\n"):
+ ctx = _unicode_encode(ctx,
+ encoding=_encodings['content'], errors='strict')
+ if selinux.setfscreatecon(ctx) < 0:
+ ctx = _unicode_decode(ctx,
+ encoding=_encodings['content'], errors='replace')
+ raise OSError(
+ _("setfscreate: Failed setting fs create context \"%s\".") % ctx)
+
+def spawn_wrapper(spawn_func, selinux_type):
+
+ selinux_type = _unicode_encode(selinux_type,
+ encoding=_encodings['content'], errors='strict')
+
+ def wrapper_func(*args, **kwargs):
+ con = settype(selinux_type)
+ setexec(con)
+ try:
+ return spawn_func(*args, **kwargs)
+ finally:
+ setexec()
+
+ return wrapper_func
+
+def symlink(target, link, reflnk):
+ target = _unicode_encode(target, encoding=_encodings['fs'], errors='strict')
+ link = _unicode_encode(link, encoding=_encodings['fs'], errors='strict')
+ reflnk = _unicode_encode(reflnk, encoding=_encodings['fs'], errors='strict')
+ (rc, ctx) = selinux.lgetfilecon(reflnk)
+ if rc < 0:
+ reflnk = _unicode_decode(reflnk, encoding=_encodings['fs'],
+ errors='replace')
+ raise OSError(
+ _("symlink: Failed getting context of reference symlink \"%s\".") \
+ % reflnk)
+
+ setfscreate(ctx)
+ try:
+ os.symlink(target, link)
+ finally:
+ setfscreate()
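
A minimal usage sketch for spawn_wrapper() above; the spawn function and the
SELinux type name are illustrative assumptions, not from the patch:

    import portage._selinux as _selinux

    def my_spawn(mycommand, **kwargs):
        # Stand-in for portage.process.spawn().
        print("would spawn:", mycommand)
        return 0

    # The wrapper switches the exec() context to the given type for the
    # duration of the call and restores the default context afterwards.
    sandboxed_spawn = _selinux.spawn_wrapper(my_spawn, "portage_sandbox_t")
    sandboxed_spawn("/usr/bin/true")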
diff --git a/portage_with_autodep/pym/portage/_sets/__init__.py b/portage_with_autodep/pym/portage/_sets/__init__.py
new file mode 100644
index 0000000..1b3484e
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_sets/__init__.py
@@ -0,0 +1,245 @@
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+__all__ = ["SETPREFIX", "get_boolean", "SetConfigError",
+ "SetConfig", "load_default_config"]
+
+try:
+ from configparser import SafeConfigParser, NoOptionError
+except ImportError:
+ from ConfigParser import SafeConfigParser, NoOptionError
+from portage import os
+from portage import load_mod
+from portage.const import USER_CONFIG_PATH, GLOBAL_CONFIG_PATH
+from portage.const import _ENABLE_SET_CONFIG
+from portage.exception import PackageSetNotFound
+from portage.localization import _
+
+SETPREFIX = "@"
+
+def get_boolean(options, name, default):
+ if name not in options:
+ return default
+ elif options[name].lower() in ("1", "yes", "on", "true"):
+ return True
+ elif options[name].lower() in ("0", "no", "off", "false"):
+ return False
+ else:
+ raise SetConfigError(_("invalid value '%(value)s' for option '%(option)s'") % {"value": options[name], "option": name})
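+
+# Illustrative: get_boolean({"multiset": "Yes"}, "multiset", False) returns
+# True, while an unrecognized value such as "maybe" raises SetConfigError.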
+
+class SetConfigError(Exception):
+ pass
+
+class SetConfig(object):
+ def __init__(self, paths, settings, trees):
+ self._parser = SafeConfigParser(
+ defaults={
+ "EPREFIX" : settings["EPREFIX"],
+ "EROOT" : settings["EROOT"],
+ "PORTAGE_CONFIGROOT" : settings["PORTAGE_CONFIGROOT"],
+ "ROOT" : settings["ROOT"],
+ })
+
+ if _ENABLE_SET_CONFIG:
+ self._parser.read(paths)
+ else:
+ self._create_default_config()
+
+ self.errors = []
+ self.psets = {}
+ self.trees = trees
+ self.settings = settings
+ self._parsed = False
+ self.active = []
+
+ def _create_default_config(self):
+ """
+ Create a default hardcoded set configuration for a portage version
+ that does not support set configuration files. This is only used
+ in the current branch of portage if _ENABLE_SET_CONFIG is False.
+ Even if it's not used in this branch, keep it here in order to
+ minimize the diff between branches.
+
+ [world]
+ class = portage.sets.base.DummyPackageSet
+ packages = @selected @system
+
+ [selected]
+ class = portage.sets.files.WorldSelectedSet
+
+ [system]
+ class = portage.sets.profiles.PackagesSystemSet
+
+ """
+ parser = self._parser
+
+ parser.add_section("world")
+ parser.set("world", "class", "portage.sets.base.DummyPackageSet")
+ parser.set("world", "packages", "@selected @system")
+
+ parser.add_section("selected")
+ parser.set("selected", "class", "portage.sets.files.WorldSelectedSet")
+
+ parser.add_section("system")
+ parser.set("system", "class", "portage.sets.profiles.PackagesSystemSet")
+
+ def update(self, setname, options):
+ parser = self._parser
+ self.errors = []
+ if setname not in self.psets:
+ options["name"] = setname
+ options["world-candidate"] = "False"
+
+ # for the unlikely case that there is already a section with the requested setname
+ import random
+ while setname in parser.sections():
+ setname = "%08d" % random.randint(0, 10**10)
+
+ parser.add_section(setname)
+ for k, v in options.items():
+ parser.set(setname, k, v)
+ else:
+ section = self.psets[setname].creator
+ if parser.has_option(section, "multiset") and \
+ parser.getboolean(section, "multiset"):
+ self.errors.append(_("Invalid request to reconfigure set '%(set)s' generated "
+ "by multiset section '%(section)s'") % {"set": setname, "section": section})
+ return
+ for k, v in options.items():
+ parser.set(section, k, v)
+ self._parse(update=True)
+
+ def _parse(self, update=False):
+ if self._parsed and not update:
+ return
+ parser = self._parser
+ for sname in parser.sections():
+ # find classname for current section, default to file based sets
+ if not parser.has_option(sname, "class"):
+ classname = "portage._sets.files.StaticFileSet"
+ else:
+ classname = parser.get(sname, "class")
+
+ if classname.startswith('portage.sets.'):
+ # The module has been made private, but we still support
+ # the previous namespace for sets.conf entries.
+ classname = classname.replace('sets', '_sets', 1)
+
+ # try to import the specified class
+ try:
+ setclass = load_mod(classname)
+ except (ImportError, AttributeError):
+ try:
+ setclass = load_mod("portage._sets." + classname)
+ except (ImportError, AttributeError):
+ self.errors.append(_("Could not import '%(class)s' for section "
+ "'%(section)s'") % {"class": classname, "section": sname})
+ continue
+ # prepare option dict for the current section
+ optdict = {}
+ for oname in parser.options(sname):
+ optdict[oname] = parser.get(sname, oname)
+
+ # create single or multiple instances of the given class depending on configuration
+ if parser.has_option(sname, "multiset") and \
+ parser.getboolean(sname, "multiset"):
+ if hasattr(setclass, "multiBuilder"):
+ newsets = {}
+ try:
+ newsets = setclass.multiBuilder(optdict, self.settings, self.trees)
+ except SetConfigError as e:
+ self.errors.append(_("Configuration error in section '%s': %s") % (sname, str(e)))
+ continue
+ for x in newsets:
+ if x in self.psets and not update:
+ self.errors.append(_("Redefinition of set '%s' (sections: '%s', '%s')") % (x, self.psets[x].creator, sname))
+ newsets[x].creator = sname
+ if parser.has_option(sname, "world-candidate") and \
+ parser.getboolean(sname, "world-candidate"):
+ newsets[x].world_candidate = True
+ self.psets.update(newsets)
+ else:
+ self.errors.append(_("Section '%(section)s' is configured as multiset, but '%(class)s' "
+ "doesn't support that configuration") % {"section": sname, "class": classname})
+ continue
+ else:
+ try:
+ setname = parser.get(sname, "name")
+ except NoOptionError:
+ setname = sname
+ if setname in self.psets and not update:
+ self.errors.append(_("Redefinition of set '%s' (sections: '%s', '%s')") % (setname, self.psets[setname].creator, sname))
+ if hasattr(setclass, "singleBuilder"):
+ try:
+ self.psets[setname] = setclass.singleBuilder(optdict, self.settings, self.trees)
+ self.psets[setname].creator = sname
+ if parser.has_option(sname, "world-candidate") and \
+ parser.getboolean(sname, "world-candidate"):
+ self.psets[setname].world_candidate = True
+ except SetConfigError as e:
+ self.errors.append(_("Configuration error in section '%s': %s") % (sname, str(e)))
+ continue
+ else:
+ self.errors.append(_("'%(class)s' does not support individual set creation, section '%(section)s' "
+ "must be configured as multiset") % {"class": classname, "section": sname})
+ continue
+ self._parsed = True
+
+ def getSets(self):
+ self._parse()
+ return self.psets.copy()
+
+ def getSetAtoms(self, setname, ignorelist=None):
+ """
+		This raises PackageSetNotFound if the given setname does not exist.
+ """
+ self._parse()
+ try:
+ myset = self.psets[setname]
+ except KeyError:
+ raise PackageSetNotFound(setname)
+ myatoms = myset.getAtoms()
+ parser = self._parser
+
+ if ignorelist is None:
+ ignorelist = set()
+
+ ignorelist.add(setname)
+ for n in myset.getNonAtoms():
+ if n.startswith(SETPREFIX):
+ s = n[len(SETPREFIX):]
+ if s in self.psets:
+ if s not in ignorelist:
+ myatoms.update(self.getSetAtoms(s,
+ ignorelist=ignorelist))
+ else:
+ raise PackageSetNotFound(s)
+
+ return myatoms
+
+def load_default_config(settings, trees):
+
+ if not _ENABLE_SET_CONFIG:
+ return SetConfig(None, settings, trees)
+
+ global_config_path = GLOBAL_CONFIG_PATH
+ if settings['EPREFIX']:
+ global_config_path = os.path.join(settings['EPREFIX'],
+ GLOBAL_CONFIG_PATH.lstrip(os.sep))
+ def _getfiles():
+ for path, dirs, files in os.walk(os.path.join(global_config_path, "sets")):
+ for f in files:
+			# os.walk() yields str names here, so compare against str
+			if not f.startswith('.'):
+ yield os.path.join(path, f)
+
+ dbapi = trees["porttree"].dbapi
+ for repo in dbapi.getRepositories():
+ path = dbapi.getRepositoryPath(repo)
+ yield os.path.join(path, "sets.conf")
+
+ yield os.path.join(settings["PORTAGE_CONFIGROOT"],
+ USER_CONFIG_PATH, "sets.conf")
+
+ return SetConfig(_getfiles(), settings, trees)
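
For orientation, a minimal sketch of how the SetConfig API above is typically driven, assuming an initialized portage install (portage.settings and portage.db are the legacy globals; the set name "world" comes from the default configuration):

    import portage
    from portage._sets import load_default_config

    trees = portage.db[portage.root]
    setconfig = load_default_config(portage.settings, trees)
    print(sorted(setconfig.getSets()))     # available set names
    # getSetAtoms() expands nested "@set" references recursively and
    # raises PackageSetNotFound for unknown set names.
    print(setconfig.getSetAtoms("world"))
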
diff --git a/portage_with_autodep/pym/portage/_sets/base.py b/portage_with_autodep/pym/portage/_sets/base.py
new file mode 100644
index 0000000..c8d3ae4
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_sets/base.py
@@ -0,0 +1,264 @@
+# Copyright 2007-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+from portage.dep import Atom, ExtendedAtomDict, best_match_to_list, match_from_list
+from portage.exception import InvalidAtom
+from portage.versions import cpv_getkey
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+OPERATIONS = ["merge", "unmerge"]
+
+class PackageSet(object):
+ # Set this to operations that are supported by your subclass. While
+ # technically there is no difference between "merge" and "unmerge" regarding
+ # package sets, the latter doesn't make sense for some sets like "system"
+ # or "security" and therefore isn't supported by them.
+ _operations = ["merge"]
+ description = "generic package set"
+
+ def __init__(self, allow_wildcard=False, allow_repo=False):
+ self._atoms = set()
+ self._atommap = ExtendedAtomDict(set)
+ self._loaded = False
+ self._loading = False
+ self.errors = []
+ self._nonatoms = set()
+ self.world_candidate = False
+ self._allow_wildcard = allow_wildcard
+ self._allow_repo = allow_repo
+
+ def __contains__(self, atom):
+ self._load()
+ return atom in self._atoms or atom in self._nonatoms
+
+ def __iter__(self):
+ self._load()
+ for x in self._atoms:
+ yield x
+ for x in self._nonatoms:
+ yield x
+
+ def __bool__(self):
+ self._load()
+ return bool(self._atoms or self._nonatoms)
+
+ if sys.hexversion < 0x3000000:
+ __nonzero__ = __bool__
+
+ def supportsOperation(self, op):
+ if not op in OPERATIONS:
+ raise ValueError(op)
+ return op in self._operations
+
+ def _load(self):
+ if not (self._loaded or self._loading):
+ self._loading = True
+ self.load()
+ self._loaded = True
+ self._loading = False
+
+ def getAtoms(self):
+ self._load()
+ return self._atoms.copy()
+
+ def getNonAtoms(self):
+ self._load()
+ return self._nonatoms.copy()
+
+ def _setAtoms(self, atoms):
+ self._atoms.clear()
+ self._nonatoms.clear()
+ for a in atoms:
+ if not isinstance(a, Atom):
+ if isinstance(a, basestring):
+ a = a.strip()
+ if not a:
+ continue
+ try:
+ a = Atom(a, allow_wildcard=True, allow_repo=True)
+ except InvalidAtom:
+ self._nonatoms.add(a)
+ continue
+ if not self._allow_wildcard and a.extended_syntax:
+ raise InvalidAtom("extended atom syntax not allowed here")
+ if not self._allow_repo and a.repo:
+ raise InvalidAtom("repository specification not allowed here")
+ self._atoms.add(a)
+
+ self._updateAtomMap()
+
+ def load(self):
+		# This method must be overridden by subclasses.
+ # Editable sets should use the value of self._mtime to determine if they
+ # need to reload themselves
+ raise NotImplementedError()
+
+ def containsCPV(self, cpv):
+ self._load()
+ for a in self._atoms:
+ if match_from_list(a, [cpv]):
+ return True
+ return False
+
+ def getMetadata(self, key):
+ if hasattr(self, key.lower()):
+ return getattr(self, key.lower())
+ else:
+ return ""
+
+ def _updateAtomMap(self, atoms=None):
+ """Update self._atommap for specific atoms or all atoms."""
+ if not atoms:
+ self._atommap.clear()
+ atoms = self._atoms
+ for a in atoms:
+ self._atommap.setdefault(a.cp, set()).add(a)
+
+ # Not sure if this one should really be in PackageSet
+ def findAtomForPackage(self, pkg, modified_use=None):
+ """Return the best match for a given package from the arguments, or
+ None if there are no matches. This matches virtual arguments against
+ the PROVIDE metadata. This can raise an InvalidDependString exception
+ if an error occurs while parsing PROVIDE."""
+
+ if modified_use is not None and modified_use is not pkg.use.enabled:
+ pkg = pkg.copy()
+ pkg.metadata["USE"] = " ".join(modified_use)
+
+ # Atoms matched via PROVIDE must be temporarily transformed since
+ # match_from_list() only works correctly when atom.cp == pkg.cp.
+ rev_transform = {}
+ for atom in self.iterAtomsForPackage(pkg):
+ if atom.cp == pkg.cp:
+ rev_transform[atom] = atom
+ else:
+ rev_transform[Atom(atom.replace(atom.cp, pkg.cp, 1), allow_wildcard=True, allow_repo=True)] = atom
+ best_match = best_match_to_list(pkg, iter(rev_transform))
+ if best_match:
+ return rev_transform[best_match]
+ return None
+
+ def iterAtomsForPackage(self, pkg):
+ """
+ Find all matching atoms for a given package. This matches virtual
+ arguments against the PROVIDE metadata. This will raise an
+ InvalidDependString exception if PROVIDE is invalid.
+ """
+ cpv_slot_list = [pkg]
+ cp = cpv_getkey(pkg.cpv)
+ self._load() # make sure the atoms are loaded
+
+ atoms = self._atommap.get(cp)
+ if atoms:
+ for atom in atoms:
+ if match_from_list(atom, cpv_slot_list):
+ yield atom
+ provides = pkg.metadata['PROVIDE']
+ if not provides:
+ return
+ provides = provides.split()
+ for provide in provides:
+ try:
+ provided_cp = Atom(provide).cp
+ except InvalidAtom:
+ continue
+ atoms = self._atommap.get(provided_cp)
+ if atoms:
+ for atom in atoms:
+ if match_from_list(atom.replace(provided_cp, cp),
+ cpv_slot_list):
+ yield atom
+
+class EditablePackageSet(PackageSet):
+
+ def __init__(self, allow_wildcard=False, allow_repo=False):
+ super(EditablePackageSet, self).__init__(allow_wildcard=allow_wildcard, allow_repo=allow_repo)
+
+ def update(self, atoms):
+ self._load()
+ modified = False
+ normal_atoms = []
+ for a in atoms:
+ if not isinstance(a, Atom):
+ try:
+ a = Atom(a, allow_wildcard=True, allow_repo=True)
+ except InvalidAtom:
+ modified = True
+ self._nonatoms.add(a)
+ continue
+ if not self._allow_wildcard and a.extended_syntax:
+ raise InvalidAtom("extended atom syntax not allowed here")
+ if not self._allow_repo and a.repo:
+ raise InvalidAtom("repository specification not allowed here")
+ normal_atoms.append(a)
+
+ if normal_atoms:
+ modified = True
+ self._atoms.update(normal_atoms)
+ self._updateAtomMap(atoms=normal_atoms)
+ if modified:
+ self.write()
+
+ def add(self, atom):
+ self.update([atom])
+
+ def replace(self, atoms):
+ self._setAtoms(atoms)
+ self.write()
+
+ def remove(self, atom):
+ self._load()
+ self._atoms.discard(atom)
+ self._nonatoms.discard(atom)
+ self._updateAtomMap()
+ self.write()
+
+ def removePackageAtoms(self, cp):
+ self._load()
+ for a in list(self._atoms):
+ if a.cp == cp:
+ self.remove(a)
+ self.write()
+
+ def write(self):
+		# This method must be overridden in subclasses that should be editable.
+ raise NotImplementedError()
+
+class InternalPackageSet(EditablePackageSet):
+ def __init__(self, initial_atoms=None, allow_wildcard=False, allow_repo=True):
+ """
+ Repo atoms are allowed more often than not, so it makes sense for this
+ class to allow them by default. The Atom constructor and isvalidatom()
+ functions default to allow_repo=False, which is sufficient to ensure
+ that repo atoms are prohibited when necessary.
+ """
+ super(InternalPackageSet, self).__init__(allow_wildcard=allow_wildcard, allow_repo=allow_repo)
+ if initial_atoms != None:
+ self.update(initial_atoms)
+
+ def clear(self):
+ self._atoms.clear()
+ self._updateAtomMap()
+
+ def load(self):
+ pass
+
+ def write(self):
+ pass
+
+class DummyPackageSet(PackageSet):
+ def __init__(self, atoms=None):
+ super(DummyPackageSet, self).__init__()
+ if atoms:
+ self._setAtoms(atoms)
+
+ def load(self):
+ pass
+
+ def singleBuilder(cls, options, settings, trees):
+ atoms = options.get("packages", "").split()
+ return DummyPackageSet(atoms=atoms)
+ singleBuilder = classmethod(singleBuilder)
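
A short sketch of the editable-set API defined above; InternalPackageSet keeps its state purely in memory, so load() and write() are deliberate no-ops, and the atoms used here are only examples:

    from portage._sets.base import InternalPackageSet

    s = InternalPackageSet(initial_atoms=[">=dev-lang/python-2.7"])
    s.add("sys-apps/portage::gentoo")    # repo atoms are allowed by default
    print(">=dev-lang/python-2.7" in s)  # True
    print(sorted(str(a) for a in s.getAtoms()))
    s.clear()
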
diff --git a/portage_with_autodep/pym/portage/_sets/dbapi.py b/portage_with_autodep/pym/portage/_sets/dbapi.py
new file mode 100644
index 0000000..0f238f0
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_sets/dbapi.py
@@ -0,0 +1,383 @@
+# Copyright 2007-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import time
+
+from portage import os
+from portage.versions import catpkgsplit, catsplit, pkgcmp, best
+from portage.dep import Atom
+from portage.localization import _
+from portage._sets.base import PackageSet
+from portage._sets import SetConfigError, get_boolean
+import portage
+
+__all__ = ["CategorySet", "DowngradeSet",
+ "EverythingSet", "OwnerSet", "VariableSet"]
+
+class EverythingSet(PackageSet):
+ _operations = ["merge"]
+ description = "Package set which contains SLOT " + \
+ "atoms to match all installed packages"
+ _filter = None
+
+ def __init__(self, vdbapi, **kwargs):
+ super(EverythingSet, self).__init__()
+ self._db = vdbapi
+
+ def load(self):
+ myatoms = []
+ db_keys = ["SLOT"]
+ aux_get = self._db.aux_get
+ cp_list = self._db.cp_list
+
+ for cp in self._db.cp_all():
+ for cpv in cp_list(cp):
+ # NOTE: Create SLOT atoms even when there is only one
+ # SLOT installed, in order to avoid the possibility
+ # of unwanted upgrades as reported in bug #338959.
+ slot, = aux_get(cpv, db_keys)
+ atom = Atom("%s:%s" % (cp, slot))
+ if self._filter:
+ if self._filter(atom):
+ myatoms.append(atom)
+ else:
+ myatoms.append(atom)
+
+ self._setAtoms(myatoms)
+
+ def singleBuilder(self, options, settings, trees):
+ return EverythingSet(trees["vartree"].dbapi)
+ singleBuilder = classmethod(singleBuilder)
+
+class OwnerSet(PackageSet):
+
+ _operations = ["merge", "unmerge"]
+
+ description = "Package set which contains all packages " + \
+ "that own one or more files."
+
+ def __init__(self, vardb=None, exclude_files=None, files=None):
+ super(OwnerSet, self).__init__()
+ self._db = vardb
+ self._exclude_files = exclude_files
+ self._files = files
+
+ def mapPathsToAtoms(self, paths, exclude_paths=None):
+ """
+ All paths must have $EROOT stripped from the left side.
+ """
+ rValue = set()
+ vardb = self._db
+ aux_get = vardb.aux_get
+ aux_keys = ["SLOT"]
+ if exclude_paths is None:
+ for link, p in vardb._owners.iter_owners(paths):
+ cat, pn = catpkgsplit(link.mycpv)[:2]
+ slot, = aux_get(link.mycpv, aux_keys)
+ rValue.add("%s/%s:%s" % (cat, pn, slot))
+ else:
+ all_paths = set()
+ all_paths.update(paths)
+ all_paths.update(exclude_paths)
+ exclude_atoms = set()
+ for link, p in vardb._owners.iter_owners(all_paths):
+ cat, pn = catpkgsplit(link.mycpv)[:2]
+ slot, = aux_get(link.mycpv, aux_keys)
+ atom = "%s/%s:%s" % (cat, pn, slot)
+ rValue.add(atom)
+ if p in exclude_paths:
+ exclude_atoms.add(atom)
+ rValue.difference_update(exclude_atoms)
+
+ return rValue
+
+ def load(self):
+ self._setAtoms(self.mapPathsToAtoms(self._files,
+ exclude_paths=self._exclude_files))
+
+ def singleBuilder(cls, options, settings, trees):
+ if not "files" in options:
+ raise SetConfigError(_("no files given"))
+
+ exclude_files = options.get("exclude-files")
+ if exclude_files is not None:
+ exclude_files = frozenset(portage.util.shlex_split(exclude_files))
+ return cls(vardb=trees["vartree"].dbapi, exclude_files=exclude_files,
+ files=frozenset(portage.util.shlex_split(options["files"])))
+
+ singleBuilder = classmethod(singleBuilder)
+
+class VariableSet(EverythingSet):
+
+ _operations = ["merge", "unmerge"]
+
+ description = "Package set which contains all packages " + \
+ "that match specified values of a specified variable."
+
+ def __init__(self, vardb, metadatadb=None, variable=None, includes=None, excludes=None):
+ super(VariableSet, self).__init__(vardb)
+ self._metadatadb = metadatadb
+ self._variable = variable
+ self._includes = includes
+ self._excludes = excludes
+
+ def _filter(self, atom):
+ ebuild = best(self._metadatadb.match(atom))
+ if not ebuild:
+ return False
+ values, = self._metadatadb.aux_get(ebuild, [self._variable])
+ values = values.split()
+ if self._includes and not self._includes.intersection(values):
+ return False
+ if self._excludes and self._excludes.intersection(values):
+ return False
+ return True
+
+ def singleBuilder(cls, options, settings, trees):
+
+ variable = options.get("variable")
+ if variable is None:
+ raise SetConfigError(_("missing required attribute: 'variable'"))
+
+ includes = options.get("includes", "")
+ excludes = options.get("excludes", "")
+
+ if not (includes or excludes):
+ raise SetConfigError(_("no includes or excludes given"))
+
+ metadatadb = options.get("metadata-source", "vartree")
+ if not metadatadb in trees:
+ raise SetConfigError(_("invalid value '%s' for option metadata-source") % metadatadb)
+
+ return cls(trees["vartree"].dbapi,
+ metadatadb=trees[metadatadb].dbapi,
+ excludes=frozenset(excludes.split()),
+ includes=frozenset(includes.split()),
+ variable=variable)
+
+ singleBuilder = classmethod(singleBuilder)
+
+class DowngradeSet(PackageSet):
+
+ _operations = ["merge", "unmerge"]
+
+ description = "Package set which contains all packages " + \
+ "for which the highest visible ebuild version is lower than " + \
+ "the currently installed version."
+
+ def __init__(self, portdb=None, vardb=None):
+ super(DowngradeSet, self).__init__()
+ self._portdb = portdb
+ self._vardb = vardb
+
+ def load(self):
+ atoms = []
+ xmatch = self._portdb.xmatch
+ xmatch_level = "bestmatch-visible"
+ cp_list = self._vardb.cp_list
+ aux_get = self._vardb.aux_get
+ aux_keys = ["SLOT"]
+ for cp in self._vardb.cp_all():
+ for cpv in cp_list(cp):
+ slot, = aux_get(cpv, aux_keys)
+ slot_atom = "%s:%s" % (cp, slot)
+ ebuild = xmatch(xmatch_level, slot_atom)
+ if not ebuild:
+ continue
+ ebuild_split = catpkgsplit(ebuild)[1:]
+ installed_split = catpkgsplit(cpv)[1:]
+ if pkgcmp(installed_split, ebuild_split) > 0:
+ atoms.append(slot_atom)
+
+ self._setAtoms(atoms)
+
+ def singleBuilder(cls, options, settings, trees):
+ return cls(portdb=trees["porttree"].dbapi,
+ vardb=trees["vartree"].dbapi)
+
+ singleBuilder = classmethod(singleBuilder)
+
+class UnavailableSet(EverythingSet):
+
+ _operations = ["unmerge"]
+
+ description = "Package set which contains all installed " + \
+ "packages for which there are no visible ebuilds " + \
+ "corresponding to the same $CATEGORY/$PN:$SLOT."
+
+ def __init__(self, vardb, metadatadb=None):
+ super(UnavailableSet, self).__init__(vardb)
+ self._metadatadb = metadatadb
+
+ def _filter(self, atom):
+ return not self._metadatadb.match(atom)
+
+ def singleBuilder(cls, options, settings, trees):
+
+ metadatadb = options.get("metadata-source", "porttree")
+ if not metadatadb in trees:
+ raise SetConfigError(_("invalid value '%s' for option "
+ "metadata-source") % (metadatadb,))
+
+ return cls(trees["vartree"].dbapi,
+ metadatadb=trees[metadatadb].dbapi)
+
+ singleBuilder = classmethod(singleBuilder)
+
+class UnavailableBinaries(EverythingSet):
+
+ _operations = ('merge', 'unmerge',)
+
+ description = "Package set which contains all installed " + \
+ "packages for which corresponding binary packages " + \
+ "are not available."
+
+ def __init__(self, vardb, metadatadb=None):
+ super(UnavailableBinaries, self).__init__(vardb)
+ self._metadatadb = metadatadb
+
+ def _filter(self, atom):
+ inst_pkg = self._db.match(atom)
+ if not inst_pkg:
+ return False
+ inst_cpv = inst_pkg[0]
+ return not self._metadatadb.cpv_exists(inst_cpv)
+
+ def singleBuilder(cls, options, settings, trees):
+
+ metadatadb = options.get("metadata-source", "bintree")
+ if not metadatadb in trees:
+ raise SetConfigError(_("invalid value '%s' for option "
+ "metadata-source") % (metadatadb,))
+
+ return cls(trees["vartree"].dbapi,
+ metadatadb=trees[metadatadb].dbapi)
+
+ singleBuilder = classmethod(singleBuilder)
+
+class CategorySet(PackageSet):
+ _operations = ["merge", "unmerge"]
+
+ def __init__(self, category, dbapi, only_visible=True):
+ super(CategorySet, self).__init__()
+ self._db = dbapi
+ self._category = category
+ self._check = only_visible
+ if only_visible:
+ s="visible"
+ else:
+ s="all"
+ self.description = "Package set containing %s packages of category %s" % (s, self._category)
+
+ def load(self):
+ myatoms = []
+ for cp in self._db.cp_all():
+ if catsplit(cp)[0] == self._category:
+ if (not self._check) or len(self._db.match(cp)) > 0:
+ myatoms.append(cp)
+ self._setAtoms(myatoms)
+
+ def _builderGetRepository(cls, options, repositories):
+ repository = options.get("repository", "porttree")
+ if not repository in repositories:
+ raise SetConfigError(_("invalid repository class '%s'") % repository)
+ return repository
+ _builderGetRepository = classmethod(_builderGetRepository)
+
+ def _builderGetVisible(cls, options):
+ return get_boolean(options, "only_visible", True)
+ _builderGetVisible = classmethod(_builderGetVisible)
+
+ def singleBuilder(cls, options, settings, trees):
+ if not "category" in options:
+ raise SetConfigError(_("no category given"))
+
+ category = options["category"]
+ if not category in settings.categories:
+ raise SetConfigError(_("invalid category name '%s'") % category)
+
+ repository = cls._builderGetRepository(options, trees.keys())
+ visible = cls._builderGetVisible(options)
+
+ return CategorySet(category, dbapi=trees[repository].dbapi, only_visible=visible)
+ singleBuilder = classmethod(singleBuilder)
+
+ def multiBuilder(cls, options, settings, trees):
+ rValue = {}
+
+ if "categories" in options:
+ categories = options["categories"].split()
+ invalid = set(categories).difference(settings.categories)
+ if invalid:
+ raise SetConfigError(_("invalid categories: %s") % ", ".join(list(invalid)))
+ else:
+ categories = settings.categories
+
+ repository = cls._builderGetRepository(options, trees.keys())
+ visible = cls._builderGetVisible(options)
+ name_pattern = options.get("name_pattern", "$category/*")
+
+ if not "$category" in name_pattern and not "${category}" in name_pattern:
+ raise SetConfigError(_("name_pattern doesn't include $category placeholder"))
+
+ for cat in categories:
+ myset = CategorySet(cat, trees[repository].dbapi, only_visible=visible)
+ myname = name_pattern.replace("$category", cat)
+ myname = myname.replace("${category}", cat)
+ rValue[myname] = myset
+ return rValue
+ multiBuilder = classmethod(multiBuilder)
+
+class AgeSet(EverythingSet):
+ _operations = ["merge", "unmerge"]
+
+ def __init__(self, vardb, mode="older", age=7):
+ super(AgeSet, self).__init__(vardb)
+ self._mode = mode
+ self._age = age
+
+ def _filter(self, atom):
+
+ cpv = self._db.match(atom)[0]
+ path = self._db.getpath(cpv, filename="COUNTER")
+ age = (time.time() - os.stat(path).st_mtime) / (3600 * 24)
+ if ((self._mode == "older" and age <= self._age) \
+ or (self._mode == "newer" and age >= self._age)):
+ return False
+ else:
+ return True
+
+ def singleBuilder(cls, options, settings, trees):
+ mode = options.get("mode", "older")
+ if str(mode).lower() not in ["newer", "older"]:
+ raise SetConfigError(_("invalid 'mode' value %s (use either 'newer' or 'older')") % mode)
+ try:
+ age = int(options.get("age", "7"))
+ except ValueError as e:
+ raise SetConfigError(_("value of option 'age' is not an integer"))
+ return AgeSet(vardb=trees["vartree"].dbapi, mode=mode, age=age)
+
+ singleBuilder = classmethod(singleBuilder)
+
+class RebuiltBinaries(EverythingSet):
+ _operations = ('merge',)
+ _aux_keys = ('BUILD_TIME',)
+
+ def __init__(self, vardb, bindb=None):
+ super(RebuiltBinaries, self).__init__(vardb, bindb=bindb)
+ self._bindb = bindb
+
+ def _filter(self, atom):
+ cpv = self._db.match(atom)[0]
+ inst_build_time, = self._db.aux_get(cpv, self._aux_keys)
+ try:
+ bin_build_time, = self._bindb.aux_get(cpv, self._aux_keys)
+ except KeyError:
+ return False
+ return bool(bin_build_time and (inst_build_time != bin_build_time))
+
+ def singleBuilder(cls, options, settings, trees):
+ return RebuiltBinaries(trees["vartree"].dbapi,
+ bindb=trees["bintree"].dbapi)
+
+ singleBuilder = classmethod(singleBuilder)
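
As a sketch, any of the set classes above can be built directly through its singleBuilder() hook, bypassing sets.conf; the option values mirror what a config section would contain (assuming an initialized portage install):

    import portage
    from portage._sets.dbapi import AgeSet

    trees = portage.db[portage.root]
    myset = AgeSet.singleBuilder({"mode": "older", "age": "30"},
            portage.settings, trees)
    myset.load()  # SLOT atoms for packages installed over 30 days ago
    print(sorted(str(x) for x in myset.getAtoms()))
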
diff --git a/portage_with_autodep/pym/portage/_sets/files.py b/portage_with_autodep/pym/portage/_sets/files.py
new file mode 100644
index 0000000..f19ecf6
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_sets/files.py
@@ -0,0 +1,341 @@
+# Copyright 2007-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import re
+from itertools import chain
+
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.util import grabfile, write_atomic, ensure_dirs, normalize_path
+from portage.const import USER_CONFIG_PATH, WORLD_FILE, WORLD_SETS_FILE
+from portage.const import _ENABLE_SET_CONFIG
+from portage.localization import _
+from portage.locks import lockfile, unlockfile
+from portage import portage_gid
+from portage._sets.base import PackageSet, EditablePackageSet
+from portage._sets import SetConfigError, SETPREFIX, get_boolean
+from portage.env.loaders import ItemFileLoader, KeyListFileLoader
+from portage.env.validators import ValidAtomValidator
+from portage import cpv_getkey
+
+__all__ = ["StaticFileSet", "ConfigFileSet", "WorldSelectedSet"]
+
+class StaticFileSet(EditablePackageSet):
+ _operations = ["merge", "unmerge"]
+ _repopath_match = re.compile(r'.*\$\{repository:(?P<reponame>.+)\}.*')
+ _repopath_sub = re.compile(r'\$\{repository:(?P<reponame>.+)\}')
+
+ def __init__(self, filename, greedy=False, dbapi=None):
+ super(StaticFileSet, self).__init__(allow_repo=True)
+ self._filename = filename
+ self._mtime = None
+ self.description = "Package set loaded from file %s" % self._filename
+ self.loader = ItemFileLoader(self._filename, self._validate)
+ if greedy and not dbapi:
+ self.errors.append(_("%s configured as greedy set, but no dbapi instance passed in constructor") % self._filename)
+ greedy = False
+ self.greedy = greedy
+ self.dbapi = dbapi
+
+ metadata = grabfile(self._filename + ".metadata")
+ key = None
+ value = []
+ for line in metadata:
+ line = line.strip()
+ if len(line) == 0 and key != None:
+ setattr(self, key, " ".join(value))
+ key = None
+ elif line[-1] == ":" and key == None:
+ key = line[:-1].lower()
+ value = []
+ elif key != None:
+ value.append(line)
+ else:
+ pass
+ else:
+ if key != None:
+ setattr(self, key, " ".join(value))
+
+ def _validate(self, atom):
+ return bool(atom[:1] == SETPREFIX or ValidAtomValidator(atom, allow_repo=True))
+
+ def write(self):
+ write_atomic(self._filename, "".join("%s\n" % (atom,) \
+ for atom in sorted(chain(self._atoms, self._nonatoms))))
+
+ def load(self):
+ try:
+ mtime = os.stat(self._filename).st_mtime
+ except (OSError, IOError):
+ mtime = None
+ if (not self._loaded or self._mtime != mtime):
+ try:
+ data, errors = self.loader.load()
+ for fname in errors:
+ for e in errors[fname]:
+ self.errors.append(fname+": "+e)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ data = {}
+ if self.greedy:
+ atoms = []
+ for a in data:
+ matches = self.dbapi.match(a)
+ for cpv in matches:
+ atoms.append("%s:%s" % (cpv_getkey(cpv),
+ self.dbapi.aux_get(cpv, ["SLOT"])[0]))
+ # In addition to any installed slots, also try to pull
+ # in the latest new slot that may be available.
+ atoms.append(a)
+ else:
+ atoms = iter(data)
+ self._setAtoms(atoms)
+ self._mtime = mtime
+
+ def singleBuilder(self, options, settings, trees):
+ if not "filename" in options:
+ raise SetConfigError(_("no filename specified"))
+ greedy = get_boolean(options, "greedy", False)
+ filename = options["filename"]
+ # look for repository path variables
+ match = self._repopath_match.match(filename)
+ if match:
+ try:
+ filename = self._repopath_sub.sub(trees["porttree"].dbapi.treemap[match.groupdict()["reponame"]], filename)
+ except KeyError:
+ raise SetConfigError(_("Could not find repository '%s'") % match.groupdict()["reponame"])
+ return StaticFileSet(filename, greedy=greedy, dbapi=trees["vartree"].dbapi)
+ singleBuilder = classmethod(singleBuilder)
+
+ def multiBuilder(self, options, settings, trees):
+ rValue = {}
+ directory = options.get("directory",
+ os.path.join(settings["PORTAGE_CONFIGROOT"],
+ USER_CONFIG_PATH, "sets"))
+ name_pattern = options.get("name_pattern", "${name}")
+ if not "$name" in name_pattern and not "${name}" in name_pattern:
+ raise SetConfigError(_("name_pattern doesn't include ${name} placeholder"))
+ greedy = get_boolean(options, "greedy", False)
+ # look for repository path variables
+ match = self._repopath_match.match(directory)
+ if match:
+ try:
+ directory = self._repopath_sub.sub(trees["porttree"].dbapi.treemap[match.groupdict()["reponame"]], directory)
+ except KeyError:
+ raise SetConfigError(_("Could not find repository '%s'") % match.groupdict()["reponame"])
+
+ try:
+ directory = _unicode_decode(directory,
+ encoding=_encodings['fs'], errors='strict')
+ # Now verify that we can also encode it.
+ _unicode_encode(directory,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeError:
+ directory = _unicode_decode(directory,
+ encoding=_encodings['fs'], errors='replace')
+ raise SetConfigError(
+ _("Directory path contains invalid character(s) for encoding '%s': '%s'") \
+ % (_encodings['fs'], directory))
+
+ if os.path.isdir(directory):
+ directory = normalize_path(directory)
+
+ for parent, dirs, files in os.walk(directory):
+ try:
+ parent = _unicode_decode(parent,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ for d in dirs[:]:
+ if d[:1] == '.':
+ dirs.remove(d)
+ for filename in files:
+ try:
+ filename = _unicode_decode(filename,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ if filename[:1] == '.':
+ continue
+ if filename.endswith(".metadata"):
+ continue
+ filename = os.path.join(parent,
+ filename)[1 + len(directory):]
+ myname = name_pattern.replace("$name", filename)
+ myname = myname.replace("${name}", filename)
+ rValue[myname] = StaticFileSet(
+ os.path.join(directory, filename),
+ greedy=greedy, dbapi=trees["vartree"].dbapi)
+ return rValue
+ multiBuilder = classmethod(multiBuilder)
+
+class ConfigFileSet(PackageSet):
+ def __init__(self, filename):
+ super(ConfigFileSet, self).__init__()
+ self._filename = filename
+ self.description = "Package set generated from %s" % self._filename
+ self.loader = KeyListFileLoader(self._filename, ValidAtomValidator)
+
+ def load(self):
+ data, errors = self.loader.load()
+ self._setAtoms(iter(data))
+
+ def singleBuilder(self, options, settings, trees):
+ if not "filename" in options:
+ raise SetConfigError(_("no filename specified"))
+ return ConfigFileSet(options["filename"])
+ singleBuilder = classmethod(singleBuilder)
+
+ def multiBuilder(self, options, settings, trees):
+ rValue = {}
+ directory = options.get("directory",
+ os.path.join(settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH))
+ name_pattern = options.get("name_pattern", "sets/package_$suffix")
+ if not "$suffix" in name_pattern and not "${suffix}" in name_pattern:
+ raise SetConfigError(_("name_pattern doesn't include $suffix placeholder"))
+ for suffix in ["keywords", "use", "mask", "unmask"]:
+ myname = name_pattern.replace("$suffix", suffix)
+ myname = myname.replace("${suffix}", suffix)
+ rValue[myname] = ConfigFileSet(os.path.join(directory, "package."+suffix))
+ return rValue
+ multiBuilder = classmethod(multiBuilder)
+
+class WorldSelectedSet(EditablePackageSet):
+ description = "Set of packages that were directly installed by the user"
+
+ def __init__(self, eroot):
+ super(WorldSelectedSet, self).__init__(allow_repo=True)
+		# Most attributes exist twice, since atoms and non-atoms are
+		# stored in separate files.
+ self._lock = None
+ self._filename = os.path.join(eroot, WORLD_FILE)
+ self.loader = ItemFileLoader(self._filename, self._validate)
+ self._mtime = None
+
+ self._filename2 = os.path.join(eroot, WORLD_SETS_FILE)
+ self.loader2 = ItemFileLoader(self._filename2, self._validate2)
+ self._mtime2 = None
+
+ def _validate(self, atom):
+ return ValidAtomValidator(atom, allow_repo=True)
+
+ def _validate2(self, setname):
+ return setname.startswith(SETPREFIX)
+
+ def write(self):
+ write_atomic(self._filename,
+ "".join(sorted("%s\n" % x for x in self._atoms)))
+
+ if _ENABLE_SET_CONFIG:
+ write_atomic(self._filename2,
+ "".join(sorted("%s\n" % x for x in self._nonatoms)))
+
+ def load(self):
+ atoms = []
+ nonatoms = []
+ atoms_changed = False
+ # load atoms and non-atoms from different files so the worldfile is
+ # backwards-compatible with older versions and other PMs, even though
+ # it's supposed to be private state data :/
+ try:
+ mtime = os.stat(self._filename).st_mtime
+ except (OSError, IOError):
+ mtime = None
+ if (not self._loaded or self._mtime != mtime):
+ try:
+ data, errors = self.loader.load()
+ for fname in errors:
+ for e in errors[fname]:
+ self.errors.append(fname+": "+e)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ data = {}
+ atoms = list(data)
+ self._mtime = mtime
+ atoms_changed = True
+ else:
+ atoms.extend(self._atoms)
+
+ if _ENABLE_SET_CONFIG:
+ changed2, nonatoms = self._load2()
+ atoms_changed |= changed2
+
+ if atoms_changed:
+ self._setAtoms(atoms+nonatoms)
+
+ def _load2(self):
+ changed = False
+ try:
+ mtime = os.stat(self._filename2).st_mtime
+ except (OSError, IOError):
+ mtime = None
+ if (not self._loaded or self._mtime2 != mtime):
+ try:
+ data, errors = self.loader2.load()
+ for fname in errors:
+ for e in errors[fname]:
+ self.errors.append(fname+": "+e)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ data = {}
+ nonatoms = list(data)
+ self._mtime2 = mtime
+ changed = True
+ else:
+ nonatoms = list(self._nonatoms)
+
+ return changed, nonatoms
+
+ def _ensure_dirs(self):
+ ensure_dirs(os.path.dirname(self._filename), gid=portage_gid, mode=0o2750, mask=0o2)
+
+ def lock(self):
+ self._ensure_dirs()
+ self._lock = lockfile(self._filename, wantnewlockfile=1)
+
+ def unlock(self):
+ unlockfile(self._lock)
+ self._lock = None
+
+ def cleanPackage(self, vardb, cpv):
+ '''
+ Before calling this function you should call lock and load.
+ After calling this function you should call unlock.
+ '''
+ if not self._lock:
+ raise AssertionError('cleanPackage needs the set to be locked')
+
+ worldlist = list(self._atoms)
+ mykey = cpv_getkey(cpv)
+ newworldlist = []
+ for x in worldlist:
+ if x.cp == mykey:
+ matches = vardb.match(x, use_cache=0)
+ if not matches:
+ #zap our world entry
+ pass
+ elif len(matches) == 1 and matches[0] == cpv:
+ #zap our world entry
+ pass
+ else:
+ #others are around; keep it.
+ newworldlist.append(x)
+ else:
+ #this doesn't match the package we're unmerging; keep it.
+ newworldlist.append(x)
+
+ newworldlist.extend(self._nonatoms)
+ self.replace(newworldlist)
+
+ def singleBuilder(self, options, settings, trees):
+ return WorldSelectedSet(settings["EROOT"])
+ singleBuilder = classmethod(singleBuilder)
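
A hedged sketch of StaticFileSet round-tripping a world-style file; the path is only an example, and the file holds one atom or @set reference per line:

    from portage._sets.files import StaticFileSet

    s = StaticFileSet("/tmp/demo.set")
    s.replace(["app-editors/vim", "@system"])  # written via write_atomic()
    s.load()
    print(sorted(s.getAtoms()))     # the one valid atom
    print(sorted(s.getNonAtoms()))  # ['@system']
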
diff --git a/portage_with_autodep/pym/portage/_sets/libs.py b/portage_with_autodep/pym/portage/_sets/libs.py
new file mode 100644
index 0000000..6c5babc
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_sets/libs.py
@@ -0,0 +1,98 @@
+# Copyright 2007-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+from portage.localization import _
+from portage._sets.base import PackageSet
+from portage._sets import get_boolean, SetConfigError
+from portage.versions import cpv_getkey
+import portage
+
+class LibraryConsumerSet(PackageSet):
+ _operations = ["merge", "unmerge"]
+
+ def __init__(self, vardbapi, debug=False):
+ super(LibraryConsumerSet, self).__init__()
+ self.dbapi = vardbapi
+ self.debug = debug
+
+ def mapPathsToAtoms(self, paths):
+ rValue = set()
+ for p in paths:
+ for cpv in self.dbapi._linkmap.getOwners(p):
+ try:
+ slot, = self.dbapi.aux_get(cpv, ["SLOT"])
+ except KeyError:
+ # This is expected for preserved libraries
+ # of packages that have been uninstalled
+ # without replacement.
+ pass
+ else:
+ rValue.add("%s:%s" % (cpv_getkey(cpv), slot))
+ return rValue
+
+class LibraryFileConsumerSet(LibraryConsumerSet):
+
+ """
+ Note: This does not detect libtool archive (*.la) files that consume the
+ specified files (revdep-rebuild is able to detect them).
+ """
+
+ description = "Package set which contains all packages " + \
+ "that consume the specified library file(s)."
+
+ def __init__(self, vardbapi, files, **kargs):
+ super(LibraryFileConsumerSet, self).__init__(vardbapi, **kargs)
+ self.files = files
+
+ def load(self):
+ consumers = set()
+ for lib in self.files:
+ consumers.update(self.dbapi._linkmap.findConsumers(lib))
+
+ if not consumers:
+ return
+ self._setAtoms(self.mapPathsToAtoms(consumers))
+
+ def singleBuilder(cls, options, settings, trees):
+ files = tuple(portage.util.shlex_split(options.get("files", "")))
+ if not files:
+ raise SetConfigError(_("no files given"))
+ debug = get_boolean(options, "debug", False)
+ return LibraryFileConsumerSet(trees["vartree"].dbapi,
+ files, debug=debug)
+ singleBuilder = classmethod(singleBuilder)
+
+class PreservedLibraryConsumerSet(LibraryConsumerSet):
+ def load(self):
+ reg = self.dbapi._plib_registry
+ if reg is None:
+ # preserve-libs is entirely disabled
+ return
+ consumers = set()
+ if reg:
+ plib_dict = reg.getPreservedLibs()
+ for libs in plib_dict.values():
+ for lib in libs:
+ if self.debug:
+ print(lib)
+ for x in sorted(self.dbapi._linkmap.findConsumers(lib)):
+ print(" ", x)
+ print("-"*40)
+ consumers.update(self.dbapi._linkmap.findConsumers(lib))
+ # Don't rebuild packages just because they contain preserved
+ # libs that happen to be consumers of other preserved libs.
+ for libs in plib_dict.values():
+ consumers.difference_update(libs)
+ else:
+ return
+ if not consumers:
+ return
+ self._setAtoms(self.mapPathsToAtoms(consumers))
+
+ def singleBuilder(cls, options, settings, trees):
+ debug = get_boolean(options, "debug", False)
+ return PreservedLibraryConsumerSet(trees["vartree"].dbapi,
+ debug=debug)
+ singleBuilder = classmethod(singleBuilder)
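
A sketch of mapping a library file back to its consuming packages through the vartree linkmap. The library path is an example, and the linkmap may need populating first (assumed here via LinkageMap.rebuild()):

    import portage
    from portage._sets.libs import LibraryFileConsumerSet

    vardb = portage.db[portage.root]["vartree"].dbapi
    vardb._linkmap.rebuild()  # ensure the linkmap is populated
    s = LibraryFileConsumerSet(vardb, ("/usr/lib/libz.so.1",))
    s.load()
    print(sorted(str(a) for a in s.getAtoms()))
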
diff --git a/portage_with_autodep/pym/portage/_sets/profiles.py b/portage_with_autodep/pym/portage/_sets/profiles.py
new file mode 100644
index 0000000..39a2968
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_sets/profiles.py
@@ -0,0 +1,53 @@
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+
+from portage import os
+from portage.util import grabfile_package, stack_lists
+from portage._sets.base import PackageSet
+from portage._sets import get_boolean
+from portage.util import writemsg_level
+
+__all__ = ["PackagesSystemSet"]
+
+class PackagesSystemSet(PackageSet):
+ _operations = ["merge"]
+
+ def __init__(self, profile_paths, debug=False):
+ super(PackagesSystemSet, self).__init__()
+ self._profile_paths = profile_paths
+ self._debug = debug
+ if profile_paths:
+ description = self._profile_paths[-1]
+ if description == "/etc/portage/profile" and \
+ len(self._profile_paths) > 1:
+ description = self._profile_paths[-2]
+ else:
+ description = None
+ self.description = "System packages for profile %s" % description
+
+ def load(self):
+ debug = self._debug
+ if debug:
+ writemsg_level("\nPackagesSystemSet: profile paths: %s\n" % \
+ (self._profile_paths,), level=logging.DEBUG, noiselevel=-1)
+
+ mylist = [grabfile_package(os.path.join(x, "packages"), verify_eapi=True) for x in self._profile_paths]
+
+ if debug:
+ writemsg_level("\nPackagesSystemSet: raw packages: %s\n" % \
+ (mylist,), level=logging.DEBUG, noiselevel=-1)
+
+ mylist = stack_lists(mylist, incremental=1)
+
+ if debug:
+ writemsg_level("\nPackagesSystemSet: stacked packages: %s\n" % \
+ (mylist,), level=logging.DEBUG, noiselevel=-1)
+
+ self._setAtoms([x[1:] for x in mylist if x[0] == "*"])
+
+ def singleBuilder(self, options, settings, trees):
+ debug = get_boolean(options, "debug", False)
+ return PackagesSystemSet(settings.profiles, debug=debug)
+ singleBuilder = classmethod(singleBuilder)
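
For reference, an illustrative profile "packages" file as consumed by load() above: only the '*'-prefixed entries become @system members, and stack_lists() honors '-' removals contributed by child profiles:

    # hypothetical <profile>/packages
    *sys-apps/baselayout
    *sys-apps/portage
    # a child profile can drop an inherited entry:
    -*sys-apps/mktemp
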
diff --git a/portage_with_autodep/pym/portage/_sets/security.py b/portage_with_autodep/pym/portage/_sets/security.py
new file mode 100644
index 0000000..2d8fcf6
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_sets/security.py
@@ -0,0 +1,86 @@
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage.glsa as glsa
+from portage._sets.base import PackageSet
+from portage.versions import catpkgsplit, pkgcmp
+from portage._sets import get_boolean
+
+__all__ = ["SecuritySet", "NewGlsaSet", "NewAffectedSet", "AffectedSet"]
+
+class SecuritySet(PackageSet):
+ _operations = ["merge"]
+ _skip_applied = False
+
+ description = "package set that includes all packages possibly affected by a GLSA"
+
+ def __init__(self, settings, vardbapi, portdbapi, least_change=True):
+ super(SecuritySet, self).__init__()
+ self._settings = settings
+ self._vardbapi = vardbapi
+ self._portdbapi = portdbapi
+ self._least_change = least_change
+
+ def getGlsaList(self, skip_applied):
+ glsaindexlist = glsa.get_glsa_list(self._settings)
+ if skip_applied:
+ applied_list = glsa.get_applied_glsas(self._settings)
+ glsaindexlist = set(glsaindexlist).difference(applied_list)
+ glsaindexlist = list(glsaindexlist)
+ glsaindexlist.sort()
+ return glsaindexlist
+
+ def load(self):
+ glsaindexlist = self.getGlsaList(self._skip_applied)
+ atomlist = []
+ for glsaid in glsaindexlist:
+ myglsa = glsa.Glsa(glsaid, self._settings, self._vardbapi, self._portdbapi)
+ if self.useGlsa(myglsa):
+ atomlist += ["="+x for x in myglsa.getMergeList(least_change=self._least_change)]
+ self._setAtoms(self._reduce(atomlist))
+
+ def _reduce(self, atomlist):
+ mydict = {}
+ for atom in atomlist[:]:
+ cpv = self._portdbapi.xmatch("match-all", atom)[0]
+ slot = self._portdbapi.aux_get(cpv, ["SLOT"])[0]
+ cps = "/".join(catpkgsplit(cpv)[0:2]) + ":" + slot
+ if not cps in mydict:
+ mydict[cps] = (atom, cpv)
+ else:
+ other_cpv = mydict[cps][1]
+ if pkgcmp(catpkgsplit(cpv)[1:], catpkgsplit(other_cpv)[1:]) > 0:
+ atomlist.remove(mydict[cps][0])
+ mydict[cps] = (atom, cpv)
+ return atomlist
+
+ def useGlsa(self, myglsa):
+ return True
+
+ def updateAppliedList(self):
+ glsaindexlist = self.getGlsaList(True)
+ applied_list = glsa.get_applied_glsas(self._settings)
+ for glsaid in glsaindexlist:
+ myglsa = glsa.Glsa(glsaid, self._settings, self._vardbapi, self._portdbapi)
+ if not myglsa.isVulnerable() and not myglsa.nr in applied_list:
+ myglsa.inject()
+
+ def singleBuilder(cls, options, settings, trees):
+ least_change = not get_boolean(options, "use_emerge_resolver", False)
+ return cls(settings, trees["vartree"].dbapi, trees["porttree"].dbapi, least_change=least_change)
+ singleBuilder = classmethod(singleBuilder)
+
+class NewGlsaSet(SecuritySet):
+ _skip_applied = True
+ description = "Package set that includes all packages possibly affected by an unapplied GLSA"
+
+class AffectedSet(SecuritySet):
+	description = "Package set that includes all packages affected by a GLSA"
+
+ def useGlsa(self, myglsa):
+ return myglsa.isVulnerable()
+
+class NewAffectedSet(AffectedSet):
+ _skip_applied = True
+ description = "Package set that includes all packages affected by an unapplied GLSA"
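
An illustrative sets.conf section wiring up one of the GLSA sets above; the section name is arbitrary, and _parse() rewrites the old portage.sets namespace to portage._sets automatically:

    [new-affected]
    class = portage.sets.security.NewAffectedSet
    use_emerge_resolver = false
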
diff --git a/portage_with_autodep/pym/portage/_sets/shell.py b/portage_with_autodep/pym/portage/_sets/shell.py
new file mode 100644
index 0000000..2c95845
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_sets/shell.py
@@ -0,0 +1,44 @@
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import subprocess
+
+from portage import os
+from portage import _unicode_decode
+from portage._sets.base import PackageSet
+from portage._sets import SetConfigError
+
+__all__ = ["CommandOutputSet"]
+
+class CommandOutputSet(PackageSet):
+ """This class creates a PackageSet from the output of a shell command.
+ The shell command should produce one atom per line, that is:
+
+	  atom1
+	  atom2
+	  ...
+	  atomN
+
+	Args:
+	  command: A string or sequence identifying the command to run
+	    (see the subprocess.Popen documentation for the format)
+ """
+ _operations = ["merge", "unmerge"]
+
+ def __init__(self, command):
+ super(CommandOutputSet, self).__init__()
+ self._command = command
+ self.description = "Package set generated from output of '%s'" % self._command
+
+ def load(self):
+ pipe = subprocess.Popen(self._command, stdout=subprocess.PIPE, shell=True)
+ stdout, stderr = pipe.communicate()
+ if pipe.wait() == os.EX_OK:
+ self._setAtoms(_unicode_decode(stdout).splitlines())
+
+ def singleBuilder(self, options, settings, trees):
+ if not "command" in options:
+ raise SetConfigError("no command specified")
+ return CommandOutputSet(options["command"])
+ singleBuilder = classmethod(singleBuilder)
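
A sketch of the class above fed by a shell command; "qlist -IC" (from portage-utils) is only an example of a producer that prints one atom per line:

    from portage._sets.shell import CommandOutputSet

    s = CommandOutputSet("qlist -IC dev-python/")
    s.load()  # runs the command; output is kept only on exit status 0
    print(sorted(s.getAtoms() | s.getNonAtoms()))
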
diff --git a/portage_with_autodep/pym/portage/cache/__init__.py b/portage_with_autodep/pym/portage/cache/__init__.py
new file mode 100644
index 0000000..e7fe599
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/__init__.py
@@ -0,0 +1,4 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+
diff --git a/portage_with_autodep/pym/portage/cache/anydbm.py b/portage_with_autodep/pym/portage/cache/anydbm.py
new file mode 100644
index 0000000..1d56b14
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/anydbm.py
@@ -0,0 +1,113 @@
+# Copyright 2005-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# Author(s): Brian Harring (ferringb@gentoo.org)
+
+from __future__ import absolute_import
+
+try:
+ import anydbm as anydbm_module
+except ImportError:
+ # python 3.x
+ import dbm as anydbm_module
+
+try:
+ import dbm.gnu as gdbm
+except ImportError:
+ try:
+ import gdbm
+ except ImportError:
+ gdbm = None
+
+try:
+ from dbm import whichdb
+except ImportError:
+ from whichdb import whichdb
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+from portage import _unicode_encode
+from portage import os
+import sys
+from portage.cache import fs_template
+from portage.cache import cache_errors
+
+
+class database(fs_template.FsBased):
+
+ autocommits = True
+ cleanse_keys = True
+ serialize_eclasses = False
+
+ def __init__(self, *args, **config):
+ super(database,self).__init__(*args, **config)
+
+ default_db = config.get("dbtype","anydbm")
+ if not default_db.startswith("."):
+ default_db = '.' + default_db
+
+ self._db_path = os.path.join(self.location, fs_template.gen_label(self.location, self.label)+default_db)
+ self.__db = None
+ mode = "w"
+ if whichdb(self._db_path) in ("dbm.gnu", "gdbm"):
+ # Allow multiple concurrent writers (see bug #53607).
+ mode += "u"
+ try:
+ # dbm.open() will not work with bytes in python-3.1:
+ # TypeError: can't concat bytes to str
+ self.__db = anydbm_module.open(self._db_path,
+ mode, self._perms)
+ except anydbm_module.error:
+ # XXX handle this at some point
+ try:
+ self._ensure_dirs()
+ self._ensure_dirs(self._db_path)
+ except (OSError, IOError) as e:
+ raise cache_errors.InitializationError(self.__class__, e)
+
+ # try again if failed
+ try:
+ if self.__db == None:
+ # dbm.open() will not work with bytes in python-3.1:
+ # TypeError: can't concat bytes to str
+ if gdbm is None:
+ self.__db = anydbm_module.open(self._db_path,
+ "c", self._perms)
+ else:
+ # Prefer gdbm type if available, since it allows
+ # multiple concurrent writers (see bug #53607).
+ self.__db = gdbm.open(self._db_path,
+ "cu", self._perms)
+ except anydbm_module.error as e:
+ raise cache_errors.InitializationError(self.__class__, e)
+ self._ensure_access(self._db_path)
+
+ def iteritems(self):
+ # dbm doesn't implement items()
+ for k in self.__db.keys():
+ yield (k, self[k])
+
+ def _getitem(self, cpv):
+		# Override _getitem() since the stored value is simply a pickle of the data handed in.
+ return pickle.loads(self.__db[_unicode_encode(cpv)])
+
+ def _setitem(self, cpv, values):
+ self.__db[_unicode_encode(cpv)] = pickle.dumps(values,pickle.HIGHEST_PROTOCOL)
+
+ def _delitem(self, cpv):
+ del self.__db[cpv]
+
+ def __iter__(self):
+ return iter(list(self.__db.keys()))
+
+ def __contains__(self, cpv):
+ return cpv in self.__db
+
+ def __del__(self):
+		# name mangling stores self.__db in __dict__ as "_database__db"
+		if "_database__db" in self.__dict__ and self.__db is not None:
+ self.__db.sync()
+ self.__db.close()
+
+ if sys.hexversion >= 0x3000000:
+ items = iteritems
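
A minimal sketch of the backend above, assuming the database(location, label, auxdbkeys) signature shared by the cache template; paths and values are examples, and entries are expected to carry a _mtime_ field:

    from portage.cache.anydbm import database

    auxdbkeys = ("DEPEND", "RDEPEND", "SLOT", "EAPI")
    db = database("/tmp/edb-demo", "demo", auxdbkeys)
    db["app-misc/demo-1"] = {"SLOT": "0", "_mtime_": 1}
    print(db["app-misc/demo-1"]["SLOT"])  # values round-trip as pickles
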
diff --git a/portage_with_autodep/pym/portage/cache/cache_errors.py b/portage_with_autodep/pym/portage/cache/cache_errors.py
new file mode 100644
index 0000000..3c1f239
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/cache_errors.py
@@ -0,0 +1,62 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+
+class CacheError(Exception): pass
+
+class InitializationError(CacheError):
+ def __init__(self, class_name, error):
+ self.error, self.class_name = error, class_name
+ def __str__(self):
+ return "Creation of instance %s failed due to %s" % \
+ (self.class_name, str(self.error))
+
+
+class CacheCorruption(CacheError):
+ def __init__(self, key, ex):
+ self.key, self.ex = key, ex
+ def __str__(self):
+ return "%s is corrupt: %s" % (self.key, str(self.ex))
+
+
+class GeneralCacheCorruption(CacheError):
+ def __init__(self,ex): self.ex = ex
+ def __str__(self): return "corruption detected: %s" % str(self.ex)
+
+
+class InvalidRestriction(CacheError):
+ def __init__(self, key, restriction, exception=None):
+ if exception == None: exception = ''
+		self.key, self.restriction, self.ex = key, restriction, exception
+ def __str__(self):
+ return "%s:%s is not valid: %s" % \
+ (self.key, self.restriction, str(self.ex))
+
+
+class ReadOnlyRestriction(CacheError):
+ def __init__(self, info=''):
+ self.info = info
+ def __str__(self):
+ return "cache is non-modifiable"+str(self.info)
+
+class StatCollision(CacheError):
+ """
+ If the content of a cache entry changes and neither the file mtime nor
+ size changes, it will prevent rsync from detecting changes. Cache backends
+ may raise this exception from _setitem() if they detect this type of stat
+ collision. See bug #139134.
+ """
+ def __init__(self, key, filename, mtime, size):
+ self.key = key
+ self.filename = filename
+ self.mtime = mtime
+ self.size = size
+
+ def __str__(self):
+ return "%s has stat collision with size %s and mtime %s" % \
+ (self.key, self.size, self.mtime)
+
+ def __repr__(self):
+ return "portage.cache.cache_errors.StatCollision(%s)" % \
+ (', '.join((repr(self.key), repr(self.filename),
+ repr(self.mtime), repr(self.size))),)
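
As a sketch of how the exception above is meant to be used, a cache writer that hits the collision can nudge the mtime so rsync sees a change (all values here are illustrative):

    from portage.cache.cache_errors import StatCollision

    try:
        raise StatCollision("app-misc/demo-1", "/tmp/demo-1",
                1313000000, 512)
    except StatCollision as e:
        # retry the write with a bumped mtime so size/mtime differ
        print("%s: retry with mtime %d" % (e.key, e.mtime + 1))
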
diff --git a/portage_with_autodep/pym/portage/cache/ebuild_xattr.py b/portage_with_autodep/pym/portage/cache/ebuild_xattr.py
new file mode 100644
index 0000000..6b388fa
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/ebuild_xattr.py
@@ -0,0 +1,171 @@
+# Copyright: 2009-2011 Gentoo Foundation
+# Author(s): Petteri Räty (betelgeuse@gentoo.org)
+# License: GPL2
+
+__all__ = ['database']
+
+import errno
+
+import portage
+from portage.cache import fs_template
+from portage.versions import catsplit
+from portage import cpv_getkey
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'xattr')
+
+class NoValueException(Exception):
+ pass
+
+class database(fs_template.FsBased):
+
+ autocommits = True
+
+ def __init__(self, *args, **config):
+ super(database,self).__init__(*args, **config)
+ self.portdir = self.label
+ self.ns = xattr.NS_USER + '.gentoo.cache'
+ self.keys = set(self._known_keys)
+ self.keys.add('_mtime_')
+ self.keys.add('_eclasses_')
+		# xattrs have an upper length limit
+ self.max_len = self.__get_max()
+
+ def __get_max(self):
+ path = os.path.join(self.portdir,'profiles/repo_name')
+ try:
+ return int(self.__get(path,'value_max_len'))
+ except NoValueException as e:
+ max = self.__calc_max(path)
+ self.__set(path,'value_max_len',str(max))
+ return max
+
+ def __calc_max(self,path):
+ """ Find out max attribute length supported by the file system """
+
+		# build a 100-character probe string
+		hundred = 'a' * 100
+
+		s = hundred
+
+		# A try/finally block would be cleaner, but that requires python 2.5
+ try:
+ while True:
+ self.__set(path,'test_max',s)
+ s+=hundred
+ except IOError as e:
+			# ext-based filesystems return the wrong errno
+ # http://bugzilla.kernel.org/show_bug.cgi?id=12793
+ if e.errno in (errno.E2BIG, errno.ENOSPC):
+ result = len(s)-100
+ else:
+ raise
+
+ try:
+ self.__remove(path,'test_max')
+ except IOError as e:
+ if e.errno != errno.ENODATA:
+ raise
+
+ return result
+
+ def __get_path(self,cpv):
+ cat,pn = catsplit(cpv_getkey(cpv))
+ return os.path.join(self.portdir,cat,pn,os.path.basename(cpv) + ".ebuild")
+
+ def __has_cache(self,path):
+ try:
+ self.__get(path,'_mtime_')
+ except NoValueException as e:
+ return False
+
+ return True
+
+ def __get(self,path,key,default=None):
+ try:
+ return xattr.get(path,key,namespace=self.ns)
+ except IOError as e:
+ if not default is None and errno.ENODATA == e.errno:
+ return default
+ else:
+ raise NoValueException()
+
+ def __remove(self,path,key):
+ xattr.remove(path,key,namespace=self.ns)
+
+ def __set(self,path,key,value):
+ xattr.set(path,key,value,namespace=self.ns)
+
+ def _getitem(self, cpv):
+ values = {}
+ path = self.__get_path(cpv)
+ all = {}
+		for key, value in xattr.get_all(path, namespace=self.ns):
+			all[key] = value
+
+ if not '_mtime_' in all:
+ raise KeyError(cpv)
+
+ # We default to '' like other caches
+ for key in self.keys:
+ attr_value = all.get(key,'1:')
+ parts,sep,value = attr_value.partition(':')
+ parts = int(parts)
+ if parts > 1:
+ for i in range(1,parts):
+ value += all.get(key+str(i))
+ values[key] = value
+
+ return values
+
+ def _setitem(self, cpv, values):
+ path = self.__get_path(cpv)
+ max = self.max_len
+ for key,value in values.items():
+ # mtime comes in as long so need to convert to strings
+ s = str(value)
+ # We need to split long values
+ value_len = len(s)
+ parts = 0
+ if value_len > max:
+ # Find out how many parts we need
+				parts = value_len // max  # integer division (python3-safe)
+ if value_len % max > 0:
+ parts += 1
+
+ # Only the first entry carries the number of parts
+ self.__set(path,key,'%s:%s'%(parts,s[0:max]))
+
+ # Write out the rest
+ for i in range(1,parts):
+ start = i * max
+ val = s[start:start+max]
+ self.__set(path,key+str(i),val)
+ else:
+ self.__set(path,key,"%s:%s"%(1,s))
+
+ def _delitem(self, cpv):
+ pass # Will be gone with the ebuild
+
+ def __contains__(self, cpv):
+ return os.path.exists(self.__get_path(cpv))
+
+ def __iter__(self):
+
+ for root, dirs, files in os.walk(self.portdir):
+ for file in files:
+ try:
+ file = _unicode_decode(file,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ if file[-7:] == '.ebuild':
+ cat = os.path.basename(os.path.dirname(root))
+ pn_pv = file[:-7]
+ path = os.path.join(root,file)
+ if self.__has_cache(path):
+ yield "%s/%s/%s" % (cat,os.path.basename(root),file[:-7])
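
The chunking used by _setitem()/_getitem() above, restated as a standalone sketch (no xattr required): the first attribute stores "<parts>:<first chunk>" and the remaining chunks go into numbered keys:

    def split_value(key, s, max_len):
        # mirror of the splitting scheme in database._setitem()
        if len(s) <= max_len:
            return {key: "1:%s" % s}
        parts = len(s) // max_len
        if len(s) % max_len:
            parts += 1
        out = {key: "%s:%s" % (parts, s[0:max_len])}
        for i in range(1, parts):
            out[key + str(i)] = s[i * max_len:(i + 1) * max_len]
        return out

    print(sorted(split_value("DEPEND", "a" * 250, 100)))
    # ['DEPEND', 'DEPEND1', 'DEPEND2']
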
diff --git a/portage_with_autodep/pym/portage/cache/flat_hash.py b/portage_with_autodep/pym/portage/cache/flat_hash.py
new file mode 100644
index 0000000..b6bc074
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/flat_hash.py
@@ -0,0 +1,155 @@
+# Copyright: 2005-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# Author(s): Brian Harring (ferringb@gentoo.org)
+
+from portage.cache import fs_template
+from portage.cache import cache_errors
+import errno
+import io
+import stat
+import sys
+import os as _os
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+
+if sys.hexversion >= 0x3000000:
+ long = int
+
+# Coerce to unicode, in order to prevent TypeError when writing
+# raw bytes to TextIOWrapper with python2.
+_setitem_fmt = _unicode_decode("%s=%s\n")
+
+class database(fs_template.FsBased):
+
+ autocommits = True
+
+ def __init__(self, *args, **config):
+ super(database,self).__init__(*args, **config)
+ self.location = os.path.join(self.location,
+ self.label.lstrip(os.path.sep).rstrip(os.path.sep))
+ write_keys = set(self._known_keys)
+ write_keys.add("_eclasses_")
+ write_keys.add("_mtime_")
+ self._write_keys = sorted(write_keys)
+ if not self.readonly and not os.path.exists(self.location):
+ self._ensure_dirs()
+
+ def _getitem(self, cpv):
+ # Don't use os.path.join, for better performance.
+ fp = self.location + _os.sep + cpv
+ try:
+ myf = io.open(_unicode_encode(fp,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace')
+ try:
+ lines = myf.read().split("\n")
+ if not lines[-1]:
+ lines.pop()
+ d = self._parse_data(lines, cpv)
+ if '_mtime_' not in d:
+ # Backward compatibility with old cache
+ # that uses mtime mangling.
+ d['_mtime_'] = _os.fstat(myf.fileno())[stat.ST_MTIME]
+ return d
+ finally:
+ myf.close()
+ except (IOError, OSError) as e:
+ if e.errno != errno.ENOENT:
+ raise cache_errors.CacheCorruption(cpv, e)
+ raise KeyError(cpv, e)
+
+ def _parse_data(self, data, cpv):
+ try:
+ return dict( x.split("=", 1) for x in data )
+ except ValueError as e:
+ # If a line is missing an "=", the split length is 1 instead of 2.
+ raise cache_errors.CacheCorruption(cpv, e)
+
+ def _setitem(self, cpv, values):
+ s = cpv.rfind("/")
+ fp = os.path.join(self.location,cpv[:s],".update.%i.%s" % (os.getpid(), cpv[s+1:]))
+ try:
+ myf = io.open(_unicode_encode(fp,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='backslashreplace')
+ except (IOError, OSError) as e:
+ if errno.ENOENT == e.errno:
+ try:
+ self._ensure_dirs(cpv)
+ myf = io.open(_unicode_encode(fp,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='backslashreplace')
+ except (OSError, IOError) as e:
+ raise cache_errors.CacheCorruption(cpv, e)
+ else:
+ raise cache_errors.CacheCorruption(cpv, e)
+
+ try:
+ for k in self._write_keys:
+ v = values.get(k)
+ if not v:
+ continue
+ myf.write(_setitem_fmt % (k, v))
+ finally:
+ myf.close()
+ self._ensure_access(fp)
+
+ #update written. now we move it.
+
+ new_fp = os.path.join(self.location,cpv)
+ try:
+ os.rename(fp, new_fp)
+ except (OSError, IOError) as e:
+ os.remove(fp)
+ raise cache_errors.CacheCorruption(cpv, e)
+
+ def _delitem(self, cpv):
+ try:
+ os.remove(os.path.join(self.location,cpv))
+ except OSError as e:
+ if errno.ENOENT == e.errno:
+ raise KeyError(cpv)
+ else:
+ raise cache_errors.CacheCorruption(cpv, e)
+
+ def __contains__(self, cpv):
+ return os.path.exists(os.path.join(self.location, cpv))
+
+ def __iter__(self):
+ """generator for walking the dir struct"""
+ dirs = [(0, self.location)]
+ len_base = len(self.location)
+ while dirs:
+ depth, dir_path = dirs.pop()
+ try:
+ dir_list = os.listdir(dir_path)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ continue
+ for l in dir_list:
+ if l.endswith(".cpickle"):
+ continue
+ p = os.path.join(dir_path, l)
+ try:
+ st = os.lstat(p)
+ except OSError:
+ # Cache entry disappeared.
+ continue
+ if stat.S_ISDIR(st.st_mode):
+ # Only recurse 1 deep, in order to avoid iteration over
+ # entries from another nested cache instance. This can
+ # happen if the user nests an overlay inside
+ # /usr/portage/local as in bug #302764.
+ if depth < 1:
+ dirs.append((depth+1, p))
+ continue
+ yield p[len_base+1:]
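For reference, the on-disk format flat_hash reads and writes is one KEY=VALUE line per metadata key, and _parse_data() above recovers the dict with a single split on the first '='. A small sketch with made-up values:

lines = ['EAPI=4', 'SLOT=0', '_mtime_=1313943350']
d = dict(line.split('=', 1) for line in lines)
assert d['SLOT'] == '0' and '_mtime_' in d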
diff --git a/portage_with_autodep/pym/portage/cache/flat_list.py b/portage_with_autodep/pym/portage/cache/flat_list.py
new file mode 100644
index 0000000..7288307
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/flat_list.py
@@ -0,0 +1,134 @@
+# Copyright 2005-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.cache import fs_template
+from portage.cache import cache_errors
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+import errno
+import io
+import stat
+import sys
+
+if sys.hexversion >= 0x3000000:
+ long = int
+
+# Coerce to unicode, in order to prevent TypeError when writing
+# raw bytes to TextIOWrapper with python2.
+_setitem_fmt = _unicode_decode("%s\n")
+
+# store the current key order *here*.
+class database(fs_template.FsBased):
+
+ autocommits = True
+
+ # do not screw with this ordering. _eclasses_ needs to be last
+ auxdbkey_order=('DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
+ 'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
+ 'KEYWORDS', 'IUSE', 'REQUIRED_USE',
+ 'PDEPEND', 'PROVIDE', 'EAPI', 'PROPERTIES', 'DEFINED_PHASES')
+
+ def __init__(self, *args, **config):
+ super(database,self).__init__(*args, **config)
+ self.location = os.path.join(self.location,
+ self.label.lstrip(os.path.sep).rstrip(os.path.sep))
+
+ if len(self._known_keys) > len(self.auxdbkey_order) + 2:
+ raise Exception("less ordered keys then auxdbkeys")
+ if not os.path.exists(self.location):
+ self._ensure_dirs()
+
+
+ def _getitem(self, cpv):
+ d = {}
+ try:
+ myf = io.open(_unicode_encode(os.path.join(self.location, cpv),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace')
+ for k,v in zip(self.auxdbkey_order, myf):
+ d[k] = v.rstrip("\n")
+ except (OSError, IOError) as e:
+ if errno.ENOENT == e.errno:
+ raise KeyError(cpv)
+ raise cache_errors.CacheCorruption(cpv, e)
+
+ try:
+ d["_mtime_"] = os.fstat(myf.fileno())[stat.ST_MTIME]
+ except OSError as e:
+ myf.close()
+ raise cache_errors.CacheCorruption(cpv, e)
+ myf.close()
+ return d
+
+
+ def _setitem(self, cpv, values):
+ s = cpv.rfind("/")
+ fp=os.path.join(self.location,cpv[:s],".update.%i.%s" % (os.getpid(), cpv[s+1:]))
+ try:
+ myf = io.open(_unicode_encode(fp,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='backslashreplace')
+ except (OSError, IOError) as e:
+ if errno.ENOENT == e.errno:
+ try:
+ self._ensure_dirs(cpv)
+ myf = io.open(_unicode_encode(fp,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='backslashreplace')
+ except (OSError, IOError) as e:
+ raise cache_errors.CacheCorruption(cpv, e)
+ else:
+ raise cache_errors.CacheCorruption(cpv, e)
+
+
+ for x in self.auxdbkey_order:
+ myf.write(_setitem_fmt % (values.get(x, ""),))
+
+ myf.close()
+ self._ensure_access(fp, mtime=values["_mtime_"])
+ #update written. now we move it.
+ new_fp = os.path.join(self.location,cpv)
+ try:
+ os.rename(fp, new_fp)
+ except (OSError, IOError) as e:
+ os.remove(fp)
+ raise cache_errors.CacheCorruption(cpv, e)
+
+
+ def _delitem(self, cpv):
+ try:
+ os.remove(os.path.join(self.location,cpv))
+ except OSError as e:
+ if errno.ENOENT == e.errno:
+ raise KeyError(cpv)
+ else:
+ raise cache_errors.CacheCorruption(cpv, e)
+
+
+ def __contains__(self, cpv):
+ return os.path.exists(os.path.join(self.location, cpv))
+
+
+ def __iter__(self):
+ """generator for walking the dir struct"""
+ dirs = [self.location]
+ len_base = len(self.location)
+ while len(dirs):
+ for l in os.listdir(dirs[0]):
+ if l.endswith(".cpickle"):
+ continue
+ p = os.path.join(dirs[0],l)
+ st = os.lstat(p)
+ if stat.S_ISDIR(st.st_mode):
+ dirs.append(p)
+ continue
+ yield p[len_base+1:]
+ dirs.pop(0)
+
+
+ def commit(self): pass
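flat_list, by contrast, stores values positionally: line N of a cache file holds the value of auxdbkey_order[N], so _getitem() only needs to zip the key order with the file's lines. A sketch with a truncated key order (illustrative, not the full tuple above):

order = ('DEPEND', 'RDEPEND', 'SLOT')
lines = ['>=dev-lang/python-2.6\n', '\n', '0\n']
d = dict((k, v.rstrip('\n')) for k, v in zip(order, lines))
assert d['SLOT'] == '0'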
diff --git a/portage_with_autodep/pym/portage/cache/fs_template.py b/portage_with_autodep/pym/portage/cache/fs_template.py
new file mode 100644
index 0000000..a82e862
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/fs_template.py
@@ -0,0 +1,90 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+
+import sys
+from portage.cache import template
+from portage import os
+
+from portage.proxy.lazyimport import lazyimport
+lazyimport(globals(),
+ 'portage.data:portage_gid',
+ 'portage.exception:PortageException',
+ 'portage.util:apply_permissions',
+)
+del lazyimport
+
+if sys.hexversion >= 0x3000000:
+ long = int
+
+class FsBased(template.database):
+ """template wrapping fs needed options, and providing _ensure_access as a way to
+ attempt to ensure files have the specified owners/perms"""
+
+ def __init__(self, *args, **config):
+ """throws InitializationError if needs args aren't specified
+ gid and perms aren't listed do to an oddity python currying mechanism
+ gid=portage_gid
+ perms=0665"""
+
+ for x, y in (("gid", -1), ("perms", -1)):
+ if x in config:
+ setattr(self, "_"+x, config[x])
+ del config[x]
+ else:
+ setattr(self, "_"+x, y)
+ super(FsBased, self).__init__(*args, **config)
+
+ if self.label.startswith(os.path.sep):
+ # normpath.
+ self.label = os.path.sep + os.path.normpath(self.label).lstrip(os.path.sep)
+
+
+ def _ensure_access(self, path, mtime=-1):
+ """returns true or false if it's able to ensure that path is properly chmod'd and chowned.
+ if mtime is specified, attempts to ensure that's correct also"""
+ try:
+ apply_permissions(path, gid=self._gid, mode=self._perms)
+ if mtime != -1:
+ mtime=long(mtime)
+ os.utime(path, (mtime, mtime))
+ except (PortageException, EnvironmentError):
+ return False
+ return True
+
+ def _ensure_dirs(self, path=None):
+ """with path!=None, ensure beyond self.location. otherwise, ensure self.location"""
+ if path:
+ path = os.path.dirname(path)
+ base = self.location
+ else:
+ path = self.location
+ base='/'
+
+ for dir in path.lstrip(os.path.sep).rstrip(os.path.sep).split(os.path.sep):
+ base = os.path.join(base,dir)
+ if not os.path.exists(base):
+ if self._perms != -1:
+ um = os.umask(0)
+ try:
+ perms = self._perms
+ if perms == -1:
+ perms = 0
+ perms |= 0o755
+ os.mkdir(base, perms)
+ if self._gid != -1:
+ os.chown(base, -1, self._gid)
+ finally:
+ if self._perms != -1:
+ os.umask(um)
+
+
+def gen_label(base, label):
+ """if supplied label is a path, generate a unique label based upon label, and supplied base path"""
+ if label.find(os.path.sep) == -1:
+ return label
+ label = label.strip("\"").strip("'")
+ label = os.path.join(*(label.rstrip(os.path.sep).split(os.path.sep)))
+ tail = os.path.split(label)[1]
+ return "%s-%X" % (tail, abs(label.__hash__()))
+
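gen_label() passes plain names through untouched and flattens path-style labels into a tail-HASH form. Note the hash half comes from Python's hash() and so varies between interpreter versions; a sketch of both cases:

gen_label('/var/cache/edb', 'gentoo')        # -> 'gentoo'
gen_label('/var/cache/edb', '/usr/portage')  # -> 'portage-' + hex digits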
diff --git a/portage_with_autodep/pym/portage/cache/mappings.py b/portage_with_autodep/pym/portage/cache/mappings.py
new file mode 100644
index 0000000..60a918e
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/mappings.py
@@ -0,0 +1,485 @@
+# Copyright: 2005-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# Author(s): Brian Harring (ferringb@gentoo.org)
+
+__all__ = ["Mapping", "MutableMapping", "UserDict", "ProtectedDict",
+ "LazyLoad", "slot_dict_class"]
+
+import sys
+import weakref
+
+class Mapping(object):
+ """
+ In python-3.0, the UserDict.DictMixin class has been replaced by
+ Mapping and MutableMapping from the collections module, but 2to3
+ doesn't currently account for this change:
+
+ http://bugs.python.org/issue2876
+
+ As a workaround for the above issue, use this class as a substitute
+ for UserDict.DictMixin so that code converted via 2to3 will run.
+ """
+
+ __slots__ = ()
+
+ def __iter__(self):
+ return iter(self.keys())
+
+ def keys(self):
+ return list(self.__iter__())
+
+ def __contains__(self, key):
+ try:
+ value = self[key]
+ except KeyError:
+ return False
+ return True
+
+ def iteritems(self):
+ for k in self:
+ yield (k, self[k])
+
+ def iterkeys(self):
+ return self.__iter__()
+
+ def itervalues(self):
+ for _, v in self.items():
+ yield v
+
+ def values(self):
+ return [v for _, v in self.iteritems()]
+
+ def items(self):
+ return list(self.iteritems())
+
+ def get(self, key, default=None):
+ try:
+ return self[key]
+ except KeyError:
+ return default
+
+ def __repr__(self):
+ return repr(dict(self.items()))
+
+ def __len__(self):
+ return len(list(self))
+
+ if sys.hexversion >= 0x3000000:
+ items = iteritems
+ keys = __iter__
+ values = itervalues
+
+class MutableMapping(Mapping):
+ """
+	A mutable version of the Mapping class.
+ """
+
+ __slots__ = ()
+
+ def clear(self):
+ for key in list(self):
+ del self[key]
+
+ def setdefault(self, key, default=None):
+ try:
+ return self[key]
+ except KeyError:
+ self[key] = default
+ return default
+
+ def pop(self, key, *args):
+ if len(args) > 1:
+ raise TypeError("pop expected at most 2 arguments, got " + \
+ repr(1 + len(args)))
+ try:
+ value = self[key]
+ except KeyError:
+ if args:
+ return args[0]
+ raise
+ del self[key]
+ return value
+
+ def popitem(self):
+ try:
+ k, v = next(iter(self.items()))
+ except StopIteration:
+ raise KeyError('container is empty')
+ del self[k]
+ return (k, v)
+
+ def update(self, *args, **kwargs):
+ if len(args) > 1:
+ raise TypeError(
+ "expected at most 1 positional argument, got " + \
+ repr(len(args)))
+ other = None
+ if args:
+ other = args[0]
+ if other is None:
+ pass
+ elif hasattr(other, 'iteritems'):
+ # Use getattr to avoid interference from 2to3.
+ for k, v in getattr(other, 'iteritems')():
+ self[k] = v
+ elif hasattr(other, 'items'):
+ # Use getattr to avoid interference from 2to3.
+ for k, v in getattr(other, 'items')():
+ self[k] = v
+ elif hasattr(other, 'keys'):
+ for k in other.keys():
+ self[k] = other[k]
+ else:
+ for k, v in other:
+ self[k] = v
+ if kwargs:
+ self.update(kwargs)
+
+class UserDict(MutableMapping):
+ """
+ Use this class as a substitute for UserDict.UserDict so that
+ code converted via 2to3 will run:
+
+ http://bugs.python.org/issue2876
+ """
+
+ __slots__ = ('data',)
+
+ def __init__(self, *args, **kwargs):
+
+ self.data = {}
+
+ if len(args) > 1:
+ raise TypeError(
+ "expected at most 1 positional argument, got " + \
+ repr(len(args)))
+
+ if args:
+ self.update(args[0])
+
+ if kwargs:
+ self.update(kwargs)
+
+ def __repr__(self):
+ return repr(self.data)
+
+ def __contains__(self, key):
+ return key in self.data
+
+ def __iter__(self):
+ return iter(self.data)
+
+ def __len__(self):
+ return len(self.data)
+
+ def __getitem__(self, key):
+ return self.data[key]
+
+ def __setitem__(self, key, item):
+ self.data[key] = item
+
+ def __delitem__(self, key):
+ del self.data[key]
+
+ def clear(self):
+ self.data.clear()
+
+ if sys.hexversion >= 0x3000000:
+ keys = __iter__
+
+class OrderedDict(UserDict):
+
+ __slots__ = ('_order',)
+
+ def __init__(self, *args, **kwargs):
+ self._order = []
+ UserDict.__init__(self, *args, **kwargs)
+
+ def __iter__(self):
+ return iter(self._order)
+
+ def __setitem__(self, key, item):
+ if key in self:
+ self._order.remove(key)
+ UserDict.__setitem__(self, key, item)
+ self._order.append(key)
+
+ def __delitem__(self, key):
+ UserDict.__delitem__(self, key)
+ self._order.remove(key)
+
+ def clear(self):
+ UserDict.clear(self)
+ del self._order[:]
+
+ if sys.hexversion >= 0x3000000:
+ keys = __iter__
+
+class ProtectedDict(MutableMapping):
+ """
+ given an initial dict, this wraps that dict storing changes in a secondary dict, protecting
+ the underlying dict from changes
+ """
+ __slots__=("orig","new","blacklist")
+
+ def __init__(self, orig):
+ self.orig = orig
+ self.new = {}
+ self.blacklist = {}
+
+
+ def __setitem__(self, key, val):
+ self.new[key] = val
+ if key in self.blacklist:
+ del self.blacklist[key]
+
+
+ def __getitem__(self, key):
+ if key in self.new:
+ return self.new[key]
+ if key in self.blacklist:
+ raise KeyError(key)
+ return self.orig[key]
+
+
+ def __delitem__(self, key):
+ if key in self.new:
+ del self.new[key]
+ elif key in self.orig:
+ if key not in self.blacklist:
+ self.blacklist[key] = True
+ return
+ raise KeyError(key)
+
+
+ def __iter__(self):
+ for k in self.new:
+ yield k
+ for k in self.orig:
+ if k not in self.blacklist and k not in self.new:
+ yield k
+
+ def __contains__(self, key):
+ return key in self.new or (key not in self.blacklist and key in self.orig)
+
+ if sys.hexversion >= 0x3000000:
+ keys = __iter__
+
+class LazyLoad(Mapping):
+ """
+ Lazy loading of values for a dict
+ """
+ __slots__=("pull", "d")
+
+ def __init__(self, pull_items_func, initial_items=[]):
+ self.d = {}
+ for k, v in initial_items:
+ self.d[k] = v
+ self.pull = pull_items_func
+
+ def __getitem__(self, key):
+ if key in self.d:
+ return self.d[key]
+ elif self.pull != None:
+ self.d.update(self.pull())
+ self.pull = None
+ return self.d[key]
+
+ def __iter__(self):
+ if self.pull is not None:
+ self.d.update(self.pull())
+ self.pull = None
+ return iter(self.d)
+
+ def __contains__(self, key):
+ if key in self.d:
+ return True
+ elif self.pull != None:
+ self.d.update(self.pull())
+ self.pull = None
+ return key in self.d
+
+ if sys.hexversion >= 0x3000000:
+ keys = __iter__
+
+_slot_dict_classes = weakref.WeakValueDictionary()
+
+def slot_dict_class(keys, prefix="_val_"):
+ """
+ Generates mapping classes that behave similar to a dict but store values
+ as object attributes that are allocated via __slots__. Instances of these
+ objects have a smaller memory footprint than a normal dict object.
+
+ @param keys: Fixed set of allowed keys
+ @type keys: Iterable
+ @param prefix: a prefix to use when mapping
+ attribute names from keys
+ @type prefix: String
+ @rtype: SlotDict
+ @returns: A class that constructs SlotDict instances
+ having the specified keys.
+ """
+ if isinstance(keys, frozenset):
+ keys_set = keys
+ else:
+ keys_set = frozenset(keys)
+ v = _slot_dict_classes.get((keys_set, prefix))
+ if v is None:
+
+ class SlotDict(object):
+
+ allowed_keys = keys_set
+ _prefix = prefix
+ __slots__ = ("__weakref__",) + \
+ tuple(prefix + k for k in allowed_keys)
+
+ def __init__(self, *args, **kwargs):
+
+ if len(args) > 1:
+ raise TypeError(
+ "expected at most 1 positional argument, got " + \
+ repr(len(args)))
+
+ if args:
+ self.update(args[0])
+
+ if kwargs:
+ self.update(kwargs)
+
+ def __iter__(self):
+ for k, v in self.iteritems():
+ yield k
+
+ def __len__(self):
+ l = 0
+ for i in self.iteritems():
+ l += 1
+ return l
+
+ def keys(self):
+ return list(self)
+
+ def iteritems(self):
+ prefix = self._prefix
+ for k in self.allowed_keys:
+ try:
+ yield (k, getattr(self, prefix + k))
+ except AttributeError:
+ pass
+
+ def items(self):
+ return list(self.iteritems())
+
+ def itervalues(self):
+ for k, v in self.iteritems():
+ yield v
+
+ def values(self):
+ return list(self.itervalues())
+
+ def __delitem__(self, k):
+ try:
+ delattr(self, self._prefix + k)
+ except AttributeError:
+ raise KeyError(k)
+
+ def __setitem__(self, k, v):
+ setattr(self, self._prefix + k, v)
+
+ def setdefault(self, key, default=None):
+ try:
+ return self[key]
+ except KeyError:
+ self[key] = default
+ return default
+
+ def update(self, *args, **kwargs):
+ if len(args) > 1:
+ raise TypeError(
+ "expected at most 1 positional argument, got " + \
+ repr(len(args)))
+ other = None
+ if args:
+ other = args[0]
+ if other is None:
+ pass
+ elif hasattr(other, 'iteritems'):
+ # Use getattr to avoid interference from 2to3.
+ for k, v in getattr(other, 'iteritems')():
+ self[k] = v
+ elif hasattr(other, 'items'):
+ # Use getattr to avoid interference from 2to3.
+ for k, v in getattr(other, 'items')():
+ self[k] = v
+ elif hasattr(other, 'keys'):
+ for k in other.keys():
+ self[k] = other[k]
+ else:
+ for k, v in other:
+ self[k] = v
+ if kwargs:
+ self.update(kwargs)
+
+ def __getitem__(self, k):
+ try:
+ return getattr(self, self._prefix + k)
+ except AttributeError:
+ raise KeyError(k)
+
+ def get(self, key, default=None):
+ try:
+ return self[key]
+ except KeyError:
+ return default
+
+ def __contains__(self, k):
+ return hasattr(self, self._prefix + k)
+
+ def pop(self, key, *args):
+ if len(args) > 1:
+ raise TypeError(
+ "pop expected at most 2 arguments, got " + \
+ repr(1 + len(args)))
+ try:
+ value = self[key]
+ except KeyError:
+ if args:
+ return args[0]
+ raise
+ del self[key]
+ return value
+
+ def popitem(self):
+ try:
+					k, v = next(self.iteritems())
+ except StopIteration:
+ raise KeyError('container is empty')
+ del self[k]
+ return (k, v)
+
+ def copy(self):
+ c = self.__class__()
+ c.update(self)
+ return c
+
+ def clear(self):
+ for k in self.allowed_keys:
+ try:
+ delattr(self, self._prefix + k)
+ except AttributeError:
+ pass
+
+ def __str__(self):
+ return str(dict(self.iteritems()))
+
+ def __repr__(self):
+ return repr(dict(self.iteritems()))
+
+ if sys.hexversion >= 0x3000000:
+ items = iteritems
+ keys = __iter__
+ values = itervalues
+
+ v = SlotDict
+ _slot_dict_classes[v.allowed_keys] = v
+ return v
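Typical use of slot_dict_class() is to build the class once for a fixed key set and then instantiate it like a dict; keys outside allowed_keys simply read as absent. A sketch (the key names are arbitrary):

SlotDict = slot_dict_class(('EAPI', 'SLOT'))
d = SlotDict(EAPI='4')
d['SLOT'] = '0'
assert d['EAPI'] == '4'
assert d.get('KEYWORDS') is None   # not in allowed_keys, reads as missing
assert sorted(d) == ['EAPI', 'SLOT']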
diff --git a/portage_with_autodep/pym/portage/cache/metadata.py b/portage_with_autodep/pym/portage/cache/metadata.py
new file mode 100644
index 0000000..4c735d7
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/metadata.py
@@ -0,0 +1,154 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+
+import errno
+import re
+import stat
+import sys
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage.cache import cache_errors, flat_hash
+import portage.eclass_cache
+from portage.cache.template import reconstruct_eclasses
+from portage.cache.mappings import ProtectedDict
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+ long = int
+
+# this is the old cache format, flat_list. count maintained here.
+magic_line_count = 22
+
+# store the current key order *here*.
+class database(flat_hash.database):
+ complete_eclass_entries = False
+ auxdbkey_order=('DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
+ 'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
+ 'KEYWORDS', 'INHERITED', 'IUSE', 'REQUIRED_USE',
+ 'PDEPEND', 'PROVIDE', 'EAPI', 'PROPERTIES', 'DEFINED_PHASES')
+
+ autocommits = True
+ serialize_eclasses = False
+
+ _hashed_re = re.compile('^(\\w+)=([^\n]*)')
+
+ def __init__(self, location, *args, **config):
+ loc = location
+ super(database, self).__init__(location, *args, **config)
+ self.location = os.path.join(loc, "metadata","cache")
+ self.ec = None
+ self.raise_stat_collision = False
+
+ def _parse_data(self, data, cpv):
+ _hashed_re_match = self._hashed_re.match
+ d = {}
+
+ for line in data:
+ hashed = False
+ hashed_match = _hashed_re_match(line)
+ if hashed_match is None:
+ d.clear()
+ try:
+ for i, key in enumerate(self.auxdbkey_order):
+ d[key] = data[i]
+ except IndexError:
+ pass
+ break
+ else:
+ d[hashed_match.group(1)] = hashed_match.group(2)
+
+ if "_eclasses_" not in d:
+ if "INHERITED" in d:
+ if self.ec is None:
+ self.ec = portage.eclass_cache.cache(self.location[:-15])
+ try:
+ d["_eclasses_"] = self.ec.get_eclass_data(
+ d["INHERITED"].split())
+ except KeyError as e:
+ # INHERITED contains a non-existent eclass.
+ raise cache_errors.CacheCorruption(cpv, e)
+ del d["INHERITED"]
+ else:
+ d["_eclasses_"] = {}
+ elif isinstance(d["_eclasses_"], basestring):
+ # We skip this if flat_hash.database._parse_data() was called above
+ # because it calls reconstruct_eclasses() internally.
+ d["_eclasses_"] = reconstruct_eclasses(None, d["_eclasses_"])
+
+ return d
+
+ def _setitem(self, cpv, values):
+ if "_eclasses_" in values:
+ values = ProtectedDict(values)
+ values["INHERITED"] = ' '.join(sorted(values["_eclasses_"]))
+
+ new_content = []
+ for k in self.auxdbkey_order:
+ new_content.append(values.get(k, ''))
+ new_content.append('\n')
+ for i in range(magic_line_count - len(self.auxdbkey_order)):
+ new_content.append('\n')
+ new_content = ''.join(new_content)
+ new_content = _unicode_encode(new_content,
+ _encodings['repo.content'], errors='backslashreplace')
+
+ new_fp = os.path.join(self.location, cpv)
+ try:
+ f = open(_unicode_encode(new_fp,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ except EnvironmentError:
+ pass
+ else:
+ try:
+ try:
+ existing_st = os.fstat(f.fileno())
+ existing_content = f.read()
+ finally:
+ f.close()
+ except EnvironmentError:
+ pass
+ else:
+ existing_mtime = existing_st[stat.ST_MTIME]
+ if values['_mtime_'] == existing_mtime and \
+ existing_content == new_content:
+ return
+
+ if self.raise_stat_collision and \
+ values['_mtime_'] == existing_mtime and \
+ len(new_content) == existing_st.st_size:
+ raise cache_errors.StatCollision(cpv, new_fp,
+ existing_mtime, existing_st.st_size)
+
+ s = cpv.rfind("/")
+ fp = os.path.join(self.location,cpv[:s],
+ ".update.%i.%s" % (os.getpid(), cpv[s+1:]))
+ try:
+ myf = open(_unicode_encode(fp,
+ encoding=_encodings['fs'], errors='strict'), 'wb')
+ except EnvironmentError as e:
+ if errno.ENOENT == e.errno:
+ try:
+ self._ensure_dirs(cpv)
+ myf = open(_unicode_encode(fp,
+ encoding=_encodings['fs'], errors='strict'), 'wb')
+ except EnvironmentError as e:
+ raise cache_errors.CacheCorruption(cpv, e)
+ else:
+ raise cache_errors.CacheCorruption(cpv, e)
+
+ try:
+ myf.write(new_content)
+ finally:
+ myf.close()
+ self._ensure_access(fp, mtime=values["_mtime_"])
+
+ try:
+ os.rename(fp, new_fp)
+ except EnvironmentError as e:
+ try:
+ os.unlink(fp)
+ except EnvironmentError:
+ pass
+ raise cache_errors.CacheCorruption(cpv, e)
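The metadata/cache files written by _setitem() above always come out to exactly magic_line_count lines: one positional value per auxdbkey, then blank padding. Worked by hand with a truncated key order (illustrative only):

values = {'DEPEND': 'dev-lang/python', 'SLOT': '0'}
order = ('DEPEND', 'RDEPEND', 'SLOT')   # truncated for illustration
content = ''.join(values.get(k, '') + '\n' for k in order)
content += '\n' * (22 - len(order))     # pad out to magic_line_count
assert content.count('\n') == 22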
diff --git a/portage_with_autodep/pym/portage/cache/metadata_overlay.py b/portage_with_autodep/pym/portage/cache/metadata_overlay.py
new file mode 100644
index 0000000..cfa0051
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/metadata_overlay.py
@@ -0,0 +1,105 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.cache import template
+from portage.cache.cache_errors import CacheCorruption
+from portage.cache.flat_hash import database as db_rw
+from portage.cache.metadata import database as db_ro
+
+class database(template.database):
+
+ serialize_eclasses = False
+
+ def __init__(self, location, label, auxdbkeys, db_rw=db_rw, db_ro=db_ro,
+ *args, **config):
+ super_config = config.copy()
+ super_config.pop("gid", None)
+ super_config.pop("perms", None)
+ super(database, self).__init__(location, label, auxdbkeys,
+ *args, **super_config)
+ self.db_rw = db_rw(location, label, auxdbkeys, **config)
+ self.commit = self.db_rw.commit
+ self.autocommits = self.db_rw.autocommits
+ if isinstance(db_ro, type):
+ ro_config = config.copy()
+ ro_config["readonly"] = True
+ self.db_ro = db_ro(label, "metadata/cache", auxdbkeys, **ro_config)
+ else:
+ self.db_ro = db_ro
+
+ def __getitem__(self, cpv):
+ """funnel whiteout validation through here, since value needs to be fetched"""
+ try:
+ value = self.db_rw[cpv]
+ except KeyError:
+ return self.db_ro[cpv] # raises a KeyError when necessary
+ except CacheCorruption:
+ del self.db_rw[cpv]
+ return self.db_ro[cpv] # raises a KeyError when necessary
+ if self._is_whiteout(value):
+ if self._is_whiteout_valid(cpv, value):
+ raise KeyError(cpv)
+ else:
+ del self.db_rw[cpv]
+ return self.db_ro[cpv] # raises a KeyError when necessary
+ else:
+ return value
+
+ def _setitem(self, name, values):
+ try:
+ value_ro = self.db_ro.get(name)
+ except CacheCorruption:
+ value_ro = None
+ if value_ro is not None and \
+ self._are_values_identical(value_ro, values):
+ # we have matching values in the underlying db_ro
+ # so it is unnecessary to store data in db_rw
+ try:
+ del self.db_rw[name] # delete unwanted whiteout when necessary
+ except KeyError:
+ pass
+ return
+ self.db_rw[name] = values
+
+ def _delitem(self, cpv):
+ value = self[cpv] # validates whiteout and/or raises a KeyError when necessary
+ if cpv in self.db_ro:
+ self.db_rw[cpv] = self._create_whiteout(value)
+ else:
+ del self.db_rw[cpv]
+
+ def __contains__(self, cpv):
+ try:
+ self[cpv] # validates whiteout when necessary
+ except KeyError:
+ return False
+ return True
+
+ def __iter__(self):
+ s = set()
+ for cpv in self.db_rw:
+ if cpv in self: # validates whiteout when necessary
+ yield cpv
+ # set includes whiteouts so they won't be yielded later
+ s.add(cpv)
+ for cpv in self.db_ro:
+ if cpv not in s:
+ yield cpv
+
+ def _is_whiteout(self, value):
+ return value["EAPI"] == "whiteout"
+
+ def _create_whiteout(self, value):
+ return {"EAPI":"whiteout","_eclasses_":value["_eclasses_"],"_mtime_":value["_mtime_"]}
+
+ def _is_whiteout_valid(self, name, value_rw):
+ try:
+ value_ro = self.db_ro[name]
+ return self._are_values_identical(value_rw,value_ro)
+ except KeyError:
+ return False
+
+ def _are_values_identical(self, value1, value2):
+ if value1['_mtime_'] != value2['_mtime_']:
+ return False
+ return value1["_eclasses_"] == value2["_eclasses_"]
diff --git a/portage_with_autodep/pym/portage/cache/sql_template.py b/portage_with_autodep/pym/portage/cache/sql_template.py
new file mode 100644
index 0000000..d023b1b
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/sql_template.py
@@ -0,0 +1,301 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+
+import sys
+from portage.cache import template, cache_errors
+from portage.cache.template import reconstruct_eclasses
+
+class SQLDatabase(template.database):
+ """template class for RDBM based caches
+
+ This class is designed such that derivatives don't have to change much code, mostly constant strings.
+ _BaseError must be an exception class that all Exceptions thrown from the derived RDBMS are derived
+ from.
+
+	SCHEMA_INSERT_CPV_INTO_PACKAGE should be modified dependent on the RDBMS, as should SCHEMA_PACKAGE_CREATE;
+	basically you need to deal with creation of a unique pkgid. If the dbapi2 rdbms class has a method of
+ recovering that id, then modify _insert_cpv to remove the extra select.
+
+ Creation of a derived class involves supplying _initdb_con, and table_exists.
+ Additionally, the default schemas may have to be modified.
+ """
+
+ SCHEMA_PACKAGE_NAME = "package_cache"
+ SCHEMA_PACKAGE_CREATE = "CREATE TABLE %s (\
+ pkgid INTEGER PRIMARY KEY, label VARCHAR(255), cpv VARCHAR(255), UNIQUE(label, cpv))" % SCHEMA_PACKAGE_NAME
+ SCHEMA_PACKAGE_DROP = "DROP TABLE %s" % SCHEMA_PACKAGE_NAME
+
+ SCHEMA_VALUES_NAME = "values_cache"
+ SCHEMA_VALUES_CREATE = "CREATE TABLE %s ( pkgid integer references %s (pkgid) on delete cascade, \
+ key varchar(255), value text, UNIQUE(pkgid, key))" % (SCHEMA_VALUES_NAME, SCHEMA_PACKAGE_NAME)
+ SCHEMA_VALUES_DROP = "DROP TABLE %s" % SCHEMA_VALUES_NAME
+ SCHEMA_INSERT_CPV_INTO_PACKAGE = "INSERT INTO %s (label, cpv) VALUES(%%s, %%s)" % SCHEMA_PACKAGE_NAME
+
+ _BaseError = ()
+ _dbClass = None
+
+ autocommits = False
+# cleanse_keys = True
+
+ # boolean indicating if the derived RDBMS class supports replace syntax
+ _supports_replace = False
+
+ def __init__(self, location, label, auxdbkeys, *args, **config):
+ """initialize the instance.
+ derived classes shouldn't need to override this"""
+
+ super(SQLDatabase, self).__init__(location, label, auxdbkeys, *args, **config)
+
+ config.setdefault("host","127.0.0.1")
+ config.setdefault("autocommit", self.autocommits)
+ self._initdb_con(config)
+
+ self.label = self._sfilter(self.label)
+
+
+ def _dbconnect(self, config):
+ """should be overridden if the derived class needs special parameters for initializing
+ the db connection, or cursor"""
+ self.db = self._dbClass(**config)
+ self.con = self.db.cursor()
+
+
+ def _initdb_con(self,config):
+ """ensure needed tables are in place.
+		If the derived class needs a different set of table creation commands, overload the appropriate
+ SCHEMA_ attributes. If it needs additional execution beyond, override"""
+
+ self._dbconnect(config)
+ if not self._table_exists(self.SCHEMA_PACKAGE_NAME):
+ if self.readonly:
+ raise cache_errors.ReadOnlyRestriction("table %s doesn't exist" % \
+ self.SCHEMA_PACKAGE_NAME)
+ try:
+ self.con.execute(self.SCHEMA_PACKAGE_CREATE)
+ except self._BaseError as e:
+ raise cache_errors.InitializationError(self.__class__, e)
+
+ if not self._table_exists(self.SCHEMA_VALUES_NAME):
+ if self.readonly:
+ raise cache_errors.ReadOnlyRestriction("table %s doesn't exist" % \
+ self.SCHEMA_VALUES_NAME)
+ try:
+ self.con.execute(self.SCHEMA_VALUES_CREATE)
+ except self._BaseError as e:
+ raise cache_errors.InitializationError(self.__class__, e)
+
+
+ def _table_exists(self, tbl):
+ """return true if a table exists
+ derived classes must override this"""
+ raise NotImplementedError
+
+
+ def _sfilter(self, s):
+ """meta escaping, returns quoted string for use in sql statements"""
+ return "\"%s\"" % s.replace("\\","\\\\").replace("\"","\\\"")
+
+
+ def _getitem(self, cpv):
+ try:
+ self.con.execute("SELECT key, value FROM %s NATURAL JOIN %s "
+ "WHERE label=%s AND cpv=%s" % (self.SCHEMA_PACKAGE_NAME, self.SCHEMA_VALUES_NAME,
+ self.label, self._sfilter(cpv)))
+ except self._BaseError as e:
+ raise cache_errors.CacheCorruption(self, cpv, e)
+
+ rows = self.con.fetchall()
+
+ if len(rows) == 0:
+ raise KeyError(cpv)
+
+ vals = dict([(k,"") for k in self._known_keys])
+ vals.update(dict(rows))
+ return vals
+
+
+ def _delitem(self, cpv):
+ """delete a cpv cache entry
+ derived RDBM classes for this *must* either support cascaded deletes, or
+ override this method"""
+ try:
+ try:
+ self.con.execute("DELETE FROM %s WHERE label=%s AND cpv=%s" % \
+ (self.SCHEMA_PACKAGE_NAME, self.label, self._sfilter(cpv)))
+ if self.autocommits:
+ self.commit()
+ except self._BaseError as e:
+ raise cache_errors.CacheCorruption(self, cpv, e)
+ if self.con.rowcount <= 0:
+ raise KeyError(cpv)
+ except SystemExit:
+ raise
+ except Exception:
+ if not self.autocommits:
+ self.db.rollback()
+ # yes, this can roll back a lot more then just the delete. deal.
+ raise
+
+ def __del__(self):
+ # just to be safe.
+ if "db" in self.__dict__ and self.db != None:
+ self.commit()
+ self.db.close()
+
+ def _setitem(self, cpv, values):
+
+ try:
+ # insert.
+ try:
+ pkgid = self._insert_cpv(cpv)
+ except self._BaseError as e:
+ raise cache_errors.CacheCorruption(cpv, e)
+
+ # __getitem__ fills out missing values,
+ # so we store only what's handed to us and is a known key
+ db_values = []
+ for key in self._known_keys:
+ if key in values and values[key]:
+ db_values.append({"key":key, "value":values[key]})
+
+ if len(db_values) > 0:
+ try:
+ self.con.executemany("INSERT INTO %s (pkgid, key, value) VALUES(\"%s\", %%(key)s, %%(value)s)" % \
+ (self.SCHEMA_VALUES_NAME, str(pkgid)), db_values)
+ except self._BaseError as e:
+ raise cache_errors.CacheCorruption(cpv, e)
+ if self.autocommits:
+ self.commit()
+
+ except SystemExit:
+ raise
+ except Exception:
+ if not self.autocommits:
+ try:
+ self.db.rollback()
+ except self._BaseError:
+ pass
+ raise
+
+
+ def _insert_cpv(self, cpv):
+ """uses SCHEMA_INSERT_CPV_INTO_PACKAGE, which must be overloaded if the table definition
+ doesn't support auto-increment columns for pkgid.
+ returns the cpvs new pkgid
+ note this doesn't commit the transaction. The caller is expected to."""
+
+ cpv = self._sfilter(cpv)
+ if self._supports_replace:
+ query_str = self.SCHEMA_INSERT_CPV_INTO_PACKAGE.replace("INSERT","REPLACE",1)
+ else:
+ # just delete it.
+ try:
+ del self[cpv]
+ except (cache_errors.CacheCorruption, KeyError):
+ pass
+ query_str = self.SCHEMA_INSERT_CPV_INTO_PACKAGE
+ try:
+ self.con.execute(query_str % (self.label, cpv))
+ except self._BaseError:
+ self.db.rollback()
+ raise
+ self.con.execute("SELECT pkgid FROM %s WHERE label=%s AND cpv=%s" % \
+ (self.SCHEMA_PACKAGE_NAME, self.label, cpv))
+
+ if self.con.rowcount != 1:
+			raise cache_errors.CacheCorruption(cpv, "Tried to insert the cpv, but found "
+				"%i matches upon the following select!" % self.con.rowcount)
+ return self.con.fetchone()[0]
+
+
+ def __contains__(self, cpv):
+ if not self.autocommits:
+ try:
+ self.commit()
+ except self._BaseError as e:
+ raise cache_errors.GeneralCacheCorruption(e)
+
+ try:
+ self.con.execute("SELECT cpv FROM %s WHERE label=%s AND cpv=%s" % \
+ (self.SCHEMA_PACKAGE_NAME, self.label, self._sfilter(cpv)))
+ except self._BaseError as e:
+ raise cache_errors.GeneralCacheCorruption(e)
+ return self.con.rowcount > 0
+
+
+ def __iter__(self):
+ if not self.autocommits:
+ try:
+ self.commit()
+ except self._BaseError as e:
+ raise cache_errors.GeneralCacheCorruption(e)
+
+ try:
+ self.con.execute("SELECT cpv FROM %s WHERE label=%s" %
+ (self.SCHEMA_PACKAGE_NAME, self.label))
+ except self._BaseError as e:
+ raise cache_errors.GeneralCacheCorruption(e)
+ for x in self.con.fetchall():
+ yield x[0]
+
+ def iteritems(self):
+ try:
+ self.con.execute("SELECT cpv, key, value FROM %s NATURAL JOIN %s "
+ "WHERE label=%s" % (self.SCHEMA_PACKAGE_NAME, self.SCHEMA_VALUES_NAME,
+ self.label))
+ except self._BaseError as e:
+			raise cache_errors.GeneralCacheCorruption(e)
+
+ oldcpv = None
+ l = []
+ for x, y, v in self.con.fetchall():
+ if oldcpv != x:
+ if oldcpv != None:
+ d = dict(l)
+ if "_eclasses_" in d:
+ d["_eclasses_"] = reconstruct_eclasses(oldcpv, d["_eclasses_"])
+ else:
+ d["_eclasses_"] = {}
+					yield oldcpv, d
+					l = []
+				oldcpv = x
+ l.append((y,v))
+ if oldcpv != None:
+ d = dict(l)
+ if "_eclasses_" in d:
+ d["_eclasses_"] = reconstruct_eclasses(oldcpv, d["_eclasses_"])
+ else:
+ d["_eclasses_"] = {}
+			yield oldcpv, d
+
+ def commit(self):
+ self.db.commit()
+
+ def get_matches(self,match_dict):
+ query_list = []
+ for k,v in match_dict.items():
+ if k not in self._known_keys:
+ raise cache_errors.InvalidRestriction(k, v, "key isn't known to this cache instance")
+ v = v.replace("%","\\%")
+ v = v.replace(".*","%")
+ query_list.append("(key=%s AND value LIKE %s)" % (self._sfilter(k), self._sfilter(v)))
+
+ if len(query_list):
+ query = " AND "+" AND ".join(query_list)
+ else:
+ query = ''
+
+ print("query = SELECT cpv from package_cache natural join values_cache WHERE label=%s %s" % (self.label, query))
+ try:
+ self.con.execute("SELECT cpv from package_cache natural join values_cache WHERE label=%s %s" % \
+ (self.label, query))
+ except self._BaseError as e:
+ raise cache_errors.GeneralCacheCorruption(e)
+
+ return [ row[0] for row in self.con.fetchall() ]
+
+ if sys.hexversion >= 0x3000000:
+ items = iteritems
+ keys = __iter__
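A hedged sketch of the minimal derivation the class docstring calls for. mydb is a hypothetical DB-API 2.0 module, and the sqlite_master query assumes an sqlite-style catalog; a real backend would substitute its own existence check:

import mydb  # hypothetical DB-API 2.0 module

class database(SQLDatabase):
	_BaseError = mydb.Error
	_dbClass = staticmethod(mydb.connect)  # connection factory taking **config

	def _table_exists(self, tbl):
		self.con.execute(
			"SELECT name FROM sqlite_master WHERE type='table' AND name=%s"
			% self._sfilter(tbl))
		return len(self.con.fetchall()) == 1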
diff --git a/portage_with_autodep/pym/portage/cache/sqlite.py b/portage_with_autodep/pym/portage/cache/sqlite.py
new file mode 100644
index 0000000..fcc62ff
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/sqlite.py
@@ -0,0 +1,245 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+from portage.cache import fs_template
+from portage.cache import cache_errors
+from portage import os
+from portage import _unicode_decode
+from portage.util import writemsg
+from portage.localization import _
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+class database(fs_template.FsBased):
+
+ autocommits = False
+ synchronous = False
+ # cache_bytes is used together with page_size (set at sqlite build time)
+ # to calculate the number of pages requested, according to the following
+ # equation: cache_bytes = page_bytes * page_count
+ cache_bytes = 1024 * 1024 * 10
+ _db_table = None
+
+ def __init__(self, *args, **config):
+ super(database, self).__init__(*args, **config)
+ self._import_sqlite()
+ self._allowed_keys = ["_mtime_", "_eclasses_"]
+ self._allowed_keys.extend(self._known_keys)
+ self._allowed_keys.sort()
+ self.location = os.path.join(self.location,
+ self.label.lstrip(os.path.sep).rstrip(os.path.sep))
+
+ if not self.readonly and not os.path.exists(self.location):
+ self._ensure_dirs()
+
+ config.setdefault("autocommit", self.autocommits)
+ config.setdefault("cache_bytes", self.cache_bytes)
+ config.setdefault("synchronous", self.synchronous)
+ # Timeout for throwing a "database is locked" exception (pysqlite
+ # default is 5.0 seconds).
+ config.setdefault("timeout", 15)
+ self._db_init_connection(config)
+ self._db_init_structures()
+
+ def _import_sqlite(self):
+ # sqlite3 is optional with >=python-2.5
+ try:
+ import sqlite3 as db_module
+ except ImportError:
+ try:
+ from pysqlite2 import dbapi2 as db_module
+ except ImportError as e:
+ raise cache_errors.InitializationError(self.__class__, e)
+
+ self._db_module = db_module
+ self._db_error = db_module.Error
+
+ def _db_escape_string(self, s):
+ """meta escaping, returns quoted string for use in sql statements"""
+ if not isinstance(s, basestring):
+ # Avoid potential UnicodeEncodeError in python-2.x by
+ # only calling str() when it's absolutely necessary.
+ s = str(s)
+ # This is equivalent to the _quote function from pysqlite 1.1.
+ return "'%s'" % s.replace("'", "''")
+
+ def _db_init_connection(self, config):
+ self._dbpath = self.location + ".sqlite"
+ #if os.path.exists(self._dbpath):
+ # os.unlink(self._dbpath)
+ connection_kwargs = {}
+ connection_kwargs["timeout"] = config["timeout"]
+ try:
+ if not self.readonly:
+ self._ensure_dirs()
+ self._db_connection = self._db_module.connect(
+ database=_unicode_decode(self._dbpath), **connection_kwargs)
+ self._db_cursor = self._db_connection.cursor()
+ self._db_cursor.execute("PRAGMA encoding = %s" % self._db_escape_string("UTF-8"))
+ if not self.readonly and not self._ensure_access(self._dbpath):
+ raise cache_errors.InitializationError(self.__class__, "can't ensure perms on %s" % self._dbpath)
+ self._db_init_cache_size(config["cache_bytes"])
+ self._db_init_synchronous(config["synchronous"])
+ except self._db_error as e:
+ raise cache_errors.InitializationError(self.__class__, e)
+
+ def _db_init_structures(self):
+ self._db_table = {}
+ self._db_table["packages"] = {}
+ mytable = "portage_packages"
+ self._db_table["packages"]["table_name"] = mytable
+ self._db_table["packages"]["package_id"] = "internal_db_package_id"
+ self._db_table["packages"]["package_key"] = "portage_package_key"
+ self._db_table["packages"]["internal_columns"] = \
+ [self._db_table["packages"]["package_id"],
+ self._db_table["packages"]["package_key"]]
+ create_statement = []
+ create_statement.append("CREATE TABLE")
+ create_statement.append(mytable)
+ create_statement.append("(")
+ table_parameters = []
+ table_parameters.append("%s INTEGER PRIMARY KEY AUTOINCREMENT" % self._db_table["packages"]["package_id"])
+ table_parameters.append("%s TEXT" % self._db_table["packages"]["package_key"])
+ for k in self._allowed_keys:
+ table_parameters.append("%s TEXT" % k)
+ table_parameters.append("UNIQUE(%s)" % self._db_table["packages"]["package_key"])
+ create_statement.append(",".join(table_parameters))
+ create_statement.append(")")
+
+ self._db_table["packages"]["create"] = " ".join(create_statement)
+ self._db_table["packages"]["columns"] = \
+ self._db_table["packages"]["internal_columns"] + \
+ self._allowed_keys
+
+ cursor = self._db_cursor
+ for k, v in self._db_table.items():
+ if self._db_table_exists(v["table_name"]):
+ create_statement = self._db_table_get_create(v["table_name"])
+ if create_statement != v["create"]:
+ writemsg(_("sqlite: dropping old table: %s\n") % v["table_name"])
+ cursor.execute("DROP TABLE %s" % v["table_name"])
+ cursor.execute(v["create"])
+ else:
+ cursor.execute(v["create"])
+
+ def _db_table_exists(self, table_name):
+ """return true/false dependant on a tbl existing"""
+ cursor = self._db_cursor
+ cursor.execute("SELECT name FROM sqlite_master WHERE type=\"table\" AND name=%s" % \
+ self._db_escape_string(table_name))
+ return len(cursor.fetchall()) == 1
+
+ def _db_table_get_create(self, table_name):
+ """return true/false dependant on a tbl existing"""
+ cursor = self._db_cursor
+ cursor.execute("SELECT sql FROM sqlite_master WHERE name=%s" % \
+ self._db_escape_string(table_name))
+ return cursor.fetchall()[0][0]
+
+ def _db_init_cache_size(self, cache_bytes):
+ cursor = self._db_cursor
+ cursor.execute("PRAGMA page_size")
+ page_size=int(cursor.fetchone()[0])
+ # number of pages, sqlite default is 2000
+		cache_size = cache_bytes // page_size
+ cursor.execute("PRAGMA cache_size = %d" % cache_size)
+ cursor.execute("PRAGMA cache_size")
+ actual_cache_size = int(cursor.fetchone()[0])
+ del cursor
+ if actual_cache_size != cache_size:
+ raise cache_errors.InitializationError(self.__class__,"actual cache_size = "+actual_cache_size+" does does not match requested size of "+cache_size)
+
+ def _db_init_synchronous(self, synchronous):
+ cursor = self._db_cursor
+ cursor.execute("PRAGMA synchronous = %d" % synchronous)
+ cursor.execute("PRAGMA synchronous")
+ actual_synchronous=int(cursor.fetchone()[0])
+ del cursor
+ if actual_synchronous!=synchronous:
+ raise cache_errors.InitializationError(self.__class__,"actual synchronous = "+actual_synchronous+" does does not match requested value of "+synchronous)
+
+ def _getitem(self, cpv):
+ cursor = self._db_cursor
+ cursor.execute("select * from %s where %s=%s" % \
+ (self._db_table["packages"]["table_name"],
+ self._db_table["packages"]["package_key"],
+ self._db_escape_string(cpv)))
+ result = cursor.fetchall()
+ if len(result) == 1:
+ pass
+ elif len(result) == 0:
+ raise KeyError(cpv)
+ else:
+ raise cache_errors.CacheCorruption(cpv, "key is not unique")
+ d = {}
+ internal_columns = self._db_table["packages"]["internal_columns"]
+ column_index = -1
+ for k in self._db_table["packages"]["columns"]:
+ column_index +=1
+ if k not in internal_columns:
+ d[k] = result[0][column_index]
+
+ return d
+
+ def _setitem(self, cpv, values):
+ update_statement = []
+ update_statement.append("REPLACE INTO %s" % self._db_table["packages"]["table_name"])
+ update_statement.append("(")
+ update_statement.append(','.join([self._db_table["packages"]["package_key"]] + self._allowed_keys))
+ update_statement.append(")")
+ update_statement.append("VALUES")
+ update_statement.append("(")
+ values_parameters = []
+ values_parameters.append(self._db_escape_string(cpv))
+ for k in self._allowed_keys:
+ values_parameters.append(self._db_escape_string(values.get(k, '')))
+ update_statement.append(",".join(values_parameters))
+ update_statement.append(")")
+ cursor = self._db_cursor
+ try:
+ s = " ".join(update_statement)
+ cursor.execute(s)
+ except self._db_error as e:
+ writemsg("%s: %s\n" % (cpv, str(e)))
+ raise
+
+ def commit(self):
+ self._db_connection.commit()
+
+ def _delitem(self, cpv):
+ cursor = self._db_cursor
+ cursor.execute("DELETE FROM %s WHERE %s=%s" % \
+ (self._db_table["packages"]["table_name"],
+ self._db_table["packages"]["package_key"],
+ self._db_escape_string(cpv)))
+
+ def __contains__(self, cpv):
+ cursor = self._db_cursor
+ cursor.execute(" ".join(
+ ["SELECT %s FROM %s" %
+ (self._db_table["packages"]["package_id"],
+ self._db_table["packages"]["table_name"]),
+ "WHERE %s=%s" % (
+ self._db_table["packages"]["package_key"],
+ self._db_escape_string(cpv))]))
+ result = cursor.fetchall()
+ if len(result) == 0:
+ return False
+ elif len(result) == 1:
+ return True
+ else:
+ raise cache_errors.CacheCorruption(cpv, "key is not unique")
+
+ def __iter__(self):
+ """generator for walking the dir struct"""
+ cursor = self._db_cursor
+ cursor.execute("SELECT %s FROM %s" % \
+ (self._db_table["packages"]["package_key"],
+ self._db_table["packages"]["table_name"]))
+ result = cursor.fetchall()
+ key_list = [x[0] for x in result]
+ del result
+ while key_list:
+ yield key_list.pop()
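The cache-size handshake in _db_init_cache_size() is plain page arithmetic: with sqlite's common 1024-byte pages, the default 10 MiB budget requests 10240 pages, and the follow-up PRAGMA read verifies sqlite accepted that count.

cache_bytes = 1024 * 1024 * 10
page_size = 1024                 # as reported by "PRAGMA page_size"
assert cache_bytes // page_size == 10240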
diff --git a/portage_with_autodep/pym/portage/cache/template.py b/portage_with_autodep/pym/portage/cache/template.py
new file mode 100644
index 0000000..f84d8f4
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/template.py
@@ -0,0 +1,236 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+
+from portage.cache import cache_errors
+from portage.cache.cache_errors import InvalidRestriction
+from portage.cache.mappings import ProtectedDict
+import sys
+import warnings
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+ long = int
+
+class database(object):
+ # this is for metadata/cache transfer.
+	# basically flags that the cache needs to be updated when transferring cache to cache.
+ # leave this.
+
+ complete_eclass_entries = True
+ autocommits = False
+ cleanse_keys = False
+ serialize_eclasses = True
+
+ def __init__(self, location, label, auxdbkeys, readonly=False):
+ """ initialize the derived class; specifically, store label/keys"""
+ self._known_keys = auxdbkeys
+ self.location = location
+ self.label = label
+ self.readonly = readonly
+ self.sync_rate = 0
+ self.updates = 0
+
+ def __getitem__(self, cpv):
+ """set a cpv to values
+ This shouldn't be overriden in derived classes since it handles the __eclasses__ conversion.
+ that said, if the class handles it, they can override it."""
+ if self.updates > self.sync_rate:
+ self.commit()
+ self.updates = 0
+ d=self._getitem(cpv)
+ if self.serialize_eclasses and "_eclasses_" in d:
+ d["_eclasses_"] = reconstruct_eclasses(cpv, d["_eclasses_"])
+ elif "_eclasses_" not in d:
+ d["_eclasses_"] = {}
+ mtime = d.get('_mtime_')
+ if mtime is None:
+ raise cache_errors.CacheCorruption(cpv,
+ '_mtime_ field is missing')
+ try:
+ mtime = long(mtime)
+ except ValueError:
+ raise cache_errors.CacheCorruption(cpv,
+ '_mtime_ conversion to long failed: %s' % (mtime,))
+ d['_mtime_'] = mtime
+ return d
+
+ def _getitem(self, cpv):
+ """get cpv's values.
+		override this in derived classes"""
+ raise NotImplementedError
+
+ def __setitem__(self, cpv, values):
+ """set a cpv to values
+		This shouldn't be overridden in derived classes since it handles the readonly checks"""
+ if self.readonly:
+ raise cache_errors.ReadOnlyRestriction()
+ if self.cleanse_keys:
+ d=ProtectedDict(values)
+ for k, v in list(d.items()):
+ if not v:
+ del d[k]
+ if self.serialize_eclasses and "_eclasses_" in values:
+ d["_eclasses_"] = serialize_eclasses(d["_eclasses_"])
+ elif self.serialize_eclasses and "_eclasses_" in values:
+ d = ProtectedDict(values)
+ d["_eclasses_"] = serialize_eclasses(d["_eclasses_"])
+ else:
+ d = values
+ self._setitem(cpv, d)
+ if not self.autocommits:
+ self.updates += 1
+ if self.updates > self.sync_rate:
+ self.commit()
+ self.updates = 0
+
+ def _setitem(self, name, values):
+ """__setitem__ calls this after readonly checks. override it in derived classes
+		note the _eclasses_ key *must* be handled"""
+ raise NotImplementedError
+
+ def __delitem__(self, cpv):
+ """delete a key from the cache.
+		This shouldn't be overridden in derived classes since it handles the readonly checks"""
+ if self.readonly:
+ raise cache_errors.ReadOnlyRestriction()
+ if not self.autocommits:
+ self.updates += 1
+ self._delitem(cpv)
+ if self.updates > self.sync_rate:
+ self.commit()
+ self.updates = 0
+
+ def _delitem(self,cpv):
+ """__delitem__ calls this after readonly checks. override it in derived classes"""
+ raise NotImplementedError
+
+ def has_key(self, cpv):
+ return cpv in self
+
+ def keys(self):
+ return list(self)
+
+ def iterkeys(self):
+ return iter(self)
+
+ def iteritems(self):
+ for x in self:
+ yield (x, self[x])
+
+ def items(self):
+ return list(self.iteritems())
+
+ def sync(self, rate=0):
+ self.sync_rate = rate
+ if(rate == 0):
+ self.commit()
+
+ def commit(self):
+ if not self.autocommits:
+ raise NotImplementedError
+
+ def __contains__(self, cpv):
+ """This method should always be overridden. It is provided only for
+ backward compatibility with modules that override has_key instead. It
+ will automatically raise a NotImplementedError if has_key has not been
+ overridden."""
+ if self.has_key is database.has_key:
+ # prevent a possible recursive loop
+ raise NotImplementedError
+ warnings.warn("portage.cache.template.database.has_key() is "
+ "deprecated, override __contains__ instead",
+ DeprecationWarning)
+ return self.has_key(cpv)
+
+ def __iter__(self):
+ """This method should always be overridden. It is provided only for
+ backward compatibility with modules that override iterkeys instead. It
+ will automatically raise a NotImplementedError if iterkeys has not been
+ overridden."""
+ if self.iterkeys is database.iterkeys:
+ # prevent a possible recursive loop
+ raise NotImplementedError(self)
+ return iter(self.keys())
+
+ def get(self, k, x=None):
+ try:
+ return self[k]
+ except KeyError:
+ return x
+
+ def get_matches(self, match_dict):
+ """generic function for walking the entire cache db, matching restrictions to
+ filter what cpv's are returned. Derived classes should override this if they
+	can implement a faster method than pulling each cpv:values, and checking it.
+
+ For example, RDBMS derived classes should push the matching logic down to the
+ actual RDBM."""
+
+ import re
+ restricts = {}
+ for key,match in match_dict.items():
+ # XXX this sucks.
+ try:
+ if isinstance(match, basestring):
+ restricts[key] = re.compile(match).match
+ else:
+ restricts[key] = re.compile(match[0],match[1]).match
+ except re.error as e:
+ raise InvalidRestriction(key, match, e)
+			if key not in self._known_keys:
+ raise InvalidRestriction(key, match, "Key isn't valid")
+
+ for cpv in self:
+ cont = True
+ vals = self[cpv]
+ for key, match in restricts.items():
+ if not match(vals[key]):
+ cont = False
+ break
+ if cont:
+ yield cpv
+
+ if sys.hexversion >= 0x3000000:
+ keys = __iter__
+ items = iteritems
+
+def serialize_eclasses(eclass_dict):
+ """takes a dict, returns a string representing said dict"""
+ """The "new format", which causes older versions of <portage-2.1.2 to
+ traceback with a ValueError due to failed long() conversion. This format
+	isn't currently written, but the capability to read it is already built
+ in.
+ return "\t".join(["%s\t%s" % (k, str(v)) \
+ for k, v in eclass_dict.iteritems()])
+ """
+ if not eclass_dict:
+ return ""
+ return "\t".join(k + "\t%s\t%s" % eclass_dict[k] \
+ for k in sorted(eclass_dict))
+
+def reconstruct_eclasses(cpv, eclass_string):
+ """returns a dict when handed a string generated by serialize_eclasses"""
+ eclasses = eclass_string.rstrip().lstrip().split("\t")
+ if eclasses == [""]:
+ # occasionally this occurs in the fs backends. they suck.
+ return {}
+
+ if len(eclasses) % 2 != 0 and len(eclasses) % 3 != 0:
+ raise cache_errors.CacheCorruption(cpv, "_eclasses_ was of invalid len %i" % len(eclasses))
+ d={}
+ try:
+ if eclasses[1].isdigit():
+ for x in range(0, len(eclasses), 2):
+ d[eclasses[x]] = ("", long(eclasses[x + 1]))
+ else:
+ # The old format contains paths that will be discarded.
+ for x in range(0, len(eclasses), 3):
+ d[eclasses[x]] = (eclasses[x + 1], long(eclasses[x + 2]))
+ except IndexError:
+ raise cache_errors.CacheCorruption(cpv,
+ "_eclasses_ was of invalid len %i" % len(eclasses))
+ except ValueError:
+ raise cache_errors.CacheCorruption(cpv, "_eclasses_ mtime conversion to long failed")
+ del eclasses
+ return d
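A round trip through the two helpers above, using a made-up eclass path and mtime (reconstruct_eclasses() accepts both the two-column and three-column layouts):

ec = {'eutils': ('/usr/portage/eclass', 1313943350)}
s = serialize_eclasses(ec)
assert s == 'eutils\t/usr/portage/eclass\t1313943350'
assert reconstruct_eclasses('app-misc/foo-1', s) == ec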
diff --git a/portage_with_autodep/pym/portage/cache/util.py b/portage_with_autodep/pym/portage/cache/util.py
new file mode 100644
index 0000000..b824689
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/util.py
@@ -0,0 +1,170 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+
+from __future__ import print_function
+
+__all__ = ["mirror_cache", "non_quiet_mirroring", "quiet_mirroring"]
+
+from itertools import chain
+from portage.cache import cache_errors
+from portage.localization import _
+
+def mirror_cache(valid_nodes_iterable, src_cache, trg_cache, eclass_cache=None, verbose_instance=None):
+
+ from portage import eapi_is_supported, \
+ _validate_cache_for_unsupported_eapis
+ if not src_cache.complete_eclass_entries and not eclass_cache:
+ raise Exception("eclass_cache required for cache's of class %s!" % src_cache.__class__)
+
+	if verbose_instance is None:
+		noise = quiet_mirroring()
+	else:
+		noise = verbose_instance
+
+ dead_nodes = set(trg_cache)
+ count=0
+
+ if not trg_cache.autocommits:
+ trg_cache.sync(100)
+
+ for x in valid_nodes_iterable:
+# print "processing x=",x
+ count+=1
+ dead_nodes.discard(x)
+ try:
+ entry = src_cache[x]
+ except KeyError as e:
+ noise.missing_entry(x)
+ del e
+ continue
+ except cache_errors.CacheError as ce:
+ noise.exception(x, ce)
+ del ce
+ continue
+
+ eapi = entry.get('EAPI')
+ if not eapi:
+ eapi = '0'
+ eapi = eapi.lstrip('-')
+ eapi_supported = eapi_is_supported(eapi)
+ if not eapi_supported:
+ if not _validate_cache_for_unsupported_eapis:
+ noise.misc(x, _("unable to validate cache for EAPI='%s'") % eapi)
+ continue
+
+ write_it = True
+ trg = None
+ try:
+ trg = trg_cache[x]
+ except (KeyError, cache_errors.CacheError):
+ pass
+ else:
+ if trg['_mtime_'] == entry['_mtime_'] and \
+ eclass_cache.is_eclass_data_valid(trg['_eclasses_']) and \
+ set(trg['_eclasses_']) == set(entry['_eclasses_']):
+ write_it = False
+
+ for d in (entry, trg):
+ if d is not None and d.get('EAPI') in ('', '0'):
+ del d['EAPI']
+
+ if trg and not write_it:
+ """ We don't want to skip the write unless we're really sure that
+ the existing cache is identical, so don't trust _mtime_ and
+ _eclasses_ alone."""
+ for k in set(chain(entry, trg)).difference(
+ ("_mtime_", "_eclasses_")):
+ if trg.get(k, "") != entry.get(k, ""):
+ write_it = True
+ break
+
+ if write_it:
+ try:
+ inherited = entry.get("INHERITED", "")
+ eclasses = entry.get("_eclasses_")
+ except cache_errors.CacheError as ce:
+ noise.exception(x, ce)
+ del ce
+ continue
+
+ if eclasses is not None:
+ if not eclass_cache.is_eclass_data_valid(entry["_eclasses_"]):
+ noise.eclass_stale(x)
+ continue
+ inherited = eclasses
+ else:
+ inherited = inherited.split()
+
+ if inherited:
+ if src_cache.complete_eclass_entries and eclasses is None:
+ noise.corruption(x, "missing _eclasses_ field")
+ continue
+
+ # Even if _eclasses_ already exists, replace it with data from
+ # eclass_cache, in order to insert local eclass paths.
+ try:
+ eclasses = eclass_cache.get_eclass_data(inherited)
+ except KeyError:
+ # INHERITED contains a non-existent eclass.
+ noise.eclass_stale(x)
+ continue
+
+ if eclasses is None:
+ noise.eclass_stale(x)
+ continue
+ entry["_eclasses_"] = eclasses
+
+ if not eapi_supported:
+ for k in set(entry).difference(("_mtime_", "_eclasses_")):
+ entry[k] = ""
+ entry["EAPI"] = "-" + eapi
+
+ # by this time, if it reaches here, the eclass has been validated, and the entry has
+ # been updated/translated (if needs be, for metadata/cache mainly)
+ try:
+ trg_cache[x] = entry
+ except cache_errors.CacheError as ce:
+ noise.exception(x, ce)
+ del ce
+ continue
+ if count >= noise.call_update_min:
+ noise.update(x)
+ count = 0
+
+ if not trg_cache.autocommits:
+ trg_cache.commit()
+
+	# By this time the trg_cache is up to date, and dead_nodes holds the
+	# cpv's that no longer exist in the source. Walk the target cache and
+	# delete them.
+ for key in dead_nodes:
+ try:
+ del trg_cache[key]
+ except KeyError:
+ pass
+ except cache_errors.CacheError as ce:
+ noise.exception(ce)
+ del ce
+ noise.finish()
+
+
+class quiet_mirroring(object):
+	# call_update_min is used by mirror_cache to determine how often to
+	# report progress. The quiet default of 2**24 - 1 effectively disables
+	# updates (one call per ~16 million entries).
+	call_update_min = 0xffffff
+ def update(self,key,*arg): pass
+ def exception(self,key,*arg): pass
+ def eclass_stale(self,*arg): pass
+ def missing_entry(self, key): pass
+ def misc(self,key,*arg): pass
+ def corruption(self, key, s): pass
+ def finish(self, *arg): pass
+
+class non_quiet_mirroring(quiet_mirroring):
+ call_update_min=1
+ def update(self,key,*arg): print("processed",key)
+	def exception(self, key, *arg): print("exception", key, arg)
+	def missing_entry(self, key): print("key %s is missing" % key)
+ def corruption(self,key,*arg): print("corrupt %s:" % key,arg)
+ def eclass_stale(self,key,*arg):print("stale %s:"%key,arg)
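+
+# Hypothetical usage sketch (src_cache, trg_cache and ec are placeholder
+# names, not part of this module): mirror every node from one cache into
+# another, printing progress for each processed cpv.
+#
+#   >>> from portage.cache import util
+#   >>> util.mirror_cache(iter(src_cache), src_cache, trg_cache,
+#   ...     eclass_cache=ec, verbose_instance=util.non_quiet_mirroring())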
+
diff --git a/portage_with_autodep/pym/portage/cache/volatile.py b/portage_with_autodep/pym/portage/cache/volatile.py
new file mode 100644
index 0000000..0bf6bab
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/volatile.py
@@ -0,0 +1,25 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import copy
+from portage.cache import template
+
+class database(template.database):
+
+ autocommits = True
+ serialize_eclasses = False
+
+ def __init__(self, *args, **config):
+ config.pop("gid", None)
+ config.pop("perms", None)
+ super(database, self).__init__(*args, **config)
+ self._data = {}
+ self.__iter__ = self._data.__iter__
+ self._delitem = self._data.__delitem__
+ self.__contains__ = self._data.__contains__
+
+ def _setitem(self, name, values):
+ self._data[name] = copy.deepcopy(values)
+
+ def _getitem(self, cpv):
+ return copy.deepcopy(self._data[cpv])
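+
+# A note on the deep copies above (not from the original source): callers may
+# freely mutate a retrieved entry without corrupting the cached copy, e.g.
+# (illustrative) d = db[cpv]; d["EAPI"] = "4" leaves db[cpv] unchanged.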
diff --git a/portage_with_autodep/pym/portage/checksum.py b/portage_with_autodep/pym/portage/checksum.py
new file mode 100644
index 0000000..9e7e455
--- /dev/null
+++ b/portage_with_autodep/pym/portage/checksum.py
@@ -0,0 +1,291 @@
+# checksum.py -- core Portage functionality
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage.const import PRELINK_BINARY,HASHING_BLOCKSIZE
+from portage.localization import _
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+import errno
+import stat
+import tempfile
+
+#dict of all available hash functions
+hashfunc_map = {}
+hashorigin_map = {}
+
+def _generate_hash_function(hashtype, hashobject, origin="unknown"):
+ def pyhash(filename):
+ """
+ Run a checksum against a file.
+
+ @param filename: File to run the checksum against
+ @type filename: String
+ @return: The hash and size of the data
+ """
+ try:
+ f = open(_unicode_encode(filename,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ except IOError as e:
+ func_call = "open('%s')" % filename
+ if e.errno == errno.EPERM:
+ raise portage.exception.OperationNotPermitted(func_call)
+ elif e.errno == errno.EACCES:
+ raise portage.exception.PermissionDenied(func_call)
+ elif e.errno == errno.ENOENT:
+ raise portage.exception.FileNotFound(filename)
+ else:
+ raise
+ blocksize = HASHING_BLOCKSIZE
+ data = f.read(blocksize)
+ size = 0
+ checksum = hashobject()
+ while data:
+ checksum.update(data)
+ size = size + len(data)
+ data = f.read(blocksize)
+ f.close()
+
+ return (checksum.hexdigest(), size)
+ hashfunc_map[hashtype] = pyhash
+ hashorigin_map[hashtype] = origin
+ return pyhash
+
+# Define hash functions, try to use the best module available. Later definitions
+# override earlier ones
+
+# Use the internal modules as last fallback
+try:
+ from hashlib import md5 as _new_md5
+except ImportError:
+ from md5 import new as _new_md5
+
+md5hash = _generate_hash_function("MD5", _new_md5, origin="internal")
+
+try:
+ from hashlib import sha1 as _new_sha1
+except ImportError:
+ from sha import new as _new_sha1
+
+sha1hash = _generate_hash_function("SHA1", _new_sha1, origin="internal")
+
+# Use pycrypto when available, prefer it over the internal fallbacks
+try:
+ from Crypto.Hash import SHA256, RIPEMD
+ sha256hash = _generate_hash_function("SHA256", SHA256.new, origin="pycrypto")
+ rmd160hash = _generate_hash_function("RMD160", RIPEMD.new, origin="pycrypto")
+except ImportError as e:
+ pass
+
+# Use hashlib from python-2.5 if available and prefer it over pycrypto and internal fallbacks.
+# Need special handling for RMD160 as it may not always be provided by hashlib.
+try:
+ import hashlib
+
+ md5hash = _generate_hash_function("MD5", hashlib.md5, origin="hashlib")
+ sha1hash = _generate_hash_function("SHA1", hashlib.sha1, origin="hashlib")
+ sha256hash = _generate_hash_function("SHA256", hashlib.sha256, origin="hashlib")
+ try:
+ hashlib.new('ripemd160')
+ except ValueError:
+ pass
+ else:
+ def rmd160():
+ return hashlib.new('ripemd160')
+ rmd160hash = _generate_hash_function("RMD160", rmd160, origin="hashlib")
+except ImportError as e:
+ pass
+
+
+# Use python-fchksum if available, prefer it over all other MD5 implementations
+try:
+ import fchksum
+
+ def md5hash(filename):
+ return fchksum.fmd5t(filename)
+ hashfunc_map["MD5"] = md5hash
+ hashorigin_map["MD5"] = "python-fchksum"
+
+except ImportError:
+ pass
+
+# There is only one implementation for size
+def getsize(filename):
+ size = os.stat(filename).st_size
+ return (size, size)
+hashfunc_map["size"] = getsize
+
+# end actual hash functions
+
+prelink_capable = False
+if os.path.exists(PRELINK_BINARY):
+ results = portage.subprocess_getstatusoutput(
+ "%s --version > /dev/null 2>&1" % (PRELINK_BINARY,))
+ if (results[0] >> 8) == 0:
+		prelink_capable = True
+ del results
+
+def perform_md5(x, calc_prelink=0):
+ return perform_checksum(x, "MD5", calc_prelink)[0]
+
+def _perform_md5_merge(x, **kwargs):
+ return perform_md5(_unicode_encode(x,
+ encoding=_encodings['merge'], errors='strict'), **kwargs)
+
+def perform_all(x, calc_prelink=0):
+ mydict = {}
+ for k in hashfunc_map:
+		mydict[k] = perform_checksum(x, k, calc_prelink)[0]
+ return mydict
+
+def get_valid_checksum_keys():
+ return list(hashfunc_map)
+
+def get_hash_origin(hashtype):
+ if hashtype not in hashfunc_map:
+ raise KeyError(hashtype)
+ return hashorigin_map.get(hashtype, "unknown")
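+
+# Illustrative session (which origin wins depends on the imports above;
+# the values shown are examples only):
+#
+#   >>> sorted(get_valid_checksum_keys())
+#   ['MD5', 'RMD160', 'SHA1', 'SHA256', 'size']
+#   >>> get_hash_origin("MD5")
+#   'hashlib'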
+
+def verify_all(filename, mydict, calc_prelink=0, strict=0):
+ """
+ Verify all checksums against a file.
+
+ @param filename: File to run the checksums against
+	@type filename: String
+	@param mydict: Dict mapping hash names (plus the required "size" key) to expected values
+	@type mydict: Dict
+ @param calc_prelink: Whether or not to reverse prelink before running the checksum
+ @type calc_prelink: Integer
+ @param strict: Enable/Disable strict checking (which stops exactly at a checksum failure and throws an exception)
+ @type strict: Integer
+ @rtype: Tuple
+ @return: Result of the checks and possible message:
+ 1) If size fails, False, and a tuple containing a message, the given size, and the actual size
+ 2) If there is an os error, False, and a tuple containing the system error followed by 2 nulls
+ 3) If a checksum fails, False and a tuple containing a message, the given hash, and the actual hash
+ 4) If all checks succeed, return True and a fake reason
+ """
+ # Dict relates to single file only.
+ # returns: (passed,reason)
+ file_is_ok = True
+ reason = "Reason unknown"
+ try:
+ mysize = os.stat(filename)[stat.ST_SIZE]
+ if mydict["size"] != mysize:
+ return False,(_("Filesize does not match recorded size"), mysize, mydict["size"])
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ raise portage.exception.FileNotFound(filename)
+ return False, (str(e), None, None)
+
+ verifiable_hash_types = set(mydict).intersection(hashfunc_map)
+ verifiable_hash_types.discard("size")
+ if not verifiable_hash_types:
+ expected = set(hashfunc_map)
+ expected.discard("size")
+ expected = list(expected)
+ expected.sort()
+ expected = " ".join(expected)
+ got = set(mydict)
+ got.discard("size")
+ got = list(got)
+ got.sort()
+ got = " ".join(got)
+ return False, (_("Insufficient data for checksum verification"), got, expected)
+
+ for x in sorted(mydict):
+ if x == "size":
+ continue
+ elif x in hashfunc_map:
+ myhash = perform_checksum(filename, x, calc_prelink=calc_prelink)[0]
+ if mydict[x] != myhash:
+ if strict:
+ raise portage.exception.DigestException(
+ ("Failed to verify '$(file)s' on " + \
+ "checksum type '%(type)s'") % \
+ {"file" : filename, "type" : x})
+ else:
+ file_is_ok = False
+ reason = (("Failed on %s verification" % x), myhash,mydict[x])
+ break
+ return file_is_ok,reason
+
+def perform_checksum(filename, hashname="MD5", calc_prelink=0):
+ """
+ Run a specific checksum against a file. The filename can
+ be either unicode or an encoded byte string. If filename
+ is unicode then a UnicodeDecodeError will be raised if
+ necessary.
+
+ @param filename: File to run the checksum against
+ @type filename: String
+ @param hashname: The type of hash function to run
+ @type hashname: String
+ @param calc_prelink: Whether or not to reverse prelink before running the checksum
+ @type calc_prelink: Integer
+ @rtype: Tuple
+ @return: The hash and size of the data
+ """
+ global prelink_capable
+ # Make sure filename is encoded with the correct encoding before
+ # it is passed to spawn (for prelink) and/or the hash function.
+ filename = _unicode_encode(filename,
+ encoding=_encodings['fs'], errors='strict')
+ myfilename = filename
+ prelink_tmpfile = None
+ try:
+ if calc_prelink and prelink_capable:
+ # Create non-prelinked temporary file to checksum.
+ # Files rejected by prelink are summed in place.
+ try:
+ tmpfile_fd, prelink_tmpfile = tempfile.mkstemp()
+ try:
+ retval = portage.process.spawn([PRELINK_BINARY,
+ "--verify", filename], fd_pipes={1:tmpfile_fd})
+ finally:
+ os.close(tmpfile_fd)
+ if retval == os.EX_OK:
+ myfilename = prelink_tmpfile
+ except portage.exception.CommandNotFound:
+ # This happens during uninstallation of prelink.
+ prelink_capable = False
+ try:
+ if hashname not in hashfunc_map:
+ raise portage.exception.DigestException(hashname + \
+ " hash function not available (needs dev-python/pycrypto)")
+ myhash, mysize = hashfunc_map[hashname](myfilename)
+ except (OSError, IOError) as e:
+ if e.errno == errno.ENOENT:
+ raise portage.exception.FileNotFound(myfilename)
+ raise
+ return myhash, mysize
+ finally:
+ if prelink_tmpfile:
+ try:
+ os.unlink(prelink_tmpfile)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+
+def perform_multiple_checksums(filename, hashes=["MD5"], calc_prelink=0):
+ """
+ Run a group of checksums against a file.
+
+ @param filename: File to run the checksums against
+ @type filename: String
+ @param hashes: A list of checksum functions to run against the file
+	@type hashes: List
+ @param calc_prelink: Whether or not to reverse prelink before running the checksum
+ @type calc_prelink: Integer
+ @rtype: Tuple
+ @return: A dictionary in the form:
+		return_value[hash_name] = hash_result
+ for each given checksum
+ """
+ rVal = {}
+ for x in hashes:
+ if x not in hashfunc_map:
+ raise portage.exception.DigestException(x+" hash function not available (needs dev-python/pycrypto or >=dev-lang/python-2.5)")
+ rVal[x] = perform_checksum(filename, x, calc_prelink)[0]
+ return rVal
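+
+# Hypothetical example (the path and digests are made up): running two
+# checksums over one file returns a dict keyed by hash name.
+#
+#   >>> perform_multiple_checksums("/tmp/foo.tbz2", hashes=["MD5", "SHA1"])
+#   {'MD5': 'd41d8cd98f00b204e9800998ecf8427e', 'SHA1': 'da39a3ee5e6b...'}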
diff --git a/portage_with_autodep/pym/portage/const.py b/portage_with_autodep/pym/portage/const.py
new file mode 100644
index 0000000..f34398d
--- /dev/null
+++ b/portage_with_autodep/pym/portage/const.py
@@ -0,0 +1,143 @@
+# portage: Constants
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+
+# ===========================================================================
+# START OF CONSTANTS -- START OF CONSTANTS -- START OF CONSTANTS -- START OF
+# ===========================================================================
+
+# There are two types of variables here which can easily be confused,
+# resulting in arbitrary bugs, mainly exposed with an offset
+# installation (Prefix). The two types relate to the usage of
+# config_root or target_root.
+# The first, config_root (PORTAGE_CONFIGROOT), can be a path somewhere,
+# from which all derived paths need to be relative (e.g.
+# USER_CONFIG_PATH) without EPREFIX prepended in Prefix. This means
+# config_root can for instance be set to "$HOME/my/config". Obviously,
+# in such case it is not appropriate to prepend EPREFIX to derived
+# constants. The default value of config_root is EPREFIX (in non-Prefix
+# the empty string) -- overriding the value loses the EPREFIX as one
+# would expect.
+# Second there is target_root (ROOT) which is used to install somewhere
+# completely else, in Prefix of limited use. Because this is an offset
+# always given, the EPREFIX should always be applied in it, hence the
+# code always prefixes them with EROOT.
+# The variables in this file are grouped by config_root, target_root.
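+
+# Illustrative sketch (an example, not part of the module): the relative
+# constants below are joined onto config_root at runtime, e.g.
+#
+#   >>> import os
+#   >>> os.path.join("/home/me/my/config", "etc/portage")
+#   '/home/me/my/config/etc/portage'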
+
+# variables used with config_root (these need to be relative)
+MAKE_CONF_FILE = "etc/make.conf"
+USER_CONFIG_PATH = "etc/portage"
+MODULES_FILE_PATH = USER_CONFIG_PATH + "/modules"
+CUSTOM_PROFILE_PATH = USER_CONFIG_PATH + "/profile"
+USER_VIRTUALS_FILE = USER_CONFIG_PATH + "/virtuals"
+EBUILD_SH_ENV_FILE = USER_CONFIG_PATH + "/bashrc"
+EBUILD_SH_ENV_DIR = USER_CONFIG_PATH + "/env"
+CUSTOM_MIRRORS_FILE = USER_CONFIG_PATH + "/mirrors"
+COLOR_MAP_FILE = USER_CONFIG_PATH + "/color.map"
+PROFILE_PATH = "etc/make.profile"
+MAKE_DEFAULTS_FILE = PROFILE_PATH + "/make.defaults" # FIXME: not used
+DEPRECATED_PROFILE_FILE = PROFILE_PATH + "/deprecated"
+
+# variables used with targetroot (these need to be absolute, but not
+# have a leading '/' since they are used directly with os.path.join on EROOT)
+VDB_PATH = "var/db/pkg"
+CACHE_PATH = "var/cache/edb"
+PRIVATE_PATH = "var/lib/portage"
+WORLD_FILE = PRIVATE_PATH + "/world"
+WORLD_SETS_FILE = PRIVATE_PATH + "/world_sets"
+CONFIG_MEMORY_FILE = PRIVATE_PATH + "/config"
+NEWS_LIB_PATH = "var/lib/gentoo"
+
+# these variables get EPREFIX prepended automagically when they are
+# translated into their lowercase variants
+DEPCACHE_PATH = "/var/cache/edb/dep"
+GLOBAL_CONFIG_PATH = "/usr/share/portage/config"
+
+# these variables are not used with target_root or config_root
+PORTAGE_BASE_PATH = os.path.join(os.sep, os.sep.join(__file__.split(os.sep)[:-3]))
+PORTAGE_BIN_PATH = PORTAGE_BASE_PATH + "/bin"
+PORTAGE_PYM_PATH = PORTAGE_BASE_PATH + "/pym"
+LOCALE_DATA_PATH = PORTAGE_BASE_PATH + "/locale" # FIXME: not used
+EBUILD_SH_BINARY = PORTAGE_BIN_PATH + "/ebuild.sh"
+MISC_SH_BINARY = PORTAGE_BIN_PATH + "/misc-functions.sh"
+SANDBOX_BINARY = "/usr/bin/sandbox"
+FAKEROOT_BINARY = "/usr/bin/fakeroot"
+BASH_BINARY = "/bin/bash"
+MOVE_BINARY = "/bin/mv"
+PRELINK_BINARY = "/usr/sbin/prelink"
+AUTODEP_LIBRARY = "/usr/lib/file_hook.so"
+
+
+INVALID_ENV_FILE = "/etc/spork/is/not/valid/profile.env"
+REPO_NAME_FILE = "repo_name"
+REPO_NAME_LOC = "profiles" + "/" + REPO_NAME_FILE
+
+PORTAGE_PACKAGE_ATOM = "sys-apps/portage"
+LIBC_PACKAGE_ATOM = "virtual/libc"
+OS_HEADERS_PACKAGE_ATOM = "virtual/os-headers"
+
+INCREMENTALS = ("USE", "USE_EXPAND", "USE_EXPAND_HIDDEN",
+ "FEATURES", "ACCEPT_KEYWORDS",
+ "CONFIG_PROTECT_MASK", "CONFIG_PROTECT",
+ "PRELINK_PATH", "PRELINK_PATH_MASK",
+ "PROFILE_ONLY_VARIABLES")
+EBUILD_PHASES = ("pretend", "setup", "unpack", "prepare", "configure",
+ "compile", "test", "install",
+ "package", "preinst", "postinst","prerm", "postrm",
+ "nofetch", "config", "info", "other")
+SUPPORTED_FEATURES = frozenset([
+ "allow-missing-manifests",
+ "assume-digests", "binpkg-logs", "buildpkg", "buildsyspkg", "candy",
+ "ccache", "chflags", "collision-protect", "compress-build-logs",
+ "depcheck", "depcheckstrict",
+ "digest", "distcc", "distcc-pump", "distlocks", "ebuild-locks", "fakeroot",
+ "fail-clean", "fixpackages", "force-mirror", "getbinpkg",
+ "installsources", "keeptemp", "keepwork", "fixlafiles", "lmirror",
+ "metadata-transfer", "mirror", "multilib-strict", "news",
+ "noauto", "noclean", "nodoc", "noinfo", "noman",
+ "nostrip", "notitles", "parallel-fetch", "parallel-install",
+ "parse-eapi-ebuild-head",
+ "prelink-checksums", "preserve-libs",
+ "protect-owned", "python-trace", "sandbox",
+ "selinux", "sesandbox", "sfperms",
+ "sign", "skiprocheck", "split-elog", "split-log", "splitdebug",
+ "strict", "stricter", "suidctl", "test", "test-fail-continue",
+ "unknown-features-filter", "unknown-features-warn",
+ "unmerge-logs", "unmerge-orphans", "userfetch", "userpriv",
+ "usersandbox", "usersync", "webrsync-gpg"])
+
+EAPI = 4
+
+HASHING_BLOCKSIZE = 32768
+MANIFEST1_HASH_FUNCTIONS = ("MD5", "SHA256", "RMD160")
+MANIFEST2_HASH_FUNCTIONS = ("SHA1", "SHA256", "RMD160")
+
+MANIFEST1_REQUIRED_HASH = "MD5"
+MANIFEST2_REQUIRED_HASH = "SHA1"
+
+MANIFEST2_IDENTIFIERS = ("AUX", "MISC", "DIST", "EBUILD")
+# ===========================================================================
+# END OF CONSTANTS -- END OF CONSTANTS -- END OF CONSTANTS -- END OF CONSTANT
+# ===========================================================================
+
+# Private constants for use in conditional code in order to minimize the diff
+# between branches.
+_ENABLE_DYN_LINK_MAP = True
+_ENABLE_PRESERVE_LIBS = True
+_ENABLE_REPO_NAME_WARN = True
+_ENABLE_SET_CONFIG = True
+_SANDBOX_COMPAT_LEVEL = "22"
+
+
+# The definitions above will differ between branches, so it's useful to have
+# common lines of diff context here in order to avoid merge conflicts.
+
+if not _ENABLE_PRESERVE_LIBS:
+ SUPPORTED_FEATURES = set(SUPPORTED_FEATURES)
+ SUPPORTED_FEATURES.remove("preserve-libs")
+ SUPPORTED_FEATURES = frozenset(SUPPORTED_FEATURES)
+
+if not _ENABLE_SET_CONFIG:
+ WORLD_SETS_FILE = '/dev/null'
diff --git a/portage_with_autodep/pym/portage/cvstree.py b/portage_with_autodep/pym/portage/cvstree.py
new file mode 100644
index 0000000..9ba22f3
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cvstree.py
@@ -0,0 +1,293 @@
+# cvstree.py -- cvs tree utilities
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import io
+import re
+import stat
+import sys
+import time
+
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+
+if sys.hexversion >= 0x3000000:
+ long = int
+
+# [D]/Name/Version/Date/Flags/Tags
+
+def pathdata(entries, path):
+ """(entries,path)
+ Returns the data(dict) for a specific file/dir at the path specified."""
+ mysplit=path.split("/")
+ myentries=entries
+ mytarget=mysplit[-1]
+ mysplit=mysplit[:-1]
+ for mys in mysplit:
+ if mys in myentries["dirs"]:
+ myentries=myentries["dirs"][mys]
+ else:
+ return None
+ if mytarget in myentries["dirs"]:
+ return myentries["dirs"][mytarget]
+ elif mytarget in myentries["files"]:
+ return myentries["files"][mytarget]
+ else:
+ return None
+
+def fileat(entries, path):
+ return pathdata(entries,path)
+
+def isadded(entries, path):
+ """(entries,path)
+ Returns true if the path exists and is added to the cvs tree."""
+ mytarget=pathdata(entries, path)
+ if mytarget:
+ if "cvs" in mytarget["status"]:
+ return 1
+
+ basedir=os.path.dirname(path)
+ filename=os.path.basename(path)
+
+ try:
+ myfile = io.open(
+ _unicode_encode(os.path.join(basedir, 'CVS', 'Entries'),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='strict')
+ except IOError:
+ return 0
+ mylines=myfile.readlines()
+ myfile.close()
+
+ rep=re.compile("^\/"+re.escape(filename)+"\/");
+ for x in mylines:
+ if rep.search(x):
+ return 1
+
+ return 0
+
+def findnew(entries,recursive=0,basedir=""):
+ """(entries,recursive=0,basedir="")
+ Recurses the entries tree to find all elements that have been added but
+ have not yet been committed. Returns a list of paths, optionally prepended
+ with a basedir."""
+ if basedir and basedir[-1]!="/":
+ basedir=basedir+"/"
+ mylist=[]
+ for myfile in entries["files"]:
+ if "cvs" in entries["files"][myfile]["status"]:
+ if "0" == entries["files"][myfile]["revision"]:
+ mylist.append(basedir+myfile)
+ if recursive:
+ for mydir in entries["dirs"]:
+ mylist+=findnew(entries["dirs"][mydir],recursive,basedir+mydir)
+ return mylist
+
+def findoption(entries, pattern, recursive=0, basedir=""):
+ """(entries, pattern, recursive=0, basedir="")
+ Iterate over paths of cvs entries for which the pattern.search() method
+ finds a match. Returns a list of paths, optionally prepended with a
+ basedir."""
+ if not basedir.endswith("/"):
+ basedir += "/"
+ for myfile, mydata in entries["files"].items():
+ if "cvs" in mydata["status"]:
+ if pattern.search(mydata["flags"]):
+ yield basedir+myfile
+ if recursive:
+ for mydir, mydata in entries["dirs"].items():
+ for x in findoption(mydata, pattern,
+ recursive, basedir+mydir):
+ yield x
+
+def findchanged(entries,recursive=0,basedir=""):
+ """(entries,recursive=0,basedir="")
+ Recurses the entries tree to find all elements that exist in the cvs tree
+ and differ from the committed version. Returns a list of paths, optionally
+ prepended with a basedir."""
+ if basedir and basedir[-1]!="/":
+ basedir=basedir+"/"
+ mylist=[]
+ for myfile in entries["files"]:
+ if "cvs" in entries["files"][myfile]["status"]:
+ if "current" not in entries["files"][myfile]["status"]:
+ if "exists" in entries["files"][myfile]["status"]:
+ if entries["files"][myfile]["revision"]!="0":
+ mylist.append(basedir+myfile)
+ if recursive:
+ for mydir in entries["dirs"]:
+ mylist+=findchanged(entries["dirs"][mydir],recursive,basedir+mydir)
+ return mylist
+
+def findmissing(entries,recursive=0,basedir=""):
+ """(entries,recursive=0,basedir="")
+ Recurses the entries tree to find all elements that are listed in the cvs
+ tree but do not exist on the filesystem. Returns a list of paths,
+ optionally prepended with a basedir."""
+ if basedir and basedir[-1]!="/":
+ basedir=basedir+"/"
+ mylist=[]
+ for myfile in entries["files"]:
+ if "cvs" in entries["files"][myfile]["status"]:
+ if "exists" not in entries["files"][myfile]["status"]:
+ if "removed" not in entries["files"][myfile]["status"]:
+ mylist.append(basedir+myfile)
+ if recursive:
+ for mydir in entries["dirs"]:
+ mylist+=findmissing(entries["dirs"][mydir],recursive,basedir+mydir)
+ return mylist
+
+def findunadded(entries,recursive=0,basedir=""):
+ """(entries,recursive=0,basedir="")
+ Recurses the entries tree to find all elements that are in valid cvs
+ directories but are not part of the cvs tree. Returns a list of paths,
+ optionally prepended with a basedir."""
+ if basedir and basedir[-1]!="/":
+ basedir=basedir+"/"
+ mylist=[]
+
+ #ignore what cvs ignores.
+ for myfile in entries["files"]:
+ if "cvs" not in entries["files"][myfile]["status"]:
+ mylist.append(basedir+myfile)
+ if recursive:
+ for mydir in entries["dirs"]:
+ mylist+=findunadded(entries["dirs"][mydir],recursive,basedir+mydir)
+ return mylist
+
+def findremoved(entries,recursive=0,basedir=""):
+ """(entries,recursive=0,basedir="")
+	Recurses the entries tree to find all elements that are flagged for cvs
+	deletion. Returns a list of paths, optionally prepended with a basedir."""
+ if basedir and basedir[-1]!="/":
+ basedir=basedir+"/"
+ mylist=[]
+ for myfile in entries["files"]:
+ if "removed" in entries["files"][myfile]["status"]:
+ mylist.append(basedir+myfile)
+ if recursive:
+ for mydir in entries["dirs"]:
+ mylist+=findremoved(entries["dirs"][mydir],recursive,basedir+mydir)
+ return mylist
+
+def findall(entries, recursive=0, basedir=""):
+ """(entries,recursive=0,basedir="")
+	Recurses the entries tree to find all new, changed, missing, unadded, and
+	removed entities. Returns a 5 element list of lists as returned from each find*()."""
+
+ if basedir and basedir[-1]!="/":
+ basedir=basedir+"/"
+ mynew = findnew(entries,recursive,basedir)
+ mychanged = findchanged(entries,recursive,basedir)
+ mymissing = findmissing(entries,recursive,basedir)
+ myunadded = findunadded(entries,recursive,basedir)
+ myremoved = findremoved(entries,recursive,basedir)
+ return [mynew, mychanged, mymissing, myunadded, myremoved]
+
+ignore_list = re.compile("(^|/)(RCS(|LOG)|SCCS|CVS(|\.adm)|cvslog\..*|tags|TAGS|\.(make\.state|nse_depinfo)|.*~|(\.|)#.*|,.*|_$.*|.*\$|\.del-.*|.*\.(old|BAK|bak|orig|rej|a|olb|o|obj|so|exe|Z|elc|ln)|core)$")
+def apply_cvsignore_filter(files):
+	x = 0
+	while x < len(files):
+		if ignore_list.match(files[x].split("/")[-1]):
+			files.pop(x)
+		else:
+			x += 1
+	return files
+
+def getentries(mydir,recursive=0):
+ """(basedir,recursive=0)
+ Scans the given directory and returns a datadict of all the entries in
+ the directory separated as a dirs dict and a files dict."""
+ myfn=mydir+"/CVS/Entries"
+ # entries=[dirs, files]
+ entries={"dirs":{},"files":{}}
+ if not os.path.exists(mydir):
+ return entries
+ try:
+ myfile = io.open(_unicode_encode(myfn,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='strict')
+ mylines=myfile.readlines()
+ myfile.close()
+	except SystemExit:
+		raise
+	except Exception:
+		mylines = []
+ for line in mylines:
+ if line and line[-1]=="\n":
+ line=line[:-1]
+ if not line:
+ continue
+ if line=="D": # End of entries file
+ break
+ mysplit=line.split("/")
+ if len(mysplit)!=6:
+ print("Confused:",mysplit)
+ continue
+ if mysplit[0]=="D":
+ entries["dirs"][mysplit[1]]={"dirs":{},"files":{},"status":[]}
+ entries["dirs"][mysplit[1]]["status"]=["cvs"]
+ if os.path.isdir(mydir+"/"+mysplit[1]):
+ entries["dirs"][mysplit[1]]["status"]+=["exists"]
+ entries["dirs"][mysplit[1]]["flags"]=mysplit[2:]
+ if recursive:
+ rentries=getentries(mydir+"/"+mysplit[1],recursive)
+ entries["dirs"][mysplit[1]]["dirs"]=rentries["dirs"]
+ entries["dirs"][mysplit[1]]["files"]=rentries["files"]
+ else:
+ # [D]/Name/revision/Date/Flags/Tags
+ entries["files"][mysplit[1]]={}
+ entries["files"][mysplit[1]]["revision"]=mysplit[2]
+ entries["files"][mysplit[1]]["date"]=mysplit[3]
+ entries["files"][mysplit[1]]["flags"]=mysplit[4]
+ entries["files"][mysplit[1]]["tags"]=mysplit[5]
+ entries["files"][mysplit[1]]["status"]=["cvs"]
+ if entries["files"][mysplit[1]]["revision"][0]=="-":
+ entries["files"][mysplit[1]]["status"]+=["removed"]
+
+ for file in apply_cvsignore_filter(os.listdir(mydir)):
+ if file=="CVS":
+ continue
+ if os.path.isdir(mydir+"/"+file):
+ if file not in entries["dirs"]:
+ entries["dirs"][file]={"dirs":{},"files":{}}
+ # It's normal for a directory to be unlisted in Entries
+ # when checked out without -P (see bug #257660).
+ rentries=getentries(mydir+"/"+file,recursive)
+ entries["dirs"][file]["dirs"]=rentries["dirs"]
+ entries["dirs"][file]["files"]=rentries["files"]
+ if "status" in entries["dirs"][file]:
+ if "exists" not in entries["dirs"][file]["status"]:
+ entries["dirs"][file]["status"]+=["exists"]
+ else:
+ entries["dirs"][file]["status"]=["exists"]
+ elif os.path.isfile(mydir+"/"+file):
+ if file not in entries["files"]:
+ entries["files"][file]={"revision":"","date":"","flags":"","tags":""}
+ if "status" in entries["files"][file]:
+ if "exists" not in entries["files"][file]["status"]:
+ entries["files"][file]["status"]+=["exists"]
+ else:
+ entries["files"][file]["status"]=["exists"]
+ try:
+ mystat=os.stat(mydir+"/"+file)
+ mytime = time.asctime(time.gmtime(mystat[stat.ST_MTIME]))
+ if "status" not in entries["files"][file]:
+ entries["files"][file]["status"]=[]
+ if mytime==entries["files"][file]["date"]:
+ entries["files"][file]["status"]+=["current"]
+			except SystemExit:
+ raise
+ except Exception as e:
+ print("failed to stat",file)
+ print(e)
+ return
+
+ else:
+ print()
+ print("File of unknown type:",mydir+"/"+file)
+ print()
+ return entries
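+
+# Hypothetical usage sketch (the checkout path is an assumption): build the
+# entries tree for a working copy and unpack everything new, changed,
+# missing, unadded, or removed relative to CVS.
+#
+#   >>> entries = getentries("/home/me/cvs/mymodule", recursive=1)
+#   >>> mynew, mychanged, mymissing, myunadded, myremoved = \
+#   ...     findall(entries, recursive=1)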
diff --git a/portage_with_autodep/pym/portage/data.py b/portage_with_autodep/pym/portage/data.py
new file mode 100644
index 0000000..c38fa17
--- /dev/null
+++ b/portage_with_autodep/pym/portage/data.py
@@ -0,0 +1,122 @@
+# data.py -- Calculated/Discovered Data Values
+# Copyright 1998-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os, pwd, grp, platform
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.output:colorize',
+ 'portage.util:writemsg',
+)
+from portage.localization import _
+
+ostype=platform.system()
+userland = None
+if ostype == "DragonFly" or ostype.endswith("BSD"):
+ userland = "BSD"
+else:
+ userland = "GNU"
+
+lchown = getattr(os, "lchown", None)
+
+if not lchown:
+ if ostype == "Darwin":
+ def lchown(*pos_args, **key_args):
+ pass
+ else:
+ def lchown(*pargs, **kwargs):
+ writemsg(colorize("BAD", "!!!") + _(
+ " It seems that os.lchown does not"
+ " exist. Please rebuild python.\n"), noiselevel=-1)
+ lchown()
+
+lchown = portage._unicode_func_wrapper(lchown)
+
+def portage_group_warning():
+ warn_prefix = colorize("BAD", "*** WARNING *** ")
+ mylines = [
+ "For security reasons, only system administrators should be",
+ "allowed in the portage group. Untrusted users or processes",
+ "can potentially exploit the portage group for attacks such as",
+ "local privilege escalation."
+ ]
+ for x in mylines:
+ writemsg(warn_prefix, noiselevel=-1)
+ writemsg(x, noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+
+# Portage has 3 security levels that depend on the uid and gid of the main
+# process and are assigned according to the following table:
+#
+# Privileges secpass uid gid
+# normal 0 any any
+# group 1 any portage_gid
+# super 2 0 any
+#
+# If the "wheel" group does not exist then wheelgid falls back to 0.
+# If the "portage" group does not exist then portage_uid falls back to wheelgid.
+
+secpass=0
+
+uid=os.getuid()
+wheelgid=0
+
+if uid==0:
+ secpass=2
+try:
+ wheelgid=grp.getgrnam("wheel")[2]
+except KeyError:
+ pass
+
+# Allow the overriding of the user used for 'userpriv' and 'userfetch'
+_portage_uname = os.environ.get('PORTAGE_USERNAME', 'portage')
+_portage_grpname = os.environ.get('PORTAGE_GRPNAME', 'portage')
+
+#Discover the uid and gid of the portage user/group
+try:
+ portage_uid = pwd.getpwnam(_portage_uname)[2]
+ portage_gid = grp.getgrnam(_portage_grpname)[2]
+ if secpass < 1 and portage_gid in os.getgroups():
+ secpass=1
+except KeyError:
+ portage_uid=0
+ portage_gid=0
+ userpriv_groups = [portage_gid]
+ writemsg(colorize("BAD",
+ _("portage: 'portage' user or group missing.")) + "\n", noiselevel=-1)
+ writemsg(_(
+ " For the defaults, line 1 goes into passwd, "
+ "and 2 into group.\n"), noiselevel=-1)
+ writemsg(colorize("GOOD",
+ " portage:x:250:250:portage:/var/tmp/portage:/bin/false") \
+ + "\n", noiselevel=-1)
+ writemsg(colorize("GOOD", " portage::250:portage") + "\n",
+ noiselevel=-1)
+ portage_group_warning()
+else:
+ userpriv_groups = [portage_gid]
+ if secpass >= 2:
+ class _LazyUserprivGroups(portage.proxy.objectproxy.ObjectProxy):
+ def _get_target(self):
+ global userpriv_groups
+ if userpriv_groups is not self:
+ return userpriv_groups
+ userpriv_groups = _userpriv_groups
+ # Get a list of group IDs for the portage user. Do not use
+ # grp.getgrall() since it is known to trigger spurious
+ # SIGPIPE problems with nss_ldap.
+ mystatus, myoutput = \
+ portage.subprocess_getstatusoutput("id -G %s" % _portage_uname)
+ if mystatus == os.EX_OK:
+ for x in myoutput.split():
+ try:
+ userpriv_groups.append(int(x))
+ except ValueError:
+ pass
+ userpriv_groups[:] = sorted(set(userpriv_groups))
+ return userpriv_groups
+
+ _userpriv_groups = userpriv_groups
+ userpriv_groups = _LazyUserprivGroups()
diff --git a/portage_with_autodep/pym/portage/dbapi/_MergeProcess.py b/portage_with_autodep/pym/portage/dbapi/_MergeProcess.py
new file mode 100644
index 0000000..34ed031
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dbapi/_MergeProcess.py
@@ -0,0 +1,282 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import io
+import shutil
+import signal
+import tempfile
+import traceback
+
+import errno
+import fcntl
+import portage
+from portage import os, _unicode_decode
+from portage.const import PORTAGE_PACKAGE_ATOM
+from portage.dep import match_from_list
+import portage.elog.messages
+from portage.elog import _preload_elog_modules
+from portage.util import ensure_dirs
+from _emerge.PollConstants import PollConstants
+from _emerge.SpawnProcess import SpawnProcess
+
+class MergeProcess(SpawnProcess):
+ """
+ Merge packages in a subprocess, so the Scheduler can run in the main
+ thread while files are moved or copied asynchronously.
+ """
+
+ __slots__ = ('mycat', 'mypkg', 'settings', 'treetype',
+ 'vartree', 'scheduler', 'blockers', 'pkgloc', 'infloc', 'myebuild',
+ 'mydbapi', 'prev_mtimes', 'unmerge', '_elog_reader_fd', '_elog_reg_id',
+ '_buf', '_elog_keys', '_locked_vdb')
+
+ def _start(self):
+ # Portage should always call setcpv prior to this
+ # point, but here we have a fallback as a convenience
+ # for external API consumers. It's important that
+ # this metadata access happens in the parent process,
+ # since closing of file descriptors in the subprocess
+ # can prevent access to open database connections such
+ # as that used by the sqlite metadata cache module.
+ cpv = "%s/%s" % (self.mycat, self.mypkg)
+ settings = self.settings
+ if cpv != settings.mycpv or \
+ "EAPI" not in settings.configdict["pkg"]:
+ settings.reload()
+ settings.reset()
+ settings.setcpv(cpv, mydb=self.mydbapi)
+
+ if not self.unmerge:
+ self._handle_self_reinstall()
+ super(MergeProcess, self)._start()
+
+ def _lock_vdb(self):
+ """
+ Lock the vdb if FEATURES=parallel-install is NOT enabled,
+ otherwise do nothing. This is implemented with
+ vardbapi.lock(), which supports reentrance by the
+ subprocess that we spawn.
+ """
+ if "parallel-install" not in self.settings.features:
+ self.vartree.dbapi.lock()
+ self._locked_vdb = True
+
+ def _unlock_vdb(self):
+ """
+ Unlock the vdb if we hold a lock, otherwise do nothing.
+ """
+ if self._locked_vdb:
+ self.vartree.dbapi.unlock()
+ self._locked_vdb = False
+
+ def _handle_self_reinstall(self):
+ """
+ If portage is reinstalling itself, create temporary
+ copies of PORTAGE_BIN_PATH and PORTAGE_PYM_PATH in order
+ to avoid relying on the new versions which may be
+ incompatible. Register an atexit hook to clean up the
+ temporary directories. Pre-load elog modules here since
+ we won't be able to later if they get unmerged (happens
+ when namespace changes).
+ """
+
+ settings = self.settings
+ cpv = settings.mycpv
+ reinstall_self = False
+ if self.settings["ROOT"] == "/" and \
+ match_from_list(PORTAGE_PACKAGE_ATOM, [cpv]):
+ inherited = frozenset(self.settings.get('INHERITED', '').split())
+ if not self.vartree.dbapi.cpv_exists(cpv) or \
+ '9999' in cpv or \
+ 'git' in inherited or \
+ 'git-2' in inherited:
+ reinstall_self = True
+
+ if reinstall_self:
+ # Load lazily referenced portage submodules into memory,
+ # so imports won't fail during portage upgrade/downgrade.
+ _preload_elog_modules(self.settings)
+ portage.proxy.lazyimport._preload_portage_submodules()
+
+ # Make the temp directory inside $PORTAGE_TMPDIR/portage, since
+ # it's common for /tmp and /var/tmp to be mounted with the
+ # "noexec" option (see bug #346899).
+ build_prefix = os.path.join(settings["PORTAGE_TMPDIR"], "portage")
+ ensure_dirs(build_prefix)
+ base_path_tmp = tempfile.mkdtemp(
+ "", "._portage_reinstall_.", build_prefix)
+ portage.process.atexit_register(shutil.rmtree, base_path_tmp)
+ dir_perms = 0o755
+ for subdir in "bin", "pym":
+ var_name = "PORTAGE_%s_PATH" % subdir.upper()
+ var_orig = settings[var_name]
+ var_new = os.path.join(base_path_tmp, subdir)
+ settings[var_name] = var_new
+ settings.backup_changes(var_name)
+ shutil.copytree(var_orig, var_new, symlinks=True)
+ os.chmod(var_new, dir_perms)
+ portage._bin_path = settings['PORTAGE_BIN_PATH']
+ portage._pym_path = settings['PORTAGE_PYM_PATH']
+ os.chmod(base_path_tmp, dir_perms)
+
+ def _elog_output_handler(self, fd, event):
+ output = None
+ if event & PollConstants.POLLIN:
+ try:
+ output = os.read(fd, self._bufsize)
+ except OSError as e:
+ if e.errno not in (errno.EAGAIN, errno.EINTR):
+ raise
+ if output:
+ lines = _unicode_decode(output).split('\n')
+ if len(lines) == 1:
+ self._buf += lines[0]
+ else:
+ lines[0] = self._buf + lines[0]
+ self._buf = lines.pop()
+ out = io.StringIO()
+ for line in lines:
+ funcname, phase, key, msg = line.split(' ', 3)
+ self._elog_keys.add(key)
+ reporter = getattr(portage.elog.messages, funcname)
+ reporter(msg, phase=phase, key=key, out=out)
+
+ def _spawn(self, args, fd_pipes, **kwargs):
+ """
+ Fork a subprocess, apply local settings, and call
+ dblink.merge().
+ """
+
+ elog_reader_fd, elog_writer_fd = os.pipe()
+ fcntl.fcntl(elog_reader_fd, fcntl.F_SETFL,
+ fcntl.fcntl(elog_reader_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+ blockers = None
+ if self.blockers is not None:
+ # Query blockers in the main process, since closing
+ # of file descriptors in the subprocess can prevent
+ # access to open database connections such as that
+ # used by the sqlite metadata cache module.
+ blockers = self.blockers()
+ mylink = portage.dblink(self.mycat, self.mypkg, settings=self.settings,
+ treetype=self.treetype, vartree=self.vartree,
+ blockers=blockers, scheduler=self.scheduler,
+ pipe=elog_writer_fd)
+ fd_pipes[elog_writer_fd] = elog_writer_fd
+ self._elog_reg_id = self.scheduler.register(elog_reader_fd,
+ self._registered_events, self._elog_output_handler)
+
+ # If a concurrent emerge process tries to install a package
+ # in the same SLOT as this one at the same time, there is an
+ # extremely unlikely chance that the COUNTER values will not be
+ # ordered correctly unless we lock the vdb here.
+ # FEATURES=parallel-install skips this lock in order to
+ # improve performance, and the risk is practically negligible.
+ self._lock_vdb()
+ counter = None
+ if not self.unmerge:
+ counter = self.vartree.dbapi.counter_tick()
+
+ pid = os.fork()
+ if pid != 0:
+ os.close(elog_writer_fd)
+ self._elog_reader_fd = elog_reader_fd
+ self._buf = ""
+ self._elog_keys = set()
+
+ # invalidate relevant vardbapi caches
+ if self.vartree.dbapi._categories is not None:
+ self.vartree.dbapi._categories = None
+ self.vartree.dbapi._pkgs_changed = True
+ self.vartree.dbapi._clear_pkg_cache(mylink)
+
+ portage.process.spawned_pids.append(pid)
+ return [pid]
+
+ os.close(elog_reader_fd)
+ portage.process._setup_pipes(fd_pipes)
+
+ # Use default signal handlers since the ones inherited
+ # from the parent process are irrelevant here.
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+ portage.output.havecolor = self.settings.get('NOCOLOR') \
+ not in ('yes', 'true')
+
+ # In this subprocess we want mylink._display_merge() to use
+ # stdout/stderr directly since they are pipes. This behavior
+ # is triggered when mylink._scheduler is None.
+ mylink._scheduler = None
+
+		# Avoid wasteful updates of the vdb cache.
+ self.vartree.dbapi._flush_cache_enabled = False
+
+ # In this subprocess we don't want PORTAGE_BACKGROUND to
+ # suppress stdout/stderr output since they are pipes. We
+ # also don't want to open PORTAGE_LOG_FILE, since it will
+ # already be opened by the parent process, so we set the
+ # "subprocess" value for use in conditional logging code
+ # involving PORTAGE_LOG_FILE.
+ if not self.unmerge:
+ # unmerge phases have separate logs
+ if self.settings.get("PORTAGE_BACKGROUND") == "1":
+ self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "1"
+ else:
+ self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "0"
+ self.settings.backup_changes("PORTAGE_BACKGROUND_UNMERGE")
+ self.settings["PORTAGE_BACKGROUND"] = "subprocess"
+ self.settings.backup_changes("PORTAGE_BACKGROUND")
+
+ rval = 1
+ try:
+ if self.unmerge:
+ if not mylink.exists():
+ rval = os.EX_OK
+ elif mylink.unmerge(
+ ldpath_mtimes=self.prev_mtimes) == os.EX_OK:
+ mylink.lockdb()
+ try:
+ mylink.delete()
+ finally:
+ mylink.unlockdb()
+ rval = os.EX_OK
+ else:
+ rval = mylink.merge(self.pkgloc, self.infloc,
+ myebuild=self.myebuild, mydbapi=self.mydbapi,
+ prev_mtimes=self.prev_mtimes, counter=counter)
+ except SystemExit:
+ raise
+ except:
+ traceback.print_exc()
+ finally:
+ # Call os._exit() from finally block, in order to suppress any
+ # finally blocks from earlier in the call stack. See bug #345289.
+ os._exit(rval)
+
+ def _unregister(self):
+ """
+ Unregister from the scheduler and close open files.
+ """
+
+ if not self.unmerge:
+ # Populate the vardbapi cache for the new package
+ # while its inodes are still hot.
+ try:
+ self.vartree.dbapi.aux_get(self.settings.mycpv, ["EAPI"])
+ except KeyError:
+ pass
+
+ self._unlock_vdb()
+ if self._elog_reg_id is not None:
+ self.scheduler.unregister(self._elog_reg_id)
+ self._elog_reg_id = None
+ if self._elog_reader_fd:
+ os.close(self._elog_reader_fd)
+ self._elog_reader_fd = None
+ if self._elog_keys is not None:
+ for key in self._elog_keys:
+ portage.elog.elog_process(key, self.settings,
+ phasefilter=("prerm", "postrm"))
+ self._elog_keys = None
+
+ super(MergeProcess, self)._unregister()
diff --git a/portage_with_autodep/pym/portage/dbapi/__init__.py b/portage_with_autodep/pym/portage/dbapi/__init__.py
new file mode 100644
index 0000000..e386faa
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dbapi/__init__.py
@@ -0,0 +1,302 @@
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ["dbapi"]
+
+import re
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.dbapi.dep_expand:dep_expand@_dep_expand',
+ 'portage.dep:match_from_list',
+ 'portage.output:colorize',
+ 'portage.util:cmp_sort_key,writemsg',
+ 'portage.versions:catsplit,catpkgsplit,vercmp',
+)
+
+from portage import os
+from portage import auxdbkeys
+from portage.localization import _
+
+class dbapi(object):
+ _category_re = re.compile(r'^\w[-.+\w]*$')
+ _categories = None
+ _use_mutable = False
+ _known_keys = frozenset(x for x in auxdbkeys
+ if not x.startswith("UNUSED_0"))
+ def __init__(self):
+ pass
+
+ @property
+ def categories(self):
+ """
+ Use self.cp_all() to generate a category list. Mutable instances
+ can delete the self._categories attribute in cases when the cached
+ categories become invalid and need to be regenerated.
+ """
+ if self._categories is not None:
+ return self._categories
+ self._categories = tuple(sorted(set(catsplit(x)[0] \
+ for x in self.cp_all())))
+ return self._categories
+
+ def close_caches(self):
+ pass
+
+ def cp_list(self, cp, use_cache=1):
+ raise NotImplementedError(self)
+
+ def _cpv_sort_ascending(self, cpv_list):
+ """
+ Use this to sort self.cp_list() results in ascending
+ order. It sorts in place and returns None.
+ """
+ if len(cpv_list) > 1:
+ # If the cpv includes explicit -r0, it has to be preserved
+ # for consistency in findname and aux_get calls, so use a
+ # dict to map strings back to their original values.
+ ver_map = {}
+ for cpv in cpv_list:
+ ver_map[cpv] = '-'.join(catpkgsplit(cpv)[2:])
+ def cmp_cpv(cpv1, cpv2):
+ return vercmp(ver_map[cpv1], ver_map[cpv2])
+ cpv_list.sort(key=cmp_sort_key(cmp_cpv))
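+			# Illustrative example (hypothetical cpvs): ["foo/bar-2",
+			# "foo/bar-1-r0"] sorts in place to ["foo/bar-1-r0", "foo/bar-2"],
+			# with the explicit -r0 string preserved exactly as given.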
+
+ def cpv_all(self):
+ """Return all CPVs in the db
+ Args:
+ None
+ Returns:
+ A list of Strings, 1 per CPV
+
+ This function relies on a subclass implementing cp_all, this is why the hasattr is there
+ """
+
+ if not hasattr(self, "cp_all"):
+ raise NotImplementedError
+ cpv_list = []
+ for cp in self.cp_all():
+ cpv_list.extend(self.cp_list(cp))
+ return cpv_list
+
+ def cp_all(self):
+ """ Implement this in a child class
+ Args
+ None
+ Returns:
+ A list of strings 1 per CP in the datastore
+ """
+		raise NotImplementedError
+
+ def aux_get(self, mycpv, mylist, myrepo=None):
+ """Return the metadata keys in mylist for mycpv
+ Args:
+ mycpv - "sys-apps/foo-1.0"
+ mylist - ["SLOT","DEPEND","HOMEPAGE"]
+ myrepo - The repository name.
+ Returns:
+ a list of results, in order of keys in mylist, such as:
+ ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or [] if mycpv not found'
+ """
+ raise NotImplementedError
+
+ def aux_update(self, cpv, metadata_updates):
+ """
+ Args:
+ cpv - "sys-apps/foo-1.0"
+ metadata_updates = { key : newvalue }
+ Returns:
+ None
+ """
+ raise NotImplementedError
+
+ def match(self, origdep, use_cache=1):
+ """Given a dependency, try to find packages that match
+ Args:
+ origdep - Depend atom
+ use_cache - Boolean indicating if we should use the cache or not
+ NOTE: Do we ever not want the cache?
+ Returns:
+ a list of packages that match origdep
+ """
+ mydep = _dep_expand(origdep, mydb=self, settings=self.settings)
+ return list(self._iter_match(mydep,
+ self.cp_list(mydep.cp, use_cache=use_cache)))
+
+ def _iter_match(self, atom, cpv_iter):
+ cpv_iter = iter(match_from_list(atom, cpv_iter))
+ if atom.slot:
+ cpv_iter = self._iter_match_slot(atom, cpv_iter)
+ if atom.unevaluated_atom.use:
+ cpv_iter = self._iter_match_use(atom, cpv_iter)
+ if atom.repo:
+ cpv_iter = self._iter_match_repo(atom, cpv_iter)
+ return cpv_iter
+
+ def _iter_match_repo(self, atom, cpv_iter):
+ for cpv in cpv_iter:
+ try:
+ if self.aux_get(cpv, ["repository"], myrepo=atom.repo)[0] == atom.repo:
+ yield cpv
+ except KeyError:
+ continue
+
+ def _iter_match_slot(self, atom, cpv_iter):
+ for cpv in cpv_iter:
+ try:
+ if self.aux_get(cpv, ["SLOT"], myrepo=atom.repo)[0] == atom.slot:
+ yield cpv
+ except KeyError:
+ continue
+
+ def _iter_match_use(self, atom, cpv_iter):
+ """
+ 1) Check for required IUSE intersection (need implicit IUSE here).
+ 2) Check enabled/disabled flag states.
+ """
+
+ iuse_implicit_match = self.settings._iuse_implicit_match
+ for cpv in cpv_iter:
+ try:
+ iuse, slot, use = self.aux_get(cpv, ["IUSE", "SLOT", "USE"], myrepo=atom.repo)
+ except KeyError:
+ continue
+ iuse = frozenset(x.lstrip('+-') for x in iuse.split())
+ missing_iuse = False
+ for x in atom.unevaluated_atom.use.required:
+ if x not in iuse and not iuse_implicit_match(x):
+ missing_iuse = True
+ break
+ if missing_iuse:
+ continue
+ if not atom.use:
+ pass
+ elif not self._use_mutable:
+ # Use IUSE to validate USE settings for built packages,
+ # in case the package manager that built this package
+ # failed to do that for some reason (or in case of
+ # data corruption).
+ use = frozenset(x for x in use.split() if x in iuse or \
+ iuse_implicit_match(x))
+ missing_enabled = atom.use.missing_enabled.difference(iuse)
+ missing_disabled = atom.use.missing_disabled.difference(iuse)
+
+ if atom.use.enabled:
+ if atom.use.enabled.intersection(missing_disabled):
+ continue
+ need_enabled = atom.use.enabled.difference(use)
+ if need_enabled:
+ need_enabled = need_enabled.difference(missing_enabled)
+ if need_enabled:
+ continue
+
+ if atom.use.disabled:
+ if atom.use.disabled.intersection(missing_enabled):
+ continue
+ need_disabled = atom.use.disabled.intersection(use)
+ if need_disabled:
+ need_disabled = need_disabled.difference(missing_disabled)
+ if need_disabled:
+ continue
+ else:
+ # Check masked and forced flags for repoman.
+ mysettings = getattr(self, 'settings', None)
+ if mysettings is not None and not mysettings.local_config:
+
+ pkg = "%s:%s" % (cpv, slot)
+ usemask = mysettings._getUseMask(pkg)
+ if usemask.intersection(atom.use.enabled):
+ continue
+
+ useforce = mysettings._getUseForce(pkg).difference(usemask)
+ if useforce.intersection(atom.use.disabled):
+ continue
+
+ yield cpv
+
+ def invalidentry(self, mypath):
+ if '/-MERGING-' in mypath:
+ if os.path.exists(mypath):
+ writemsg(colorize("BAD", _("INCOMPLETE MERGE:"))+" %s\n" % mypath,
+ noiselevel=-1)
+ else:
+ writemsg("!!! Invalid db entry: %s\n" % mypath, noiselevel=-1)
+
+ def update_ents(self, updates, onProgress=None, onUpdate=None):
+ """
+ Update metadata of all packages for package moves.
+ @param updates: A list of move commands, or dict of {repo_name: list}
+ @type updates: list or dict
+ @param onProgress: A progress callback function
+ @type onProgress: a callable that takes 2 integer arguments: maxval and curval
+ @param onUpdate: A progress callback function called only
+ for packages that are modified by updates.
+ @type onUpdate: a callable that takes 2 integer arguments:
+ maxval and curval
+ """
+ cpv_all = self.cpv_all()
+ cpv_all.sort()
+ maxval = len(cpv_all)
+ aux_get = self.aux_get
+ aux_update = self.aux_update
+ meta_keys = ["DEPEND", "RDEPEND", "PDEPEND", "PROVIDE", 'repository']
+ repo_dict = None
+ if isinstance(updates, dict):
+ repo_dict = updates
+ from portage.update import update_dbentries
+ if onUpdate:
+ onUpdate(maxval, 0)
+ if onProgress:
+ onProgress(maxval, 0)
+ for i, cpv in enumerate(cpv_all):
+ metadata = dict(zip(meta_keys, aux_get(cpv, meta_keys)))
+ repo = metadata.pop('repository')
+ if repo_dict is None:
+ updates_list = updates
+ else:
+ try:
+ updates_list = repo_dict[repo]
+ except KeyError:
+ try:
+ updates_list = repo_dict['DEFAULT']
+ except KeyError:
+ continue
+
+ if not updates_list:
+ continue
+
+ metadata_updates = update_dbentries(updates_list, metadata)
+ if metadata_updates:
+ aux_update(cpv, metadata_updates)
+ if onUpdate:
+ onUpdate(maxval, i+1)
+ if onProgress:
+ onProgress(maxval, i+1)
+
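+	# Hypothetical usage sketch (the atoms are placeholders): apply a single
+	# package move, in the list-of-commands form consumed by
+	# portage.update.update_dbentries, across every installed package:
+	#
+	#   >>> db.update_ents([["move", Atom("x11-libs/foo"), Atom("x11-libs/bar")]])
+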
+ def move_slot_ent(self, mylist, repo_match=None):
+ """This function takes a sequence:
+ Args:
+			mylist: a sequence of (command, package, originalslot, newslot)
+ repo_match: callable that takes single repo_name argument
+ and returns True if the update should be applied
+ Returns:
+ The number of slotmoves this function did
+ """
+ pkg = mylist[1]
+ origslot = mylist[2]
+ newslot = mylist[3]
+ origmatches = self.match(pkg)
+ moves = 0
+ if not origmatches:
+ return moves
+ for mycpv in origmatches:
+ slot = self.aux_get(mycpv, ["SLOT"])[0]
+ if slot != origslot:
+ continue
+ if repo_match is not None \
+ and not repo_match(self.aux_get(mycpv, ['repository'])[0]):
+ continue
+ moves += 1
+ mydata = {"SLOT": newslot+"\n"}
+ self.aux_update(mycpv, mydata)
+ return moves
diff --git a/portage_with_autodep/pym/portage/dbapi/_expand_new_virt.py b/portage_with_autodep/pym/portage/dbapi/_expand_new_virt.py
new file mode 100644
index 0000000..6d6a27d
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dbapi/_expand_new_virt.py
@@ -0,0 +1,72 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage.dep import Atom, _get_useflag_re
+
+def expand_new_virt(vardb, atom):
+ """
+ Iterate over the recursively expanded RDEPEND atoms of
+ a new-style virtual. If atom is not a new-style virtual
+ or it does not match an installed package then it is
+ yielded without any expansion.
+ """
+ if not isinstance(atom, Atom):
+ atom = Atom(atom)
+
+ if not atom.cp.startswith("virtual/"):
+ yield atom
+ return
+
+ traversed = set()
+ stack = [atom]
+
+ while stack:
+ atom = stack.pop()
+ if atom.blocker or \
+ not atom.cp.startswith("virtual/"):
+ yield atom
+ continue
+
+ matches = vardb.match(atom)
+ if not (matches and matches[-1].startswith("virtual/")):
+ yield atom
+ continue
+
+ virt_cpv = matches[-1]
+ if virt_cpv in traversed:
+ continue
+
+ traversed.add(virt_cpv)
+ eapi, iuse, rdepend, use = vardb.aux_get(virt_cpv,
+ ["EAPI", "IUSE", "RDEPEND", "USE"])
+ if not portage.eapi_is_supported(eapi):
+ yield atom
+ continue
+
+		# Validate IUSE and USE, for early detection of vardb corruption.
+ useflag_re = _get_useflag_re(eapi)
+ valid_iuse = []
+ for x in iuse.split():
+ if x[:1] in ("+", "-"):
+ x = x[1:]
+ if useflag_re.match(x) is not None:
+ valid_iuse.append(x)
+ valid_iuse = frozenset(valid_iuse)
+
+ iuse_implicit_match = vardb.settings._iuse_implicit_match
+ valid_use = []
+ for x in use.split():
+ if x in valid_iuse or iuse_implicit_match(x):
+ valid_use.append(x)
+ valid_use = frozenset(valid_use)
+
+ success, atoms = portage.dep_check(rdepend,
+ None, vardb.settings, myuse=valid_use,
+ myroot=vardb.root, trees={vardb.root:{"porttree":vardb.vartree,
+ "vartree":vardb.vartree}})
+
+ if success:
+ stack.extend(atoms)
+ else:
+ yield atom
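+
+# Hypothetical usage sketch (vardb stands for an installed-packages dbapi
+# instance; the atom is a placeholder): recursively expand a new-style
+# virtual into the concrete atoms its installed provider depends on.
+#
+#   >>> for a in expand_new_virt(vardb, "virtual/libc"):
+#   ...     print(a)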
diff --git a/portage_with_autodep/pym/portage/dbapi/bintree.py b/portage_with_autodep/pym/portage/dbapi/bintree.py
new file mode 100644
index 0000000..62fc623
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dbapi/bintree.py
@@ -0,0 +1,1366 @@
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ["bindbapi", "binarytree"]
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.checksum:hashfunc_map,perform_multiple_checksums,verify_all',
+ 'portage.dbapi.dep_expand:dep_expand',
+ 'portage.dep:dep_getkey,isjustname,match_from_list',
+ 'portage.output:EOutput,colorize',
+ 'portage.locks:lockfile,unlockfile',
+ 'portage.package.ebuild.doebuild:_vdb_use_conditional_atoms',
+ 'portage.package.ebuild.fetch:_check_distfile',
+ 'portage.update:update_dbentries',
+ 'portage.util:atomic_ofstream,ensure_dirs,normalize_path,' + \
+ 'writemsg,writemsg_stdout',
+ 'portage.util.listdir:listdir',
+ 'portage.versions:best,catpkgsplit,catsplit',
+)
+
+from portage.cache.mappings import slot_dict_class
+from portage.const import CACHE_PATH
+from portage.dbapi.virtual import fakedbapi
+from portage.dep import Atom, use_reduce, paren_enclose
+from portage.exception import AlarmSignal, InvalidPackageName, \
+ PermissionDenied, PortageException
+from portage.localization import _
+from portage import _movefile
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+
+import codecs
+import errno
+import io
+import re
+import stat
+import subprocess
+import sys
+import tempfile
+import textwrap
+from itertools import chain
+try:
+ from urllib.parse import urlparse
+ from urllib.request import urlopen as urllib_request_urlopen
+except ImportError:
+ from urlparse import urlparse
+ from urllib import urlopen as urllib_request_urlopen
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+ long = int
+
+class bindbapi(fakedbapi):
+ _known_keys = frozenset(list(fakedbapi._known_keys) + \
+ ["CHOST", "repository", "USE"])
+ def __init__(self, mybintree=None, **kwargs):
+ fakedbapi.__init__(self, **kwargs)
+ self.bintree = mybintree
+ self.move_ent = mybintree.move_ent
+ self.cpvdict={}
+ self.cpdict={}
+ # Selectively cache metadata in order to optimize dep matching.
+ self._aux_cache_keys = set(
+ ["BUILD_TIME", "CHOST", "DEPEND", "EAPI", "IUSE", "KEYWORDS",
+ "LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE",
+ "RDEPEND", "repository", "RESTRICT", "SLOT", "USE", "DEFINED_PHASES",
+ "REQUIRED_USE"])
+ self._aux_cache_slot_dict = slot_dict_class(self._aux_cache_keys)
+ self._aux_cache = {}
+
+ def match(self, *pargs, **kwargs):
+ if self.bintree and not self.bintree.populated:
+ self.bintree.populate()
+ return fakedbapi.match(self, *pargs, **kwargs)
+
+ def cpv_exists(self, cpv, myrepo=None):
+ if self.bintree and not self.bintree.populated:
+ self.bintree.populate()
+ return fakedbapi.cpv_exists(self, cpv)
+
+ def cpv_inject(self, cpv, **kwargs):
+ self._aux_cache.pop(cpv, None)
+ fakedbapi.cpv_inject(self, cpv, **kwargs)
+
+ def cpv_remove(self, cpv):
+ self._aux_cache.pop(cpv, None)
+ fakedbapi.cpv_remove(self, cpv)
+
+ def aux_get(self, mycpv, wants, myrepo=None):
+ if self.bintree and not self.bintree.populated:
+ self.bintree.populate()
+ cache_me = False
+ if not self._known_keys.intersection(
+ wants).difference(self._aux_cache_keys):
+ aux_cache = self._aux_cache.get(mycpv)
+ if aux_cache is not None:
+ return [aux_cache.get(x, "") for x in wants]
+ cache_me = True
+ mysplit = mycpv.split("/")
+ mylist = []
+ tbz2name = mysplit[1]+".tbz2"
+ if not self.bintree._remotepkgs or \
+ not self.bintree.isremote(mycpv):
+ tbz2_path = self.bintree.getname(mycpv)
+ if not os.path.exists(tbz2_path):
+ raise KeyError(mycpv)
+ metadata_bytes = portage.xpak.tbz2(tbz2_path).get_data()
+ def getitem(k):
+ v = metadata_bytes.get(_unicode_encode(k,
+ encoding=_encodings['repo.content'],
+ errors='backslashreplace'))
+ if v is not None:
+ v = _unicode_decode(v,
+ encoding=_encodings['repo.content'], errors='replace')
+ return v
+ else:
+ getitem = self.bintree._remotepkgs[mycpv].get
+ mydata = {}
+ mykeys = wants
+ if cache_me:
+ mykeys = self._aux_cache_keys.union(wants)
+ for x in mykeys:
+ myval = getitem(x)
+ # myval is None if the key doesn't exist
+ # or the tbz2 is corrupt.
+ if myval:
+ mydata[x] = " ".join(myval.split())
+
+ if not mydata.setdefault('EAPI', _unicode_decode('0')):
+ mydata['EAPI'] = _unicode_decode('0')
+
+ if cache_me:
+ aux_cache = self._aux_cache_slot_dict()
+ for x in self._aux_cache_keys:
+ aux_cache[x] = mydata.get(x, _unicode_decode(''))
+ self._aux_cache[mycpv] = aux_cache
+ return [mydata.get(x, _unicode_decode('')) for x in wants]
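+ # Usage sketch (cpv hypothetical): dependency-resolution keys are
+ # answered from self._aux_cache after the first lookup, e.g.:
+ # slot, eapi = self.aux_get("app-misc/foo-1.0", ["SLOT", "EAPI"])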
+
+ def aux_update(self, cpv, values):
+ if not self.bintree.populated:
+ self.bintree.populate()
+ tbz2path = self.bintree.getname(cpv)
+ if not os.path.exists(tbz2path):
+ raise KeyError(cpv)
+ mytbz2 = portage.xpak.tbz2(tbz2path)
+ mydata = mytbz2.get_data()
+
+ for k, v in values.items():
+ k = _unicode_encode(k,
+ encoding=_encodings['repo.content'], errors='backslashreplace')
+ v = _unicode_encode(v,
+ encoding=_encodings['repo.content'], errors='backslashreplace')
+ mydata[k] = v
+
+ for k, v in list(mydata.items()):
+ if not v:
+ del mydata[k]
+ mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))
+ # inject will clear stale caches via cpv_inject.
+ self.bintree.inject(cpv)
+
+ def cp_list(self, *pargs, **kwargs):
+ if not self.bintree.populated:
+ self.bintree.populate()
+ return fakedbapi.cp_list(self, *pargs, **kwargs)
+
+ def cp_all(self):
+ if not self.bintree.populated:
+ self.bintree.populate()
+ return fakedbapi.cp_all(self)
+
+ def cpv_all(self):
+ if not self.bintree.populated:
+ self.bintree.populate()
+ return fakedbapi.cpv_all(self)
+
+def _pkgindex_cpv_map_latest_build(pkgindex):
+ """
+ Given a PackageIndex instance, create a dict of cpv -> metadata map.
+ If multiple packages have identical CPV values, prefer the package
+ with latest BUILD_TIME value.
+ @param pkgindex: A PackageIndex instance.
+ @type pkgindex: PackageIndex
+ @rtype: dict
+ @returns: a dict mapping each cpv to the metadata entry of its latest build.
+ """
+ cpv_map = {}
+
+ for d in pkgindex.packages:
+ cpv = d["CPV"]
+
+ btime = d.get('BUILD_TIME', '')
+ try:
+ btime = int(btime)
+ except ValueError:
+ btime = None
+
+ other_d = cpv_map.get(cpv)
+ if other_d is not None:
+ other_btime = other_d.get('BUILD_TIME', '')
+ try:
+ other_btime = int(other_btime)
+ except ValueError:
+ other_btime = None
+ if other_btime and (not btime or other_btime > btime):
+ continue
+
+ cpv_map[cpv] = d
+
+ return cpv_map
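+# Sketch with hypothetical data: duplicate CPV entries collapse to the
+# newest build, so for packages [{"CPV": "a/b-1", "BUILD_TIME": "10"},
+# {"CPV": "a/b-1", "BUILD_TIME": "20"}] the map keeps the "20" entry.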
+
+class binarytree(object):
+ "this tree scans for a list of all packages available in PKGDIR"
+ def __init__(self, root, pkgdir, virtual=None, settings=None):
+ if True:
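+ # Note: this "if True:" looks like a leftover from a removed condition,
+ # kept (here and in _populate below) to avoid reindenting the block.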
+ self.root = root
+ #self.pkgdir=settings["PKGDIR"]
+ self.pkgdir = normalize_path(pkgdir)
+ self.dbapi = bindbapi(self, settings=settings)
+ self.update_ents = self.dbapi.update_ents
+ self.move_slot_ent = self.dbapi.move_slot_ent
+ self.populated = 0
+ self.tree = {}
+ self._remote_has_index = False
+ self._remotepkgs = None # remote metadata indexed by cpv
+ self.invalids = []
+ self.settings = settings
+ self._pkg_paths = {}
+ self._pkgindex_uri = {}
+ self._populating = False
+ self._all_directory = os.path.isdir(
+ os.path.join(self.pkgdir, "All"))
+ self._pkgindex_version = 0
+ self._pkgindex_hashes = ["MD5","SHA1"]
+ self._pkgindex_file = os.path.join(self.pkgdir, "Packages")
+ self._pkgindex_keys = self.dbapi._aux_cache_keys.copy()
+ self._pkgindex_keys.update(["CPV", "MTIME", "SIZE"])
+ self._pkgindex_aux_keys = \
+ ["BUILD_TIME", "CHOST", "DEPEND", "DESCRIPTION", "EAPI",
+ "IUSE", "KEYWORDS", "LICENSE", "PDEPEND", "PROPERTIES",
+ "PROVIDE", "RDEPEND", "repository", "SLOT", "USE", "DEFINED_PHASES",
+ "REQUIRED_USE", "BASE_URI"]
+ self._pkgindex_aux_keys = list(self._pkgindex_aux_keys)
+ self._pkgindex_use_evaluated_keys = \
+ ("LICENSE", "RDEPEND", "DEPEND",
+ "PDEPEND", "PROPERTIES", "PROVIDE")
+ self._pkgindex_header_keys = set([
+ "ACCEPT_KEYWORDS", "ACCEPT_LICENSE",
+ "ACCEPT_PROPERTIES", "CBUILD",
+ "CONFIG_PROTECT", "CONFIG_PROTECT_MASK", "FEATURES",
+ "GENTOO_MIRRORS", "INSTALL_MASK", "SYNC", "USE"])
+ self._pkgindex_default_pkg_data = {
+ "BUILD_TIME" : "",
+ "DEPEND" : "",
+ "EAPI" : "0",
+ "IUSE" : "",
+ "KEYWORDS": "",
+ "LICENSE" : "",
+ "PATH" : "",
+ "PDEPEND" : "",
+ "PROPERTIES" : "",
+ "PROVIDE" : "",
+ "RDEPEND" : "",
+ "RESTRICT": "",
+ "SLOT" : "0",
+ "USE" : "",
+ "DEFINED_PHASES" : "",
+ "REQUIRED_USE" : ""
+ }
+ self._pkgindex_inherited_keys = ["CHOST", "repository"]
+
+ # Populate the header with appropriate defaults.
+ self._pkgindex_default_header_data = {
+ "CHOST" : self.settings.get("CHOST", ""),
+ "repository" : "",
+ }
+
+ # It is especially important to populate keys like
+ # "repository" that save space when entries can
+ # inherit them from the header. If an existing
+ # pkgindex header already defines these keys, then
+ # they will appropriately override our defaults.
+ main_repo = self.settings.repositories.mainRepo()
+ if main_repo is not None and not main_repo.missing_repo_name:
+ self._pkgindex_default_header_data["repository"] = \
+ main_repo.name
+
+ self._pkgindex_translated_keys = (
+ ("DESCRIPTION" , "DESC"),
+ ("repository" , "REPO"),
+ )
+
+ self._pkgindex_allowed_pkg_keys = set(chain(
+ self._pkgindex_keys,
+ self._pkgindex_aux_keys,
+ self._pkgindex_hashes,
+ self._pkgindex_default_pkg_data,
+ self._pkgindex_inherited_keys,
+ chain(*self._pkgindex_translated_keys)
+ ))
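+ # For reference, the Packages index written from these keys is plain
+ # text: a header of KEY: VALUE lines, a blank line, then one block per
+ # package; DESCRIPTION and repository are stored via their translated
+ # short forms DESC and REPO.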
+
+ def move_ent(self, mylist, repo_match=None):
+ if not self.populated:
+ self.populate()
+ origcp = mylist[1]
+ newcp = mylist[2]
+ # sanity check
+ for atom in (origcp, newcp):
+ if not isjustname(atom):
+ raise InvalidPackageName(str(atom))
+ mynewcat = catsplit(newcp)[0]
+ origmatches=self.dbapi.cp_list(origcp)
+ moves = 0
+ if not origmatches:
+ return moves
+ for mycpv in origmatches:
+ mycpv_cp = portage.cpv_getkey(mycpv)
+ if mycpv_cp != origcp:
+ # Ignore PROVIDE virtual match.
+ continue
+ if repo_match is not None \
+ and not repo_match(self.dbapi.aux_get(mycpv,
+ ['repository'])[0]):
+ continue
+ mynewcpv = mycpv.replace(mycpv_cp, str(newcp), 1)
+ myoldpkg = catsplit(mycpv)[1]
+ mynewpkg = catsplit(mynewcpv)[1]
+
+ if (mynewpkg != myoldpkg) and os.path.exists(self.getname(mynewcpv)):
+ writemsg(_("!!! Cannot update binary: Destination exists.\n"),
+ noiselevel=-1)
+ writemsg("!!! "+mycpv+" -> "+mynewcpv+"\n", noiselevel=-1)
+ continue
+
+ tbz2path = self.getname(mycpv)
+ if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
+ writemsg(_("!!! Cannot update readonly binary: %s\n") % mycpv,
+ noiselevel=-1)
+ continue
+
+ moves += 1
+ mytbz2 = portage.xpak.tbz2(tbz2path)
+ mydata = mytbz2.get_data()
+ updated_items = update_dbentries([mylist], mydata)
+ mydata.update(updated_items)
+ mydata[b'PF'] = \
+ _unicode_encode(mynewpkg + "\n",
+ encoding=_encodings['repo.content'])
+ mydata[b'CATEGORY'] = \
+ _unicode_encode(mynewcat + "\n",
+ encoding=_encodings['repo.content'])
+ if mynewpkg != myoldpkg:
+ ebuild_data = mydata.pop(_unicode_encode(myoldpkg + '.ebuild',
+ encoding=_encodings['repo.content']), None)
+ if ebuild_data is not None:
+ mydata[_unicode_encode(mynewpkg + '.ebuild',
+ encoding=_encodings['repo.content'])] = ebuild_data
+
+ mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))
+
+ self.dbapi.cpv_remove(mycpv)
+ del self._pkg_paths[mycpv]
+ new_path = self.getname(mynewcpv)
+ self._pkg_paths[mynewcpv] = os.path.join(
+ *new_path.split(os.path.sep)[-2:])
+ # Compare paths, not the path against the tbz2 object (which would
+ # always differ and force a needless move).
+ if new_path != tbz2path:
+ self._ensure_dir(os.path.dirname(new_path))
+ _movefile(tbz2path, new_path, mysettings=self.settings)
+ self._remove_symlink(mycpv)
+ if new_path.split(os.path.sep)[-2] == "All":
+ self._create_symlink(mynewcpv)
+ self.inject(mynewcpv)
+
+ return moves
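+ # move_ent consumes one entry of a profile updates file, e.g. a list of
+ # the form ["move", "app-misc/foo", "app-misc/bar"] (names illustrative).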
+
+ def _remove_symlink(self, cpv):
+ """Remove a ${PKGDIR}/${CATEGORY}/${PF}.tbz2 symlink and also remove
+ the ${PKGDIR}/${CATEGORY} directory if empty. The file will not be
+ removed if os.path.islink() returns False."""
+ mycat, mypkg = catsplit(cpv)
+ mylink = os.path.join(self.pkgdir, mycat, mypkg + ".tbz2")
+ if os.path.islink(mylink):
+ """Only remove it if it's really a link so that this method never
+ removes a real package that was placed here to avoid a collision."""
+ os.unlink(mylink)
+ try:
+ os.rmdir(os.path.join(self.pkgdir, mycat))
+ except OSError as e:
+ if e.errno not in (errno.ENOENT,
+ errno.ENOTEMPTY, errno.EEXIST):
+ raise
+ del e
+
+ def _create_symlink(self, cpv):
+ """Create a ${PKGDIR}/${CATEGORY}/${PF}.tbz2 symlink (and
+ ${PKGDIR}/${CATEGORY} directory, if necessary). Any file that may
+ exist in the location of the symlink will first be removed."""
+ mycat, mypkg = catsplit(cpv)
+ full_path = os.path.join(self.pkgdir, mycat, mypkg + ".tbz2")
+ self._ensure_dir(os.path.dirname(full_path))
+ try:
+ os.unlink(full_path)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ os.symlink(os.path.join("..", "All", mypkg + ".tbz2"), full_path)
+
+ def prevent_collision(self, cpv):
+ """Make sure that the file location ${PKGDIR}/All/${PF}.tbz2 is safe to
+ use for a given cpv. If a collision will occur with an existing
+ package from another category, the existing package will be bumped to
+ ${PKGDIR}/${CATEGORY}/${PF}.tbz2 so that both can coexist."""
+ if not self._all_directory:
+ return
+
+ # Copy group permissions for new directories that
+ # may have been created.
+ for path in ("All", catsplit(cpv)[0]):
+ path = os.path.join(self.pkgdir, path)
+ self._ensure_dir(path)
+ if not os.access(path, os.W_OK):
+ raise PermissionDenied("access('%s', W_OK)" % path)
+
+ full_path = self.getname(cpv)
+ if "All" == full_path.split(os.path.sep)[-2]:
+ return
+ """Move a colliding package if it exists. Code below this point only
+ executes in rare cases."""
+ mycat, mypkg = catsplit(cpv)
+ myfile = mypkg + ".tbz2"
+ mypath = os.path.join("All", myfile)
+ dest_path = os.path.join(self.pkgdir, mypath)
+
+ try:
+ st = os.lstat(dest_path)
+ except OSError:
+ st = None
+ else:
+ if stat.S_ISLNK(st.st_mode):
+ st = None
+ try:
+ os.unlink(dest_path)
+ except OSError:
+ if os.path.exists(dest_path):
+ raise
+
+ if st is not None:
+ # For invalid packages, other_cat could be None.
+ other_cat = portage.xpak.tbz2(dest_path).getfile(b"CATEGORY")
+ if other_cat:
+ other_cat = _unicode_decode(other_cat,
+ encoding=_encodings['repo.content'], errors='replace')
+ other_cat = other_cat.strip()
+ other_cpv = other_cat + "/" + mypkg
+ self._move_from_all(other_cpv)
+ self.inject(other_cpv)
+ self._move_to_all(cpv)
+
+ def _ensure_dir(self, path):
+ """
+ Create the specified directory. Also, copy gid and group mode
+ bits from self.pkgdir if possible.
+ @param path: Absolute path of the directory to be created.
+ @type path: String
+ """
+ try:
+ pkgdir_st = os.stat(self.pkgdir)
+ except OSError:
+ ensure_dirs(path)
+ return
+ pkgdir_gid = pkgdir_st.st_gid
+ pkgdir_grp_mode = 0o2070 & pkgdir_st.st_mode
+ try:
+ ensure_dirs(path, gid=pkgdir_gid, mode=pkgdir_grp_mode, mask=0)
+ except PortageException:
+ if not os.path.isdir(path):
+ raise
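+ # 0o2070 selects the setgid bit plus the group permission bits, so new
+ # directories inherit PKGDIR's group ownership and group mode.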
+
+ def _move_to_all(self, cpv):
+ """If the file exists, move it. Whether or not it exists, update state
+ for future getname() calls."""
+ mycat, mypkg = catsplit(cpv)
+ myfile = mypkg + ".tbz2"
+ self._pkg_paths[cpv] = os.path.join("All", myfile)
+ src_path = os.path.join(self.pkgdir, mycat, myfile)
+ try:
+ mystat = os.lstat(src_path)
+ except OSError as e:
+ mystat = None
+ if mystat and stat.S_ISREG(mystat.st_mode):
+ self._ensure_dir(os.path.join(self.pkgdir, "All"))
+ dest_path = os.path.join(self.pkgdir, "All", myfile)
+ _movefile(src_path, dest_path, mysettings=self.settings)
+ self._create_symlink(cpv)
+ self.inject(cpv)
+
+ def _move_from_all(self, cpv):
+ """Move a package from ${PKGDIR}/All/${PF}.tbz2 to
+ ${PKGDIR}/${CATEGORY}/${PF}.tbz2 and update state for future getname() calls."""
+ self._remove_symlink(cpv)
+ mycat, mypkg = catsplit(cpv)
+ myfile = mypkg + ".tbz2"
+ mypath = os.path.join(mycat, myfile)
+ dest_path = os.path.join(self.pkgdir, mypath)
+ self._ensure_dir(os.path.dirname(dest_path))
+ src_path = os.path.join(self.pkgdir, "All", myfile)
+ _movefile(src_path, dest_path, mysettings=self.settings)
+ self._pkg_paths[cpv] = mypath
+
+ def populate(self, getbinpkgs=0):
+ "populates the binarytree"
+
+ if self._populating:
+ return
+
+ pkgindex_lock = None
+ try:
+ if os.access(self.pkgdir, os.W_OK):
+ pkgindex_lock = lockfile(self._pkgindex_file,
+ wantnewlockfile=1)
+ self._populating = True
+ self._populate(getbinpkgs)
+ finally:
+ if pkgindex_lock:
+ unlockfile(pkgindex_lock)
+ self._populating = False
+
+ def _populate(self, getbinpkgs=0):
+ if (not os.path.isdir(self.pkgdir) and not getbinpkgs):
+ return 0
+
+ # Clear all caches in case populate is called multiple times
+ # as may be the case when _global_updates calls populate()
+ # prior to performing package moves since it only wants to
+ # operate on local packages (getbinpkgs=0).
+ self._remotepkgs = None
+ self.dbapi._clear_cache()
+ self.dbapi._aux_cache.clear()
+ if True:
+ pkg_paths = {}
+ self._pkg_paths = pkg_paths
+ dirs = listdir(self.pkgdir, dirsonly=True, EmptyOnError=True)
+ if "All" in dirs:
+ dirs.remove("All")
+ dirs.sort()
+ dirs.insert(0, "All")
+ pkgindex = self._load_pkgindex()
+ pf_index = None
+ if not self._pkgindex_version_supported(pkgindex):
+ pkgindex = self._new_pkgindex()
+ header = pkgindex.header
+ metadata = {}
+ for d in pkgindex.packages:
+ metadata[d["CPV"]] = d
+ update_pkgindex = False
+ for mydir in dirs:
+ for myfile in listdir(os.path.join(self.pkgdir, mydir)):
+ if not myfile.endswith(".tbz2"):
+ continue
+ mypath = os.path.join(mydir, myfile)
+ full_path = os.path.join(self.pkgdir, mypath)
+ s = os.lstat(full_path)
+ if stat.S_ISLNK(s.st_mode):
+ continue
+
+ # Validate data from the package index and try to avoid
+ # reading the xpak if possible.
+ if mydir != "All":
+ possibilities = None
+ d = metadata.get(mydir+"/"+myfile[:-5])
+ if d:
+ possibilities = [d]
+ else:
+ if pf_index is None:
+ pf_index = {}
+ for mycpv in metadata:
+ mycat, mypf = catsplit(mycpv)
+ pf_index.setdefault(
+ mypf, []).append(metadata[mycpv])
+ possibilities = pf_index.get(myfile[:-5])
+ if possibilities:
+ match = None
+ for d in possibilities:
+ try:
+ if long(d["MTIME"]) != s[stat.ST_MTIME]:
+ continue
+ except (KeyError, ValueError):
+ continue
+ try:
+ if long(d["SIZE"]) != long(s.st_size):
+ continue
+ except (KeyError, ValueError):
+ continue
+ if not self._pkgindex_keys.difference(d):
+ match = d
+ break
+ if match:
+ mycpv = match["CPV"]
+ if mycpv in pkg_paths:
+ # discard duplicates (All/ is preferred)
+ continue
+ pkg_paths[mycpv] = mypath
+ # update the path if the package has been moved
+ oldpath = d.get("PATH")
+ if oldpath and oldpath != mypath:
+ update_pkgindex = True
+ if mypath != mycpv + ".tbz2":
+ d["PATH"] = mypath
+ if not oldpath:
+ update_pkgindex = True
+ else:
+ d.pop("PATH", None)
+ if oldpath:
+ update_pkgindex = True
+ self.dbapi.cpv_inject(mycpv)
+ if not self.dbapi._aux_cache_keys.difference(d):
+ aux_cache = self.dbapi._aux_cache_slot_dict()
+ for k in self.dbapi._aux_cache_keys:
+ aux_cache[k] = d[k]
+ self.dbapi._aux_cache[mycpv] = aux_cache
+ continue
+ if not os.access(full_path, os.R_OK):
+ writemsg(_("!!! Permission denied to read " \
+ "binary package: '%s'\n") % full_path,
+ noiselevel=-1)
+ self.invalids.append(myfile[:-5])
+ continue
+ metadata_bytes = portage.xpak.tbz2(full_path).get_data()
+ mycat = _unicode_decode(metadata_bytes.get(b"CATEGORY", ""),
+ encoding=_encodings['repo.content'], errors='replace')
+ mypf = _unicode_decode(metadata_bytes.get(b"PF", ""),
+ encoding=_encodings['repo.content'], errors='replace')
+ slot = _unicode_decode(metadata_bytes.get(b"SLOT", ""),
+ encoding=_encodings['repo.content'], errors='replace')
+ mypkg = myfile[:-5]
+ if not mycat or not mypf or not slot:
+ #old-style or corrupt package
+ writemsg(_("\n!!! Invalid binary package: '%s'\n") % full_path,
+ noiselevel=-1)
+ missing_keys = []
+ if not mycat:
+ missing_keys.append("CATEGORY")
+ if not mypf:
+ missing_keys.append("PF")
+ if not slot:
+ missing_keys.append("SLOT")
+ msg = []
+ if missing_keys:
+ missing_keys.sort()
+ msg.append(_("Missing metadata key(s): %s.") % \
+ ", ".join(missing_keys))
+ msg.append(_(" This binary package is not " \
+ "recoverable and should be deleted."))
+ for line in textwrap.wrap("".join(msg), 72):
+ writemsg("!!! %s\n" % line, noiselevel=-1)
+ self.invalids.append(mypkg)
+ continue
+ mycat = mycat.strip()
+ slot = slot.strip()
+ if mycat != mydir and mydir != "All":
+ continue
+ if mypkg != mypf.strip():
+ continue
+ mycpv = mycat + "/" + mypkg
+ if mycpv in pkg_paths:
+ # All is first, so it's preferred.
+ continue
+ if not self.dbapi._category_re.match(mycat):
+ writemsg(_("!!! Binary package has an " \
+ "unrecognized category: '%s'\n") % full_path,
+ noiselevel=-1)
+ writemsg(_("!!! '%s' has a category that is not" \
+ " listed in %setc/portage/categories\n") % \
+ (mycpv, self.settings["PORTAGE_CONFIGROOT"]),
+ noiselevel=-1)
+ continue
+ pkg_paths[mycpv] = mypath
+ self.dbapi.cpv_inject(mycpv)
+ update_pkgindex = True
+ d = metadata.get(mycpv, {})
+ if d:
+ try:
+ if long(d["MTIME"]) != s[stat.ST_MTIME]:
+ d.clear()
+ except (KeyError, ValueError):
+ d.clear()
+ if d:
+ try:
+ if long(d["SIZE"]) != long(s.st_size):
+ d.clear()
+ except (KeyError, ValueError):
+ d.clear()
+
+ d["CPV"] = mycpv
+ d["SLOT"] = slot
+ d["MTIME"] = str(s[stat.ST_MTIME])
+ d["SIZE"] = str(s.st_size)
+
+ d.update(zip(self._pkgindex_aux_keys,
+ self.dbapi.aux_get(mycpv, self._pkgindex_aux_keys)))
+ try:
+ self._eval_use_flags(mycpv, d)
+ except portage.exception.InvalidDependString:
+ writemsg(_("!!! Invalid binary package: '%s'\n") % \
+ self.getname(mycpv), noiselevel=-1)
+ self.dbapi.cpv_remove(mycpv)
+ del pkg_paths[mycpv]
+
+ # record location if it's non-default
+ if mypath != mycpv + ".tbz2":
+ d["PATH"] = mypath
+ else:
+ d.pop("PATH", None)
+ metadata[mycpv] = d
+ if not self.dbapi._aux_cache_keys.difference(d):
+ aux_cache = self.dbapi._aux_cache_slot_dict()
+ for k in self.dbapi._aux_cache_keys:
+ aux_cache[k] = d[k]
+ self.dbapi._aux_cache[mycpv] = aux_cache
+
+ for cpv in list(metadata):
+ if cpv not in pkg_paths:
+ del metadata[cpv]
+
+ # Do not bother to write the Packages index if $PKGDIR/All/ exists
+ # since it will provide no benefit due to the need to read CATEGORY
+ # from xpak.
+ if update_pkgindex and os.access(self.pkgdir, os.W_OK):
+ del pkgindex.packages[:]
+ pkgindex.packages.extend(iter(metadata.values()))
+ self._update_pkgindex_header(pkgindex.header)
+ f = atomic_ofstream(self._pkgindex_file)
+ pkgindex.write(f)
+ f.close()
+
+ if getbinpkgs and not self.settings["PORTAGE_BINHOST"]:
+ writemsg(_("!!! PORTAGE_BINHOST unset, but use is requested.\n"),
+ noiselevel=-1)
+
+ if not getbinpkgs or 'PORTAGE_BINHOST' not in self.settings:
+ self.populated=1
+ return
+ self._remotepkgs = {}
+ for base_url in self.settings["PORTAGE_BINHOST"].split():
+ parsed_url = urlparse(base_url)
+ host = parsed_url.netloc
+ port = parsed_url.port
+ user = None
+ passwd = None
+ user_passwd = ""
+ if "@" in host:
+ user, host = host.split("@", 1)
+ user_passwd = user + "@"
+ if ":" in user:
+ user, passwd = user.split(":", 1)
+ port_args = []
+ if port is not None:
+ port_str = ":%s" % (port,)
+ if host.endswith(port_str):
+ host = host[:-len(port_str)]
+ pkgindex_file = os.path.join(self.settings["EROOT"], CACHE_PATH, "binhost",
+ host, parsed_url.path.lstrip("/"), "Packages")
+ pkgindex = self._new_pkgindex()
+ try:
+ f = io.open(_unicode_encode(pkgindex_file,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace')
+ try:
+ pkgindex.read(f)
+ finally:
+ f.close()
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ local_timestamp = pkgindex.header.get("TIMESTAMP", None)
+ rmt_idx = self._new_pkgindex()
+ proc = None
+ tmp_filename = None
+ try:
+ # urlparse.urljoin() only works correctly with recognized
+ # protocols and requires the base url to have a trailing
+ # slash, so join manually...
+ url = base_url.rstrip("/") + "/Packages"
+ try:
+ f = urllib_request_urlopen(url)
+ except IOError:
+ path = parsed_url.path.rstrip("/") + "/Packages"
+
+ if parsed_url.scheme == 'sftp':
+ # The sftp command complains about 'Illegal seek' if
+ # we try to make it write to /dev/stdout, so use a
+ # temp file instead.
+ fd, tmp_filename = tempfile.mkstemp()
+ os.close(fd)
+ if port is not None:
+ port_args = ['-P', "%s" % (port,)]
+ proc = subprocess.Popen(['sftp'] + port_args + \
+ [user_passwd + host + ":" + path, tmp_filename])
+ if proc.wait() != os.EX_OK:
+ raise
+ f = open(tmp_filename, 'rb')
+ elif parsed_url.scheme == 'ssh':
+ if port is not None:
+ port_args = ['-p', "%s" % (port,)]
+ proc = subprocess.Popen(['ssh'] + port_args + \
+ [user_passwd + host, '--', 'cat', path],
+ stdout=subprocess.PIPE)
+ f = proc.stdout
+ else:
+ setting = 'FETCHCOMMAND_' + parsed_url.scheme.upper()
+ fcmd = self.settings.get(setting)
+ if not fcmd:
+ raise
+ fd, tmp_filename = tempfile.mkstemp()
+ tmp_dirname, tmp_basename = os.path.split(tmp_filename)
+ os.close(fd)
+ success = portage.getbinpkg.file_get(url,
+ tmp_dirname, fcmd=fcmd, filename=tmp_basename)
+ if not success:
+ raise EnvironmentError("%s failed" % (setting,))
+ f = open(tmp_filename, 'rb')
+
+ f_dec = codecs.iterdecode(f,
+ _encodings['repo.content'], errors='replace')
+ try:
+ rmt_idx.readHeader(f_dec)
+ remote_timestamp = rmt_idx.header.get("TIMESTAMP", None)
+ if not remote_timestamp:
+ # no timestamp in the header, something's wrong
+ pkgindex = None
+ writemsg(_("\n\n!!! Binhost package index " \
+ " has no TIMESTAMP field.\n"), noiselevel=-1)
+ else:
+ if not self._pkgindex_version_supported(rmt_idx):
+ writemsg(_("\n\n!!! Binhost package index version" \
+ " is not supported: '%s'\n") % \
+ rmt_idx.header.get("VERSION"), noiselevel=-1)
+ pkgindex = None
+ elif local_timestamp != remote_timestamp:
+ rmt_idx.readBody(f_dec)
+ pkgindex = rmt_idx
+ finally:
+ # Timeout after 5 seconds, in case close() blocks
+ # indefinitely (see bug #350139).
+ try:
+ try:
+ AlarmSignal.register(5)
+ f.close()
+ finally:
+ AlarmSignal.unregister()
+ except AlarmSignal:
+ writemsg("\n\n!!! %s\n" % \
+ _("Timed out while closing connection to binhost"),
+ noiselevel=-1)
+ except EnvironmentError as e:
+ writemsg(_("\n\n!!! Error fetching binhost package" \
+ " info from '%s'\n") % base_url)
+ writemsg("!!! %s\n\n" % str(e))
+ del e
+ pkgindex = None
+ if proc is not None:
+ if proc.poll() is None:
+ proc.kill()
+ proc.wait()
+ proc = None
+ if tmp_filename is not None:
+ try:
+ os.unlink(tmp_filename)
+ except OSError:
+ pass
+ if pkgindex is rmt_idx:
+ pkgindex.modified = False # don't update the header
+ try:
+ ensure_dirs(os.path.dirname(pkgindex_file))
+ f = atomic_ofstream(pkgindex_file)
+ pkgindex.write(f)
+ f.close()
+ except (IOError, PortageException):
+ if os.access(os.path.dirname(pkgindex_file), os.W_OK):
+ raise
+ # The current user doesn't have permission to cache the
+ # file, but that's alright.
+ if pkgindex:
+ # Organize remote package list as a cpv -> metadata map.
+ remotepkgs = _pkgindex_cpv_map_latest_build(pkgindex)
+ remote_base_uri = pkgindex.header.get("URI", base_url)
+ for cpv, remote_metadata in remotepkgs.items():
+ remote_metadata["BASE_URI"] = remote_base_uri
+ self._pkgindex_uri[cpv] = url
+ self._remotepkgs.update(remotepkgs)
+ self._remote_has_index = True
+ for cpv in remotepkgs:
+ self.dbapi.cpv_inject(cpv)
+ if True:
+ # Remote package instances override local package
+ # if they are not identical.
+ hash_names = ["SIZE"] + self._pkgindex_hashes
+ for cpv, local_metadata in metadata.items():
+ remote_metadata = self._remotepkgs.get(cpv)
+ if remote_metadata is None:
+ continue
+ # Use digests to compare identity.
+ identical = True
+ for hash_name in hash_names:
+ local_value = local_metadata.get(hash_name)
+ if local_value is None:
+ continue
+ remote_value = remote_metadata.get(hash_name)
+ if remote_value is None:
+ continue
+ if local_value != remote_value:
+ identical = False
+ break
+ if identical:
+ del self._remotepkgs[cpv]
+ else:
+ # Override the local package in the aux_get cache.
+ self.dbapi._aux_cache[cpv] = remote_metadata
+ else:
+ # Local package instances override remote instances.
+ for cpv in metadata:
+ self._remotepkgs.pop(cpv, None)
+ continue
+ try:
+ chunk_size = long(self.settings["PORTAGE_BINHOST_CHUNKSIZE"])
+ if chunk_size < 8:
+ chunk_size = 8
+ except (ValueError, KeyError):
+ chunk_size = 3000
+ writemsg_stdout("\n")
+ writemsg_stdout(
+ colorize("GOOD", _("Fetching bininfo from ")) + \
+ re.sub(r'//(.+):.+@(.+)/', r'//\1:*password*@\2/', base_url) + "\n")
+ remotepkgs = portage.getbinpkg.dir_get_metadata(
+ base_url, chunk_size=chunk_size)
+
+ for mypkg, remote_metadata in remotepkgs.items():
+ mycat = remote_metadata.get("CATEGORY")
+ if mycat is None:
+ #old-style or corrupt package
+ writemsg(_("!!! Invalid remote binary package: %s\n") % mypkg,
+ noiselevel=-1)
+ continue
+ mycat = mycat.strip()
+ fullpkg = mycat+"/"+mypkg[:-5]
+
+ if fullpkg in metadata:
+ # When using this old protocol, comparison with the remote
+ # package isn't supported, so the local package is always
+ # preferred even if getbinpkgsonly is enabled.
+ continue
+
+ if not self.dbapi._category_re.match(mycat):
+ writemsg(_("!!! Remote binary package has an " \
+ "unrecognized category: '%s'\n") % fullpkg,
+ noiselevel=-1)
+ writemsg(_("!!! '%s' has a category that is not" \
+ " listed in %setc/portage/categories\n") % \
+ (fullpkg, self.settings["PORTAGE_CONFIGROOT"]),
+ noiselevel=-1)
+ continue
+ mykey = portage.cpv_getkey(fullpkg)
+ try:
+ # invalid tbz2's can hurt things.
+ self.dbapi.cpv_inject(fullpkg)
+ for k, v in remote_metadata.items():
+ remote_metadata[k] = v.strip()
+ remote_metadata["BASE_URI"] = base_url
+
+ # Eliminate metadata values with names that digestCheck
+ # uses, since they are not valid when using the old
+ # protocol. Typically this is needed for SIZE metadata
+ # which corresponds to the size of the unpacked files
+ # rather than the binpkg file size, triggering digest
+ # verification failures as reported in bug #303211.
+ remote_metadata.pop('SIZE', None)
+ for k in portage.checksum.hashfunc_map:
+ remote_metadata.pop(k, None)
+
+ self._remotepkgs[fullpkg] = remote_metadata
+ except SystemExit as e:
+ raise
+ except:
+ writemsg(_("!!! Failed to inject remote binary package: %s\n") % fullpkg,
+ noiselevel=-1)
+ continue
+ self.populated=1
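+ # The binhost logic above prefers a remote Packages index; only when a
+ # remote lacks one does it fall back to the legacy protocol that fetches
+ # per-package metadata via portage.getbinpkg.dir_get_metadata().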
+
+ def inject(self, cpv, filename=None):
+ """Add a freshly built package to the database. This updates
+ $PKGDIR/Packages with the new package metadata (including MD5).
+ @param cpv: The cpv of the new package to inject
+ @type cpv: string
+ @param filename: File path of the package to inject, or None if it's
+ already in the location returned by getname()
+ @type filename: string
+ @rtype: None
+ """
+ mycat, mypkg = catsplit(cpv)
+ if not self.populated:
+ self.populate()
+ if filename is None:
+ full_path = self.getname(cpv)
+ else:
+ full_path = filename
+ try:
+ s = os.stat(full_path)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ writemsg(_("!!! Binary package does not exist: '%s'\n") % full_path,
+ noiselevel=-1)
+ return
+ mytbz2 = portage.xpak.tbz2(full_path)
+ slot = mytbz2.getfile("SLOT")
+ if slot is None:
+ writemsg(_("!!! Invalid binary package: '%s'\n") % full_path,
+ noiselevel=-1)
+ return
+ slot = slot.strip()
+ self.dbapi.cpv_inject(cpv)
+
+ # Reread the Packages index (in case it's been changed by another
+ # process) and then update it, all while holding a lock.
+ pkgindex_lock = None
+ created_symlink = False
+ try:
+ pkgindex_lock = lockfile(self._pkgindex_file,
+ wantnewlockfile=1)
+ if filename is not None:
+ new_filename = self.getname(cpv)
+ try:
+ samefile = os.path.samefile(filename, new_filename)
+ except OSError:
+ samefile = False
+ if not samefile:
+ self._ensure_dir(os.path.dirname(new_filename))
+ _movefile(filename, new_filename, mysettings=self.settings)
+ if self._all_directory and \
+ self.getname(cpv).split(os.path.sep)[-2] == "All":
+ self._create_symlink(cpv)
+ created_symlink = True
+ pkgindex = self._load_pkgindex()
+
+ if not self._pkgindex_version_supported(pkgindex):
+ pkgindex = self._new_pkgindex()
+
+ # Discard remote metadata to ensure that _pkgindex_entry
+ # gets the local metadata. This also updates state for future
+ # isremote calls.
+ if self._remotepkgs is not None:
+ self._remotepkgs.pop(cpv, None)
+
+ # Discard cached metadata to ensure that _pkgindex_entry
+ # doesn't return stale metadata.
+ self.dbapi._aux_cache.pop(cpv, None)
+
+ try:
+ d = self._pkgindex_entry(cpv)
+ except portage.exception.InvalidDependString:
+ writemsg(_("!!! Invalid binary package: '%s'\n") % \
+ self.getname(cpv), noiselevel=-1)
+ self.dbapi.cpv_remove(cpv)
+ del self._pkg_paths[cpv]
+ return
+
+ # If found, remove package(s) with duplicate path.
+ path = d.get("PATH", "")
+ for i in range(len(pkgindex.packages) - 1, -1, -1):
+ d2 = pkgindex.packages[i]
+ if path and path == d2.get("PATH"):
+ # Handle path collisions in $PKGDIR/All
+ # when CPV is not identical.
+ del pkgindex.packages[i]
+ elif cpv == d2.get("CPV"):
+ if path == d2.get("PATH", ""):
+ del pkgindex.packages[i]
+ elif created_symlink and not d2.get("PATH", ""):
+ # Delete entry for the package that was just
+ # overwritten by a symlink to this package.
+ del pkgindex.packages[i]
+
+ pkgindex.packages.append(d)
+
+ self._update_pkgindex_header(pkgindex.header)
+ f = atomic_ofstream(os.path.join(self.pkgdir, "Packages"))
+ pkgindex.write(f)
+ f.close()
+ finally:
+ if pkgindex_lock:
+ unlockfile(pkgindex_lock)
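+ # Typical call after a fresh build (path hypothetical):
+ # mybintree.inject("app-misc/foo-1.0", filename="/tmp/foo-1.0.tbz2")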
+
+ def _pkgindex_entry(self, cpv):
+ """
+ Performs checksums and evaluates USE flag conditionals.
+ Raises InvalidDependString if necessary.
+ @rtype: dict
+ @returns: a dict containing the Packages index entry for the given cpv.
+ """
+
+ pkg_path = self.getname(cpv)
+
+ d = dict(zip(self._pkgindex_aux_keys,
+ self.dbapi.aux_get(cpv, self._pkgindex_aux_keys)))
+
+ d.update(perform_multiple_checksums(
+ pkg_path, hashes=self._pkgindex_hashes))
+
+ d["CPV"] = cpv
+ st = os.stat(pkg_path)
+ d["MTIME"] = str(st[stat.ST_MTIME])
+ d["SIZE"] = str(st.st_size)
+
+ rel_path = self._pkg_paths[cpv]
+ # record location if it's non-default
+ if rel_path != cpv + ".tbz2":
+ d["PATH"] = rel_path
+
+ self._eval_use_flags(cpv, d)
+ return d
+
+ def _new_pkgindex(self):
+ return portage.getbinpkg.PackageIndex(
+ allowed_pkg_keys=self._pkgindex_allowed_pkg_keys,
+ default_header_data=self._pkgindex_default_header_data,
+ default_pkg_data=self._pkgindex_default_pkg_data,
+ inherited_keys=self._pkgindex_inherited_keys,
+ translated_keys=self._pkgindex_translated_keys)
+
+ def _update_pkgindex_header(self, header):
+ portdir = normalize_path(os.path.realpath(self.settings["PORTDIR"]))
+ profiles_base = os.path.join(portdir, "profiles") + os.path.sep
+ if self.settings.profile_path:
+ profile_path = normalize_path(
+ os.path.realpath(self.settings.profile_path))
+ if profile_path.startswith(profiles_base):
+ profile_path = profile_path[len(profiles_base):]
+ header["PROFILE"] = profile_path
+ header["VERSION"] = str(self._pkgindex_version)
+ base_uri = self.settings.get("PORTAGE_BINHOST_HEADER_URI")
+ if base_uri:
+ header["URI"] = base_uri
+ else:
+ header.pop("URI", None)
+ for k in self._pkgindex_header_keys:
+ v = self.settings.get(k, None)
+ if v:
+ header[k] = v
+ else:
+ header.pop(k, None)
+
+ def _pkgindex_version_supported(self, pkgindex):
+ version = pkgindex.header.get("VERSION")
+ if version:
+ try:
+ if int(version) <= self._pkgindex_version:
+ return True
+ except ValueError:
+ pass
+ return False
+
+ def _eval_use_flags(self, cpv, metadata):
+ use = frozenset(metadata["USE"].split())
+ raw_use = use
+ iuse = set(f.lstrip("-+") for f in metadata["IUSE"].split())
+ use = [f for f in use if f in iuse]
+ use.sort()
+ metadata["USE"] = " ".join(use)
+ for k in self._pkgindex_use_evaluated_keys:
+ if k.endswith('DEPEND'):
+ token_class = Atom
+ else:
+ token_class = None
+
+ try:
+ deps = metadata[k]
+ deps = use_reduce(deps, uselist=raw_use, token_class=token_class)
+ deps = paren_enclose(deps)
+ except portage.exception.InvalidDependString as e:
+ writemsg("%s: %s\n" % (k, str(e)),
+ noiselevel=-1)
+ raise
+ metadata[k] = deps
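+ # Sketch: with USE="ssl" in IUSE and RDEPEND="ssl? ( dev-libs/openssl )",
+ # use_reduce/paren_enclose above rewrite RDEPEND to "dev-libs/openssl"
+ # before the value is stored in the Packages index (atom illustrative).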
+
+ def exists_specific(self, cpv):
+ if not self.populated:
+ self.populate()
+ return self.dbapi.match(
+ dep_expand("="+cpv, mydb=self.dbapi, settings=self.settings))
+
+ def dep_bestmatch(self, mydep):
+ "compatibility method -- all matches, not just visible ones"
+ if not self.populated:
+ self.populate()
+ writemsg("\n\n", 1)
+ writemsg("mydep: %s\n" % mydep, 1)
+ mydep = dep_expand(mydep, mydb=self.dbapi, settings=self.settings)
+ writemsg("mydep: %s\n" % mydep, 1)
+ mykey = dep_getkey(mydep)
+ writemsg("mykey: %s\n" % mykey, 1)
+ mymatch = best(match_from_list(mydep,self.dbapi.cp_list(mykey)))
+ writemsg("mymatch: %s\n" % mymatch, 1)
+ if mymatch is None:
+ return ""
+ return mymatch
+
+ def getname(self, pkgname):
+ """Returns a file location for this package. The default location is
+ ${PKGDIR}/All/${PF}.tbz2, but will be ${PKGDIR}/${CATEGORY}/${PF}.tbz2
+ in the rare event of a collision. The prevent_collision() method can
+ be called to ensure that ${PKGDIR}/All/${PF}.tbz2 is available for a
+ specific cpv."""
+ if not self.populated:
+ self.populate()
+ mycpv = pkgname
+ mypath = self._pkg_paths.get(mycpv, None)
+ if mypath:
+ return os.path.join(self.pkgdir, mypath)
+ mycat, mypkg = catsplit(mycpv)
+ if self._all_directory:
+ mypath = os.path.join("All", mypkg + ".tbz2")
+ if mypath in self._pkg_paths.values():
+ mypath = os.path.join(mycat, mypkg + ".tbz2")
+ else:
+ mypath = os.path.join(mycat, mypkg + ".tbz2")
+ self._pkg_paths[mycpv] = mypath # cache for future lookups
+ return os.path.join(self.pkgdir, mypath)
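+ # e.g. getname("app-misc/foo-1.0") usually yields
+ # ${PKGDIR}/All/foo-1.0.tbz2 when the All directory is in use, or
+ # ${PKGDIR}/app-misc/foo-1.0.tbz2 after a collision (cpv hypothetical).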
+
+ def isremote(self, pkgname):
+ """Returns true if the package is kept remotely and it has not been
+ downloaded (or it is only partially downloaded)."""
+ if self._remotepkgs is None or pkgname not in self._remotepkgs:
+ return False
+ # Presence in self._remotepkgs implies that it's remote. When a
+ # package is downloaded, state is updated by self.inject().
+ return True
+
+ def get_pkgindex_uri(self, pkgname):
+ """Returns the URI to the Packages file for a given package."""
+ return self._pkgindex_uri.get(pkgname)
+
+ def gettbz2(self, pkgname):
+ """Fetches the package from a remote site, if necessary. Attempts to
+ resume if the file appears to be partially downloaded."""
+ tbz2_path = self.getname(pkgname)
+ tbz2name = os.path.basename(tbz2_path)
+ resume = False
+ if os.path.exists(tbz2_path):
+ if (tbz2name not in self.invalids):
+ return
+ else:
+ resume = True
+ writemsg(_("Resuming download of this tbz2, but it is possible that it is corrupt.\n"),
+ noiselevel=-1)
+
+ mydest = os.path.dirname(self.getname(pkgname))
+ self._ensure_dir(mydest)
+ # urljoin doesn't work correctly with unrecognized protocols like sftp
+ if self._remote_has_index:
+ rel_url = self._remotepkgs[pkgname].get("PATH")
+ if not rel_url:
+ rel_url = pkgname+".tbz2"
+ remote_base_uri = self._remotepkgs[pkgname]["BASE_URI"]
+ url = remote_base_uri.rstrip("/") + "/" + rel_url.lstrip("/")
+ else:
+ url = self.settings["PORTAGE_BINHOST"].rstrip("/") + "/" + tbz2name
+ protocol = urlparse(url)[0]
+ fcmd_prefix = "FETCHCOMMAND"
+ if resume:
+ fcmd_prefix = "RESUMECOMMAND"
+ fcmd = self.settings.get(fcmd_prefix + "_" + protocol.upper())
+ if not fcmd:
+ fcmd = self.settings.get(fcmd_prefix)
+ success = portage.getbinpkg.file_get(url, mydest, fcmd=fcmd)
+ if not success:
+ try:
+ os.unlink(self.getname(pkgname))
+ except OSError:
+ pass
+ raise portage.exception.FileNotFound(mydest)
+ self.inject(pkgname)
+
+ def _load_pkgindex(self):
+ pkgindex = self._new_pkgindex()
+ try:
+ f = io.open(_unicode_encode(self._pkgindex_file,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace')
+ except EnvironmentError:
+ pass
+ else:
+ try:
+ pkgindex.read(f)
+ finally:
+ f.close()
+ return pkgindex
+
+ def digestCheck(self, pkg):
+ """
+ Verify digests for the given package and raise DigestException
+ if verification fails.
+ @rtype: bool
+ @returns: True if digests could be located, False otherwise.
+ """
+ cpv = pkg
+ if not isinstance(cpv, basestring):
+ cpv = pkg.cpv
+ pkg = None
+
+ pkg_path = self.getname(cpv)
+ metadata = None
+ if self._remotepkgs is None or cpv not in self._remotepkgs:
+ for d in self._load_pkgindex().packages:
+ if d["CPV"] == cpv:
+ metadata = d
+ break
+ else:
+ metadata = self._remotepkgs[cpv]
+ if metadata is None:
+ return False
+
+ digests = {}
+ for k in hashfunc_map:
+ v = metadata.get(k)
+ if not v:
+ continue
+ digests[k] = v
+
+ if "SIZE" in metadata:
+ try:
+ digests["size"] = int(metadata["SIZE"])
+ except ValueError:
+ writemsg(_("!!! Malformed SIZE attribute in remote " \
+ "metadata for '%s'\n") % cpv)
+
+ if not digests:
+ return False
+
+ eout = EOutput()
+ eout.quiet = self.settings.get("PORTAGE_QUIET") == "1"
+ ok, st = _check_distfile(pkg_path, digests, eout, show_errors=0)
+ if not ok:
+ ok, reason = verify_all(pkg_path, digests)
+ if not ok:
+ raise portage.exception.DigestException(
+ (pkg_path,) + tuple(reason))
+
+ return True
+
+ def getslot(self, mycatpkg):
+ "Get a slot for a catpkg; assume it exists."
+ myslot = ""
+ try:
+ myslot = self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
+ except SystemExit:
+ raise
+ except Exception:
+ pass
+ return myslot
diff --git a/portage_with_autodep/pym/portage/dbapi/cpv_expand.py b/portage_with_autodep/pym/portage/dbapi/cpv_expand.py
new file mode 100644
index 0000000..947194c
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dbapi/cpv_expand.py
@@ -0,0 +1,106 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ["cpv_expand"]
+
+import portage
+from portage.exception import AmbiguousPackageName
+from portage.localization import _
+from portage.util import writemsg
+from portage.versions import _pkgsplit
+
+def cpv_expand(mycpv, mydb=None, use_cache=1, settings=None):
+ """Given a string (packagename or virtual) expand it into a valid
+ cat/package string. Virtuals use the mydb to determine which provided
+ virtual is a valid choice and defaults to the first element when there
+ are no installed/available candidates."""
+ myslash=mycpv.split("/")
+ mysplit = _pkgsplit(myslash[-1])
+ if settings is None:
+ try:
+ settings = mydb.settings
+ except AttributeError:
+ settings = portage.settings
+ if len(myslash)>2:
+ # this is an illegal case.
+ mysplit=[]
+ mykey=mycpv
+ elif len(myslash)==2:
+ if mysplit:
+ mykey=myslash[0]+"/"+mysplit[0]
+ else:
+ mykey=mycpv
+
+ # Since Gentoo stopped using old-style virtuals in
+ # 2011, typically it's possible to avoid getvirtuals()
+ # calls entirely. Therefore, only call getvirtuals()
+ # if the atom category is "virtual" and cp_list()
+ # returns nothing.
+ if mykey.startswith("virtual/") and \
+ hasattr(mydb, "cp_list") and \
+ not mydb.cp_list(mykey, use_cache=use_cache):
+ if hasattr(mydb, "vartree"):
+ settings._populate_treeVirtuals_if_needed(mydb.vartree)
+ virts = settings.getvirtuals().get(mykey)
+ if virts:
+ mykey_orig = mykey
+ for vkey in virts:
+ # The virtuals file can contain a versioned atom, so
+ # it may be necessary to remove the operator and
+ # version from the atom before it is passed into
+ # dbapi.cp_list().
+ if mydb.cp_list(vkey.cp):
+ mykey = str(vkey)
+ break
+ if mykey == mykey_orig:
+ mykey = str(virts[0])
+ #we only perform virtual expansion if we are passed a dbapi
+ else:
+ #specific cpv, no category, ie. "foo-1.0"
+ if mysplit:
+ myp=mysplit[0]
+ else:
+ # "foo" ?
+ myp=mycpv
+ mykey=None
+ matches=[]
+ if mydb and hasattr(mydb, "categories"):
+ for x in mydb.categories:
+ if mydb.cp_list(x+"/"+myp,use_cache=use_cache):
+ matches.append(x+"/"+myp)
+ if len(matches) > 1:
+ virtual_name_collision = False
+ if len(matches) == 2:
+ for x in matches:
+ if not x.startswith("virtual/"):
+ # Assume that the non-virtual is desired. This helps
+ # avoid the ValueError for invalid deps that come from
+ # installed packages (during reverse blocker detection,
+ # for example).
+ mykey = x
+ else:
+ virtual_name_collision = True
+ if not virtual_name_collision:
+ # AmbiguousPackageName inherits from ValueError,
+ # for backward compatibility with calling code
+ # that already handles ValueError.
+ raise AmbiguousPackageName(matches)
+ elif matches:
+ mykey=matches[0]
+
+ if not mykey and not isinstance(mydb, list):
+ if hasattr(mydb, "vartree"):
+ settings._populate_treeVirtuals_if_needed(mydb.vartree)
+ virts_p = settings.get_virts_p().get(myp)
+ if virts_p:
+ mykey = virts_p[0]
+ #again, we only perform virtual expansion if we have a dbapi (not a list)
+ if not mykey:
+ mykey="null/"+myp
+ if mysplit:
+ if mysplit[2]=="r0":
+ return mykey+"-"+mysplit[1]
+ else:
+ return mykey+"-"+mysplit[1]+"-"+mysplit[2]
+ else:
+ return mykey
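+
+# Sketch (db hypothetical): cpv_expand("foo-1.0", mydb=vardb) scans
+# mydb.categories for a unique "<cat>/foo" candidate and raises
+# AmbiguousPackageName when several non-virtual categories match.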
diff --git a/portage_with_autodep/pym/portage/dbapi/dep_expand.py b/portage_with_autodep/pym/portage/dbapi/dep_expand.py
new file mode 100644
index 0000000..ac8ccf4
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dbapi/dep_expand.py
@@ -0,0 +1,56 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ["dep_expand"]
+
+import re
+
+from portage.dbapi.cpv_expand import cpv_expand
+from portage.dep import Atom, isvalidatom
+from portage.exception import InvalidAtom
+from portage.versions import catsplit
+
+def dep_expand(mydep, mydb=None, use_cache=1, settings=None):
+ '''
+ @rtype: Atom
+ '''
+ orig_dep = mydep
+ if isinstance(orig_dep, Atom):
+ has_cat = True
+ else:
+ if not mydep:
+ return mydep
+ if mydep[0] == "*":
+ mydep = mydep[1:]
+ orig_dep = mydep
+ has_cat = '/' in orig_dep
+ if not has_cat:
+ alphanum = re.search(r'\w', orig_dep)
+ if alphanum:
+ mydep = orig_dep[:alphanum.start()] + "null/" + \
+ orig_dep[alphanum.start():]
+ try:
+ mydep = Atom(mydep, allow_repo=True)
+ except InvalidAtom:
+ # Missing '=' prefix is allowed for backward compatibility.
+ if not isvalidatom("=" + mydep, allow_repo=True):
+ raise
+ mydep = Atom('=' + mydep, allow_repo=True)
+ orig_dep = '=' + orig_dep
+ if not has_cat:
+ null_cat, pn = catsplit(mydep.cp)
+ mydep = pn
+
+ if has_cat:
+ # Optimize most common cases to avoid calling cpv_expand.
+ if not mydep.cp.startswith("virtual/"):
+ return mydep
+ if not hasattr(mydb, "cp_list") or \
+ mydb.cp_list(mydep.cp):
+ return mydep
+ # Fallback to legacy cpv_expand for old-style PROVIDE virtuals.
+ mydep = mydep.cp
+
+ expanded = cpv_expand(mydep, mydb=mydb,
+ use_cache=use_cache, settings=settings)
+ return Atom(orig_dep.replace(mydep, expanded, 1), allow_repo=True)
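+
+# Sketch: dep_expand(">=foo-1.0", mydb=portdb) fills in the missing
+# category through cpv_expand and returns an Atom such as
+# ">=app-misc/foo-1.0" (category illustrative).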
diff --git a/portage_with_autodep/pym/portage/dbapi/porttree.py b/portage_with_autodep/pym/portage/dbapi/porttree.py
new file mode 100644
index 0000000..ecf275c
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dbapi/porttree.py
@@ -0,0 +1,1168 @@
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = [
+ "close_portdbapi_caches", "FetchlistDict", "portagetree", "portdbapi"
+]
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.checksum',
+ 'portage.data:portage_gid,secpass',
+ 'portage.dbapi.dep_expand:dep_expand',
+ 'portage.dep:Atom,dep_getkey,match_from_list,use_reduce',
+ 'portage.package.ebuild.doebuild:doebuild',
+ 'portage.util:ensure_dirs,shlex_split,writemsg,writemsg_level',
+ 'portage.util.listdir:listdir',
+ 'portage.versions:best,catpkgsplit,_pkgsplit@pkgsplit,ver_regexp',
+)
+
+from portage.cache import metadata_overlay, volatile
+from portage.cache.cache_errors import CacheError
+from portage.cache.mappings import Mapping
+from portage.dbapi import dbapi
+from portage.exception import PortageException, \
+ FileNotFound, InvalidAtom, InvalidDependString, InvalidPackageName
+from portage.localization import _
+from portage.manifest import Manifest
+
+from portage import eclass_cache, auxdbkeys, \
+ eapi_is_supported, dep_check, \
+ _eapi_is_deprecated
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage import OrderedDict
+from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
+from _emerge.PollScheduler import PollScheduler
+
+import os as _os
+import io
+import stat
+import sys
+import traceback
+import warnings
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+ long = int
+
+class _repo_info(object):
+ __slots__ = ('name', 'path', 'eclass_db', 'portdir', 'portdir_overlay')
+ def __init__(self, name, path, eclass_db):
+ self.name = name
+ self.path = path
+ self.eclass_db = eclass_db
+ self.portdir = eclass_db.porttrees[0]
+ self.portdir_overlay = ' '.join(eclass_db.porttrees[1:])
+
+class portdbapi(dbapi):
+ """this tree will scan a portage directory located at root (passed to init)"""
+ portdbapi_instances = []
+ _use_mutable = True
+
+ @property
+ def _categories(self):
+ return self.settings.categories
+
+ @property
+ def porttree_root(self):
+ return self.settings.repositories.mainRepoLocation()
+
+ def __init__(self, _unused_param=None, mysettings=None):
+ """
+ @param _unused_param: deprecated, use mysettings['PORTDIR'] instead
+ @type _unused_param: None
+ @param mysettings: an immutable config instance
+ @type mysettings: portage.config
+ """
+ portdbapi.portdbapi_instances.append(self)
+
+ from portage import config
+ if mysettings:
+ self.settings = mysettings
+ else:
+ from portage import settings
+ self.settings = config(clone=settings)
+
+ if _unused_param is not None:
+ warnings.warn("The first parameter of the " + \
+ "portage.dbapi.porttree.portdbapi" + \
+ " constructor is unused since portage-2.1.8. " + \
+ "mysettings['PORTDIR'] is used instead.",
+ DeprecationWarning, stacklevel=2)
+
+ self.repositories = self.settings.repositories
+ self.treemap = self.repositories.treemap
+
+ # This is strictly for use in aux_get() doebuild calls when metadata
+ # is generated by the depend phase. It's safest to use a clone for
+ # this purpose because doebuild makes many changes to the config
+ # instance that is passed in.
+ self.doebuild_settings = config(clone=self.settings)
+ self.depcachedir = os.path.realpath(self.settings.depcachedir)
+
+ if os.environ.get("SANDBOX_ON") == "1":
+ # Make api consumers exempt from sandbox violations
+ # when doing metadata cache updates.
+ sandbox_write = os.environ.get("SANDBOX_WRITE", "").split(":")
+ if self.depcachedir not in sandbox_write:
+ sandbox_write.append(self.depcachedir)
+ os.environ["SANDBOX_WRITE"] = \
+ ":".join(filter(None, sandbox_write))
+
+ self.porttrees = list(self.settings.repositories.repoLocationList())
+ self.eclassdb = eclass_cache.cache(self.settings.repositories.mainRepoLocation())
+
+ # This is used as sanity check for aux_get(). If there is no
+ # root eclass dir, we assume that PORTDIR is invalid or
+ # missing. This check allows aux_get() to detect a missing
+ # portage tree and return early by raising a KeyError.
+ self._have_root_eclass_dir = os.path.isdir(
+ os.path.join(self.settings.repositories.mainRepoLocation(), "eclass"))
+
+ self.metadbmodule = self.settings.load_best_module("portdbapi.metadbmodule")
+
+ #if the portdbapi is "frozen", then we assume that we can cache everything (that no updates to it are happening)
+ self.xcache = {}
+ self.frozen = 0
+
+ #Create eclass dbs
+ self._repo_info = {}
+ eclass_dbs = {self.settings.repositories.mainRepoLocation() : self.eclassdb}
+ for repo in self.repositories:
+ if repo.location in self._repo_info:
+ continue
+
+ eclass_db = None
+ for eclass_location in repo.eclass_locations:
+ tree_db = eclass_dbs.get(eclass_location)
+ if tree_db is None:
+ tree_db = eclass_cache.cache(eclass_location)
+ eclass_dbs[eclass_location] = tree_db
+ if eclass_db is None:
+ eclass_db = tree_db.copy()
+ else:
+ eclass_db.append(tree_db)
+
+ self._repo_info[repo.location] = _repo_info(repo.name, repo.location, eclass_db)
+
+ #Keep a list of repo names, sorted by priority (highest priority first).
+ self._ordered_repo_name_list = tuple(reversed(self.repositories.prepos_order))
+
+ self.auxdbmodule = self.settings.load_best_module("portdbapi.auxdbmodule")
+ self.auxdb = {}
+ self._pregen_auxdb = {}
+ self._init_cache_dirs()
+ depcachedir_w_ok = os.access(self.depcachedir, os.W_OK)
+ cache_kwargs = {
+ 'gid' : portage_gid,
+ 'perms' : 0o664
+ }
+
+ # XXX: REMOVE THIS ONCE UNUSED_0 IS YANKED FROM auxdbkeys
+ # ~harring
+ filtered_auxdbkeys = [x for x in auxdbkeys if not x.startswith("UNUSED_0")]
+ filtered_auxdbkeys.sort()
+ # If secpass < 1, we don't want to write to the cache
+ # since then we won't be able to apply group permissions
+ # to the cache entries/directories.
+ if secpass < 1 or not depcachedir_w_ok:
+ for x in self.porttrees:
+ try:
+ db_ro = self.auxdbmodule(self.depcachedir, x,
+ filtered_auxdbkeys, readonly=True, **cache_kwargs)
+ except CacheError:
+ self.auxdb[x] = volatile.database(
+ self.depcachedir, x, filtered_auxdbkeys,
+ **cache_kwargs)
+ else:
+ self.auxdb[x] = metadata_overlay.database(
+ self.depcachedir, x, filtered_auxdbkeys,
+ db_rw=volatile.database, db_ro=db_ro,
+ **cache_kwargs)
+ else:
+ for x in self.porttrees:
+ if x in self.auxdb:
+ continue
+ # location, label, auxdbkeys
+ self.auxdb[x] = self.auxdbmodule(
+ self.depcachedir, x, filtered_auxdbkeys, **cache_kwargs)
+ if self.auxdbmodule is metadata_overlay.database:
+ self.auxdb[x].db_ro.ec = self._repo_info[x].eclass_db
+ if "metadata-transfer" not in self.settings.features:
+ for x in self.porttrees:
+ if x in self._pregen_auxdb:
+ continue
+ if os.path.isdir(os.path.join(x, "metadata", "cache")):
+ self._pregen_auxdb[x] = self.metadbmodule(
+ x, "metadata/cache", filtered_auxdbkeys, readonly=True)
+ try:
+ self._pregen_auxdb[x].ec = self._repo_info[x].eclass_db
+ except AttributeError:
+ pass
+ # Selectively cache metadata in order to optimize dep matching.
+ self._aux_cache_keys = set(
+ ["DEPEND", "EAPI", "INHERITED", "IUSE", "KEYWORDS", "LICENSE",
+ "PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND", "repository",
+ "RESTRICT", "SLOT", "DEFINED_PHASES", "REQUIRED_USE"])
+
+ self._aux_cache = {}
+ self._broken_ebuilds = set()
+
+ def _init_cache_dirs(self):
+ """Create /var/cache/edb/dep and adjust permissions for the portage
+ group."""
+
+ dirmode = 0o2070
+ filemode = 0o60
+ modemask = 0o2
+
+ try:
+ ensure_dirs(self.depcachedir, gid=portage_gid,
+ mode=dirmode, mask=modemask)
+ except PortageException:
+ pass
+
+ def close_caches(self):
+ if not hasattr(self, "auxdb"):
+ # unhandled exception thrown from constructor
+ return
+ for x in self.auxdb:
+ self.auxdb[x].sync()
+ self.auxdb.clear()
+
+ def flush_cache(self):
+ for x in self.auxdb.values():
+ x.sync()
+
+ def findLicensePath(self, license_name):
+ for x in reversed(self.porttrees):
+ license_path = os.path.join(x, "licenses", license_name)
+ if os.access(license_path, os.R_OK):
+ return license_path
+ return None
+
+ def findname(self,mycpv, mytree = None, myrepo = None):
+ return self.findname2(mycpv, mytree, myrepo)[0]
+
+ def getRepositoryPath(self, repository_id):
+ """
+ This function is required for GLEP 42 compliance; given a valid repository ID
+ it must return a path to the repository
+ TreeMap = { id:path }
+ """
+ return self.treemap.get(repository_id)
+
+ def getRepositoryName(self, canonical_repo_path):
+ """
+ This is the inverse of getRepositoryPath().
+ @param canonical_repo_path: the canonical path of a repository, as
+ resolved by os.path.realpath()
+ @type canonical_repo_path: String
+ @returns: The repo_name for the corresponding repository, or None
+ if the path does not correspond to a known repository
+ @rtype: String or None
+ """
+ try:
+ return self.repositories.get_name_for_location(canonical_repo_path)
+ except KeyError:
+ return None
+
+ def getRepositories(self):
+ """
+ This function is required for GLEP 42 compliance; it will return a list of
+ repository IDs
+ TreeMap = {id: path}
+ """
+ return self._ordered_repo_name_list
+
+ def getMissingRepoNames(self):
+ """
+ Returns a list of repository paths that lack profiles/repo_name.
+ """
+ return self.settings.repositories.missing_repo_names
+
+ def getIgnoredRepos(self):
+ """
+ Returns a list of repository paths that have been ignored, because
+ another repo with the same name exists.
+ """
+ return self.settings.repositories.ignored_repos
+
+ def findname2(self, mycpv, mytree=None, myrepo = None):
+ """
+ Returns the location of the CPV, and what overlay it was in.
+ Searches overlays first, then PORTDIR; this allows us to return the first
+ matching file. As opposed to starting in portdir and then doing overlays
+ second, we would have to exhaustively search the overlays until we found
+ the file we wanted.
+ If myrepo is not None it will find packages from this repository(overlay)
+ """
+ if not mycpv:
+ return (None, 0)
+
+ if myrepo is not None:
+ mytree = self.treemap.get(myrepo)
+ if mytree is None:
+ return (None, 0)
+
+ mysplit = mycpv.split("/")
+ psplit = pkgsplit(mysplit[1])
+ if psplit is None or len(mysplit) != 2:
+ raise InvalidPackageName(mycpv)
+
+ # For optimal performance in this hot spot, we do manual unicode
+ # handling here instead of using the wrapped os module.
+ encoding = _encodings['fs']
+ errors = 'strict'
+
+ if mytree:
+ mytrees = [mytree]
+ else:
+ mytrees = reversed(self.porttrees)
+
+ relative_path = mysplit[0] + _os.sep + psplit[0] + _os.sep + \
+ mysplit[1] + ".ebuild"
+
+ for x in mytrees:
+ filename = x + _os.sep + relative_path
+ if _os.access(_unicode_encode(filename,
+ encoding=encoding, errors=errors), _os.R_OK):
+ return (filename, x)
+ return (None, 0)
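+ # e.g. findname2("app-misc/foo-1.0") might return
+ # ("/usr/portage/app-misc/foo/foo-1.0.ebuild", "/usr/portage"); overlays
+ # are searched before PORTDIR (paths illustrative).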
+
+ def _metadata_process(self, cpv, ebuild_path, repo_path):
+ """
+ Create an EbuildMetadataPhase instance to generate metadata for the
+ given ebuild.
+ @rtype: EbuildMetadataPhase
+ @returns: A new EbuildMetadataPhase instance, or None if the
+ metadata cache is already valid.
+ """
+ metadata, st, emtime = self._pull_valid_cache(cpv, ebuild_path, repo_path)
+ if metadata is not None:
+ return None
+
+ process = EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
+ ebuild_mtime=emtime, metadata_callback=self._metadata_callback,
+ portdb=self, repo_path=repo_path, settings=self.doebuild_settings)
+ return process
+
+ def _metadata_callback(self, cpv, ebuild_path, repo_path, metadata, mtime):
+
+ i = metadata
+ if hasattr(metadata, "items"):
+ i = iter(metadata.items())
+ metadata = dict(i)
+
+ if metadata.get("INHERITED", False):
+ metadata["_eclasses_"] = self._repo_info[repo_path
+ ].eclass_db.get_eclass_data(metadata["INHERITED"].split())
+ else:
+ metadata["_eclasses_"] = {}
+
+ metadata.pop("INHERITED", None)
+ metadata["_mtime_"] = mtime
+
+ eapi = metadata.get("EAPI")
+ if not eapi or not eapi.strip():
+ eapi = "0"
+ metadata["EAPI"] = eapi
+ if not eapi_is_supported(eapi):
+ for k in set(metadata).difference(("_mtime_", "_eclasses_")):
+ metadata[k] = ""
+ metadata["EAPI"] = "-" + eapi.lstrip("-")
+
+ try:
+ self.auxdb[repo_path][cpv] = metadata
+ except CacheError:
+ # Normally this shouldn't happen, so we'll show
+ # a traceback for debugging purposes.
+ traceback.print_exc()
+ return metadata
+
+ def _pull_valid_cache(self, cpv, ebuild_path, repo_path):
+ try:
+ # Don't use unicode-wrapped os module, for better performance.
+ st = _os.stat(_unicode_encode(ebuild_path,
+ encoding=_encodings['fs'], errors='strict'))
+ emtime = st[stat.ST_MTIME]
+ except OSError:
+ writemsg(_("!!! aux_get(): ebuild for " \
+ "'%s' does not exist at:\n") % (cpv,), noiselevel=-1)
+ writemsg("!!! %s\n" % ebuild_path, noiselevel=-1)
+ raise KeyError(cpv)
+
+ # Pull pre-generated metadata from the metadata/cache/
+ # directory if it exists and is valid, otherwise fall
+ # back to the normal writable cache.
+ auxdbs = []
+ pregen_auxdb = self._pregen_auxdb.get(repo_path)
+ if pregen_auxdb is not None:
+ auxdbs.append(pregen_auxdb)
+ auxdbs.append(self.auxdb[repo_path])
+ eclass_db = self._repo_info[repo_path].eclass_db
+
+ doregen = True
+ for auxdb in auxdbs:
+ try:
+ metadata = auxdb[cpv]
+ except KeyError:
+ pass
+ except CacheError:
+ if auxdb is not pregen_auxdb:
+ try:
+ del auxdb[cpv]
+ except KeyError:
+ pass
+ except CacheError:
+ pass
+ else:
+ eapi = metadata.get('EAPI', '').strip()
+ if not eapi:
+ eapi = '0'
+ if not (eapi[:1] == '-' and eapi_is_supported(eapi[1:])) and \
+ emtime == metadata['_mtime_'] and \
+ eclass_db.is_eclass_data_valid(metadata['_eclasses_']):
+ doregen = False
+
+ if not doregen:
+ break
+
+ if doregen:
+ metadata = None
+
+ return (metadata, st, emtime)
+
+ def aux_get(self, mycpv, mylist, mytree=None, myrepo=None):
+ "stub code for returning auxilliary db information, such as SLOT, DEPEND, etc."
+ 'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
+ 'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or raise KeyError if error'
+ cache_me = False
+ if myrepo is not None:
+ mytree = self.treemap.get(myrepo)
+ if mytree is None:
+ raise KeyError(myrepo)
+
+ if not mytree:
+ cache_me = True
+ if not mytree and not self._known_keys.intersection(
+ mylist).difference(self._aux_cache_keys):
+ aux_cache = self._aux_cache.get(mycpv)
+ if aux_cache is not None:
+ return [aux_cache.get(x, "") for x in mylist]
+ cache_me = True
+ global auxdbkeys, auxdbkeylen
+ try:
+ cat, pkg = mycpv.split("/", 1)
+ except ValueError:
+ # Missing slash. Can't find ebuild so raise KeyError.
+ raise KeyError(mycpv)
+
+ myebuild, mylocation = self.findname2(mycpv, mytree)
+
+ if not myebuild:
+ writemsg("!!! aux_get(): %s\n" % \
+ _("ebuild not found for '%s'") % mycpv, noiselevel=1)
+ raise KeyError(mycpv)
+
+ mydata, st, emtime = self._pull_valid_cache(mycpv, myebuild, mylocation)
+ doregen = mydata is None
+
+ if doregen:
+ if myebuild in self._broken_ebuilds:
+ raise KeyError(mycpv)
+
+ self.doebuild_settings.setcpv(mycpv)
+ eapi = None
+
+ if eapi is None and \
+ 'parse-eapi-ebuild-head' in self.doebuild_settings.features:
+ eapi = portage._parse_eapi_ebuild_head(io.open(
+ _unicode_encode(myebuild,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace'))
+
+ if eapi is not None:
+ self.doebuild_settings.configdict['pkg']['EAPI'] = eapi
+
+ if eapi is not None and not portage.eapi_is_supported(eapi):
+ mydata = self._metadata_callback(
+ mycpv, myebuild, mylocation, {'EAPI':eapi}, emtime)
+ else:
+ proc = EbuildMetadataPhase(cpv=mycpv, ebuild_path=myebuild,
+ ebuild_mtime=emtime,
+ metadata_callback=self._metadata_callback, portdb=self,
+ repo_path=mylocation,
+ scheduler=PollScheduler().sched_iface,
+ settings=self.doebuild_settings)
+
+ proc.start()
+ proc.wait()
+
+ if proc.returncode != os.EX_OK:
+ self._broken_ebuilds.add(myebuild)
+ raise KeyError(mycpv)
+
+ mydata = proc.metadata
+
+ # do we have an origin repository name for the current package
+ mydata["repository"] = self.repositories.get_name_for_location(mylocation)
+ mydata["INHERITED"] = ' '.join(mydata.get("_eclasses_", []))
+ mydata["_mtime_"] = st[stat.ST_MTIME]
+
+ eapi = mydata.get("EAPI")
+ if not eapi:
+ eapi = "0"
+ mydata["EAPI"] = eapi
+ if not eapi_is_supported(eapi):
+ for k in set(mydata).difference(("_mtime_", "_eclasses_")):
+ mydata[k] = ""
+ mydata["EAPI"] = "-" + eapi.lstrip("-")
+
+ #finally, we look at our internal cache entry and return the requested data.
+ returnme = [mydata.get(x, "") for x in mylist]
+
+ if cache_me:
+ aux_cache = {}
+ for x in self._aux_cache_keys:
+ aux_cache[x] = mydata.get(x, "")
+ self._aux_cache[mycpv] = aux_cache
+
+ return returnme
+
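+ # Example (illustrative sketch; assumes a portdbapi instance named
+ # "portdb"):
+ #
+ #   slot, depend = portdb.aux_get("sys-apps/foo-1.0", ["SLOT", "DEPEND"])
+ #
+ # Values are served from the metadata cache when it is valid; otherwise
+ # the ebuild is sourced via EbuildMetadataPhase and the cache refreshed.
+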
+ def getFetchMap(self, mypkg, useflags=None, mytree=None):
+ """
+ Get the SRC_URI metadata as a dict which maps each file name to a
+ set of alternative URIs.
+
+ @param mypkg: cpv for an ebuild
+ @type mypkg: String
+ @param useflags: a collection of enabled USE flags, for evaluation of
+ conditionals
+ @type useflags: set, or None to enable all conditionals
+ @param mytree: The canonical path of the tree in which the ebuild
+ is located, or None for automatic lookup
+ @type mypkg: String
+ @returns: A dict which maps each file name to a set of alternative
+ URIs.
+ @rtype: dict
+ """
+
+ try:
+ eapi, myuris = self.aux_get(mypkg,
+ ["EAPI", "SRC_URI"], mytree=mytree)
+ except KeyError:
+ # Convert this to an InvalidDependString exception since callers
+ # already handle it.
+ raise portage.exception.InvalidDependString(
+ "getFetchMap(): aux_get() error reading "+mypkg+"; aborting.")
+
+ if not eapi_is_supported(eapi):
+ # Convert this to an InvalidDependString exception
+ # since callers already handle it.
+ raise portage.exception.InvalidDependString(
+ "getFetchMap(): '%s' has unsupported EAPI: '%s'" % \
+ (mypkg, eapi.lstrip("-")))
+
+ return _parse_uri_map(mypkg, {'EAPI':eapi,'SRC_URI':myuris},
+ use=useflags)
+
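+ # Example (illustrative sketch; assumes a portdbapi instance named
+ # "portdb"):
+ #
+ #   uri_map = portdb.getFetchMap("sys-apps/foo-1.0")
+ #   for distfile, uris in uri_map.items():
+ #       ...  # each distfile name maps to a set of alternative URIs
+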
+ def getfetchsizes(self, mypkg, useflags=None, debug=0, myrepo=None):
+ # returns a filename:size dictionary of remaining downloads
+ myebuild, mytree = self.findname2(mypkg, myrepo=myrepo)
+ if myebuild is None:
+ raise AssertionError(_("ebuild not found for '%s'") % mypkg)
+ pkgdir = os.path.dirname(myebuild)
+ mf = Manifest(pkgdir, self.settings["DISTDIR"])
+ checksums = mf.getDigests()
+ if not checksums:
+ if debug:
+ writemsg(_("[empty/missing/bad digest]: %s\n") % (mypkg,))
+ return {}
+ filesdict={}
+ myfiles = self.getFetchMap(mypkg, useflags=useflags, mytree=mytree)
+ #XXX: maybe this should be improved: take partial downloads
+ # into account? check checksums?
+ for myfile in myfiles:
+ try:
+ fetch_size = int(checksums[myfile]["size"])
+ except (KeyError, ValueError):
+ if debug:
+ writemsg(_("[bad digest]: missing %(file)s for %(pkg)s\n") % {"file":myfile, "pkg":mypkg})
+ continue
+ file_path = os.path.join(self.settings["DISTDIR"], myfile)
+ mystat = None
+ try:
+ mystat = os.stat(file_path)
+ except OSError as e:
+ pass
+ if mystat is None:
+ existing_size = 0
+ ro_distdirs = self.settings.get("PORTAGE_RO_DISTDIRS")
+ if ro_distdirs is not None:
+ for x in shlex_split(ro_distdirs):
+ try:
+ mystat = os.stat(os.path.join(x, myfile))
+ except OSError:
+ pass
+ else:
+ if mystat.st_size == fetch_size:
+ existing_size = fetch_size
+ break
+ else:
+ existing_size = mystat.st_size
+ remaining_size = fetch_size - existing_size
+ if remaining_size > 0:
+ # Assume the download is resumable.
+ filesdict[myfile] = remaining_size
+ elif remaining_size < 0:
+ # The existing file is too large and therefore corrupt.
+ filesdict[myfile] = int(checksums[myfile]["size"])
+ return filesdict
+
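+ # Example (illustrative sketch; assumes a portdbapi instance named
+ # "portdb"):
+ #
+ #   remaining = portdb.getfetchsizes("sys-apps/foo-1.0")
+ #
+ # "remaining" maps each distfile still needing a fetch to the bytes
+ # left to download (the full size again if the local file is larger
+ # than the digest allows, i.e. corrupt).
+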
+ def fetch_check(self, mypkg, useflags=None, mysettings=None, all=False, myrepo=None):
+ """
+ TODO: account for PORTAGE_RO_DISTDIRS
+ """
+ if all:
+ useflags = None
+ elif useflags is None:
+ if mysettings:
+ useflags = mysettings["USE"].split()
+ if myrepo is not None:
+ mytree = self.treemap.get(myrepo)
+ if mytree is None:
+ return False
+ else:
+ mytree = None
+
+ myfiles = self.getFetchMap(mypkg, useflags=useflags, mytree=mytree)
+ myebuild = self.findname(mypkg, myrepo=myrepo)
+ if myebuild is None:
+ raise AssertionError(_("ebuild not found for '%s'") % mypkg)
+ pkgdir = os.path.dirname(myebuild)
+ mf = Manifest(pkgdir, self.settings["DISTDIR"])
+ mysums = mf.getDigests()
+
+ failures = {}
+ for x in myfiles:
+ if not mysums or x not in mysums:
+ ok = False
+ reason = _("digest missing")
+ else:
+ try:
+ ok, reason = portage.checksum.verify_all(
+ os.path.join(self.settings["DISTDIR"], x), mysums[x])
+ except FileNotFound as e:
+ ok = False
+ reason = _("File Not Found: '%s'") % (e,)
+ if not ok:
+ failures[x] = reason
+ if failures:
+ return False
+ return True
+
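+ # Example (illustrative sketch; assumes a portdbapi instance named
+ # "portdb"):
+ #
+ #   if not portdb.fetch_check("sys-apps/foo-1.0", all=True):
+ #       ...  # some distfile is missing or fails digest verification
+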
+ def cpv_exists(self, mykey, myrepo=None):
+ "Tells us whether an actual ebuild exists on disk (no masking)"
+ cps2 = mykey.split("/")
+ cps = catpkgsplit(mykey, silent=0)
+ if not cps:
+ #invalid cat/pkg-v
+ return 0
+ if self.findname(cps[0] + "/" + cps2[1], myrepo=myrepo):
+ return 1
+ else:
+ return 0
+
+ def cp_all(self, categories=None, trees=None):
+ """
+ This returns a list of all keys in our tree or trees
+ @param categories: optional list of categories to search or
+ defaults to self.settings.categories
+ @param trees: optional list of trees to search the categories in or
+ defaults to self.porttrees
+ @rtype list of [cat/pkg,...]
+ """
+ d = {}
+ if categories is None:
+ categories = self.settings.categories
+ if trees is None:
+ trees = self.porttrees
+ for x in categories:
+ for oroot in trees:
+ for y in listdir(oroot+"/"+x, EmptyOnError=1, ignorecvs=1, dirsonly=1):
+ try:
+ atom = Atom("%s/%s" % (x, y))
+ except InvalidAtom:
+ continue
+ if atom != atom.cp:
+ continue
+ d[atom.cp] = None
+ l = list(d)
+ l.sort()
+ return l
+
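+ # Example (illustrative sketch; assumes a portdbapi instance named
+ # "portdb"):
+ #
+ #   portdb.cp_all()  # e.g. ["app-arch/tar", "sys-apps/foo", ...]
+ #
+ # Only directories that validate as simple cat/pkg atoms are listed;
+ # anything else is silently skipped.
+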
+ def cp_list(self, mycp, use_cache=1, mytree=None):
+ if self.frozen and mytree is None:
+ cachelist = self.xcache["cp-list"].get(mycp)
+ if cachelist is not None:
+ # Try to propagate this to the match-all cache here for
+ # repoman since it uses separate match-all caches for each
+ # profile (due to old-style virtuals). Do not propagate
+ # old-style virtuals since cp_list() doesn't expand them.
+ if not (not cachelist and mycp.startswith("virtual/")):
+ self.xcache["match-all"][mycp] = cachelist
+ return cachelist[:]
+ mysplit = mycp.split("/")
+ invalid_category = mysplit[0] not in self._categories
+ d={}
+ if mytree is not None:
+ if isinstance(mytree, basestring):
+ mytrees = [mytree]
+ else:
+ # assume it's iterable
+ mytrees = mytree
+ else:
+ mytrees = self.porttrees
+ for oroot in mytrees:
+ try:
+ file_list = os.listdir(os.path.join(oroot, mycp))
+ except OSError:
+ continue
+ for x in file_list:
+ pf = None
+ if x[-7:] == '.ebuild':
+ pf = x[:-7]
+
+ if pf is not None:
+ ps = pkgsplit(pf)
+ if not ps:
+ writemsg(_("\nInvalid ebuild name: %s\n") % \
+ os.path.join(oroot, mycp, x), noiselevel=-1)
+ continue
+ if ps[0] != mysplit[1]:
+ writemsg(_("\nInvalid ebuild name: %s\n") % \
+ os.path.join(oroot, mycp, x), noiselevel=-1)
+ continue
+ ver_match = ver_regexp.match("-".join(ps[1:]))
+ if ver_match is None or not ver_match.groups():
+ writemsg(_("\nInvalid ebuild version: %s\n") % \
+ os.path.join(oroot, mycp, x), noiselevel=-1)
+ continue
+ d[mysplit[0]+"/"+pf] = None
+ if invalid_category and d:
+ writemsg(_("\n!!! '%s' has a category that is not listed in " \
+ "%setc/portage/categories\n") % \
+ (mycp, self.settings["PORTAGE_CONFIGROOT"]), noiselevel=-1)
+ mylist = []
+ else:
+ mylist = list(d)
+ # Always sort in ascending order here since it's handy
+ # and the result can be easily cached and reused.
+ self._cpv_sort_ascending(mylist)
+ if self.frozen and mytree is None:
+ cachelist = mylist[:]
+ self.xcache["cp-list"][mycp] = cachelist
+ # Do not propagate old-style virtuals since
+ # cp_list() doesn't expand them.
+ if not (not cachelist and mycp.startswith("virtual/")):
+ self.xcache["match-all"][mycp] = cachelist
+ return mylist
+
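+ # Example (illustrative sketch; assumes a portdbapi instance named
+ # "portdb"):
+ #
+ #   portdb.cp_list("sys-apps/foo")
+ #   # e.g. ["sys-apps/foo-1.0", "sys-apps/foo-1.1"], sorted ascending
+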
+ def freeze(self):
+ for x in "bestmatch-visible", "cp-list", "list-visible", "match-all", \
+ "match-all-cpv-only", "match-visible", "minimum-all", \
+ "minimum-visible":
+ self.xcache[x]={}
+ self.frozen=1
+
+ def melt(self):
+ self.xcache = {}
+ self.frozen = 0
+
+ def xmatch(self,level,origdep,mydep=None,mykey=None,mylist=None):
+ "caching match function; very trick stuff"
+ #if no updates are being made to the tree, we can consult our xcache...
+ if self.frozen:
+ try:
+ return self.xcache[level][origdep][:]
+ except KeyError:
+ pass
+
+ if mydep is None:
+ #this stuff only runs on first call of xmatch()
+ #create mydep, mykey from origdep
+ mydep = dep_expand(origdep, mydb=self, settings=self.settings)
+ mykey = mydep.cp
+
+ myval = None
+ mytree = None
+ if mydep.repo is not None:
+ mytree = self.treemap.get(mydep.repo)
+ if mytree is None:
+ myval = []
+
+ if myval is not None:
+ # Unknown repo, empty result.
+ pass
+ elif level == "match-all-cpv-only":
+ # match *all* packages, only against the cpv, in order
+ # to bypass unnecessary cache access for things like IUSE
+ # and SLOT.
+ if mydep == mykey:
+ # Share cache with match-all/cp_list when the result is the
+ # same. Note that this requires that mydep.repo is None and
+ # thus mytree is also None.
+ level = "match-all"
+ myval = self.cp_list(mykey, mytree=mytree)
+ else:
+ myval = match_from_list(mydep,
+ self.cp_list(mykey, mytree=mytree))
+
+ elif level == "list-visible":
+ #a list of all visible packages, not called directly (just by xmatch())
+ #myval = self.visible(self.cp_list(mykey))
+
+ myval = self.gvisible(self.visible(
+ self.cp_list(mykey, mytree=mytree)))
+ elif level == "minimum-all":
+ # Find the minimum matching version. This is optimized to
+ # minimize the number of metadata accesses (improves performance
+ # especially in cases where metadata needs to be generated).
+ if mydep == mykey:
+ cpv_iter = iter(self.cp_list(mykey, mytree=mytree))
+ else:
+ cpv_iter = self._iter_match(mydep,
+ self.cp_list(mykey, mytree=mytree))
+ try:
+ myval = next(cpv_iter)
+ except StopIteration:
+ myval = ""
+
+ elif level in ("minimum-visible", "bestmatch-visible"):
+ # Find the minimum matching visible version. This is optimized to
+ # minimize the number of metadata accesses (improves performance
+ # especially in cases where metadata needs to be generated).
+ if mydep == mykey:
+ mylist = self.cp_list(mykey, mytree=mytree)
+ else:
+ mylist = match_from_list(mydep,
+ self.cp_list(mykey, mytree=mytree))
+ myval = ""
+ settings = self.settings
+ local_config = settings.local_config
+ aux_keys = list(self._aux_cache_keys)
+ if level == "minimum-visible":
+ iterfunc = iter
+ else:
+ iterfunc = reversed
+ for cpv in iterfunc(mylist):
+ try:
+ metadata = dict(zip(aux_keys,
+ self.aux_get(cpv, aux_keys)))
+ except KeyError:
+ # ebuild masked by corruption
+ continue
+ if not eapi_is_supported(metadata["EAPI"]):
+ continue
+ if mydep.slot and mydep.slot != metadata["SLOT"]:
+ continue
+ if settings._getMissingKeywords(cpv, metadata):
+ continue
+ if settings._getMaskAtom(cpv, metadata):
+ continue
+ if settings._getProfileMaskAtom(cpv, metadata):
+ continue
+ if local_config:
+ metadata["USE"] = ""
+ if "?" in metadata["LICENSE"] or "?" in metadata["PROPERTIES"]:
+ self.doebuild_settings.setcpv(cpv, mydb=metadata)
+ metadata["USE"] = self.doebuild_settings.get("USE", "")
+ try:
+ if settings._getMissingLicenses(cpv, metadata):
+ continue
+ if settings._getMissingProperties(cpv, metadata):
+ continue
+ except InvalidDependString:
+ continue
+ if mydep.use:
+ has_iuse = False
+ for has_iuse in self._iter_match_use(mydep, [cpv]):
+ break
+ if not has_iuse:
+ continue
+ myval = cpv
+ break
+ elif level == "bestmatch-list":
+ #dep match -- find best match but restrict search to sublist
+ #no point in calling xmatch again since we're not caching list deps
+
+ myval = best(list(self._iter_match(mydep, mylist)))
+ elif level == "match-list":
+ #dep match -- find all matches but restrict search to sublist (used in 2nd half of visible())
+
+ myval = list(self._iter_match(mydep, mylist))
+ elif level == "match-visible":
+ #dep match -- find all visible matches
+ #get all visible packages, then get the matching ones
+ myval = list(self._iter_match(mydep,
+ self.xmatch("list-visible", mykey, mydep=Atom(mykey), mykey=mykey)))
+ elif level == "match-all":
+ #match *all* visible *and* masked packages
+ if mydep == mykey:
+ myval = self.cp_list(mykey, mytree=mytree)
+ else:
+ myval = list(self._iter_match(mydep,
+ self.cp_list(mykey, mytree=mytree)))
+ else:
+ raise AssertionError(
+ "Invalid level argument: '%s'" % level)
+
+ if self.frozen and (level not in ["match-list", "bestmatch-list"]):
+ self.xcache[level][mydep] = myval
+ if origdep and origdep != mydep:
+ self.xcache[level][origdep] = myval
+ return myval[:]
+
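+ # Example (illustrative sketch; assumes a portdbapi instance named
+ # "portdb"):
+ #
+ #   portdb.xmatch("match-all", "sys-apps/foo")         # visible + masked
+ #   portdb.xmatch("match-visible", ">=sys-apps/foo-1") # visible only
+ #   portdb.xmatch("bestmatch-visible", "sys-apps/foo") # single best cpv
+ #
+ # While the dbapi is frozen, results for most levels are memoized in
+ # self.xcache (the *-list levels are never cached).
+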
+ def match(self, mydep, use_cache=1):
+ return self.xmatch("match-visible", mydep)
+
+ def visible(self, mylist):
+ """two functions in one. Accepts a list of cpv values and uses the package.mask *and*
+ packages file to remove invisible entries, returning the remaining items. This function assumes
+ that all entries in mylist have the same category and package name."""
+ if not mylist:
+ return []
+
+ db_keys = ["SLOT"]
+ visible = []
+ getMaskAtom = self.settings._getMaskAtom
+ getProfileMaskAtom = self.settings._getProfileMaskAtom
+ for cpv in mylist:
+ try:
+ metadata = dict(zip(db_keys, self.aux_get(cpv, db_keys)))
+ except KeyError:
+ # masked by corruption
+ continue
+ if not metadata["SLOT"]:
+ continue
+ if getMaskAtom(cpv, metadata):
+ continue
+ if getProfileMaskAtom(cpv, metadata):
+ continue
+ visible.append(cpv)
+ return visible
+
+ def gvisible(self,mylist):
+ "strip out group-masked (not in current group) entries"
+
+ if mylist is None:
+ return []
+ newlist=[]
+ aux_keys = list(self._aux_cache_keys)
+ metadata = {}
+ local_config = self.settings.local_config
+ chost = self.settings.get('CHOST', '')
+ accept_chost = self.settings._accept_chost
+ for mycpv in mylist:
+ metadata.clear()
+ try:
+ metadata.update(zip(aux_keys, self.aux_get(mycpv, aux_keys)))
+ except KeyError:
+ continue
+ except PortageException as e:
+ writemsg("!!! Error: aux_get('%s', %s)\n" % (mycpv, aux_keys),
+ noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
+ del e
+ continue
+ eapi = metadata["EAPI"]
+ if not eapi_is_supported(eapi):
+ continue
+ if _eapi_is_deprecated(eapi):
+ continue
+ if self.settings._getMissingKeywords(mycpv, metadata):
+ continue
+ if local_config:
+ metadata['CHOST'] = chost
+ if not accept_chost(mycpv, metadata):
+ continue
+ metadata["USE"] = ""
+ if "?" in metadata["LICENSE"] or "?" in metadata["PROPERTIES"]:
+ self.doebuild_settings.setcpv(mycpv, mydb=metadata)
+ metadata['USE'] = self.doebuild_settings['PORTAGE_USE']
+ try:
+ if self.settings._getMissingLicenses(mycpv, metadata):
+ continue
+ if self.settings._getMissingProperties(mycpv, metadata):
+ continue
+ except InvalidDependString:
+ continue
+ newlist.append(mycpv)
+ return newlist
+
+def close_portdbapi_caches():
+ for i in portdbapi.portdbapi_instances:
+ i.close_caches()
+
+portage.process.atexit_register(portage.portageexit)
+
+class portagetree(object):
+ def __init__(self, root=None, virtual=None, settings=None):
+ """
+ Constructor for a PortageTree
+
+ @param root: deprecated, defaults to settings['ROOT']
+ @type root: String/Path
+ @param virtual: UNUSED
+ @type virtual: No Idea
+ @param settings: Portage Configuration object (portage.settings)
+ @type settings: Instance of portage.config
+ """
+
+ if settings is None:
+ settings = portage.settings
+ self.settings = settings
+
+ if root is not None and root != settings['ROOT']:
+ warnings.warn("The root parameter of the " + \
+ "portage.dbapi.porttree.portagetree" + \
+ " constructor is now unused. Use " + \
+ "settings['ROOT'] instead.",
+ DeprecationWarning, stacklevel=2)
+
+ self.portroot = settings["PORTDIR"]
+ self.virtual = virtual
+ self.dbapi = portdbapi(mysettings=settings)
+
+ @property
+ def root(self):
+ warnings.warn("The root attribute of " + \
+ "portage.dbapi.porttree.portagetree" + \
+ " is deprecated. Use " + \
+ "settings['ROOT'] instead.",
+ DeprecationWarning, stacklevel=2)
+ return self.settings['ROOT']
+
+ def dep_bestmatch(self,mydep):
+ "compatibility method"
+ mymatch = self.dbapi.xmatch("bestmatch-visible",mydep)
+ if mymatch is None:
+ return ""
+ return mymatch
+
+ def dep_match(self,mydep):
+ "compatibility method"
+ mymatch = self.dbapi.xmatch("match-visible",mydep)
+ if mymatch is None:
+ return []
+ return mymatch
+
+ def exists_specific(self,cpv):
+ return self.dbapi.cpv_exists(cpv)
+
+ def getallnodes(self):
+ """new behavior: these are all *unmasked* nodes. There may or may not be available
+ masked package for nodes in this nodes list."""
+ return self.dbapi.cp_all()
+
+ def getname(self, pkgname):
+ "returns file location for this particular package (DEPRECATED)"
+ if not pkgname:
+ return ""
+ mysplit = pkgname.split("/")
+ psplit = pkgsplit(mysplit[1])
+ return "/".join([self.portroot, mysplit[0], psplit[0], mysplit[1]])+".ebuild"
+
+ def depcheck(self, mycheck, use="yes", myusesplit=None):
+ return dep_check(mycheck, self.dbapi, use=use, myuse=myusesplit)
+
+ def getslot(self,mycatpkg):
+ "Get a slot for a catpkg; assume it exists."
+ myslot = ""
+ try:
+ myslot = self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ pass
+ return myslot
+
+class FetchlistDict(Mapping):
+ """
+ This provides a mapping interface to retrieve fetch lists. It's used
+ to allow portage.manifest.Manifest to access fetch lists via a standard
+ mapping interface rather than use the dbapi directly.
+ """
+ def __init__(self, pkgdir, settings, mydbapi):
+ """pkgdir is a directory containing ebuilds and settings is passed into
+ portdbapi.getfetchlist for __getitem__ calls."""
+ self.pkgdir = pkgdir
+ self.cp = os.sep.join(pkgdir.split(os.sep)[-2:])
+ self.settings = settings
+ self.mytree = os.path.realpath(os.path.dirname(os.path.dirname(pkgdir)))
+ self.portdb = mydbapi
+
+ def __getitem__(self, pkg_key):
+ """Returns the complete fetch list for a given package."""
+ return list(self.portdb.getFetchMap(pkg_key, mytree=self.mytree))
+
+ def __contains__(self, cpv):
+ return cpv in self.__iter__()
+
+ def has_key(self, pkg_key):
+ """Returns true if the given package exists within pkgdir."""
+ warnings.warn("portage.dbapi.porttree.FetchlistDict.has_key() is "
+ "deprecated, use the 'in' operator instead",
+ DeprecationWarning, stacklevel=2)
+ return pkg_key in self
+
+ def __iter__(self):
+ return iter(self.portdb.cp_list(self.cp, mytree=self.mytree))
+
+ def __len__(self):
+ """This needs to be implemented in order to avoid
+ infinite recursion in some cases."""
+ return len(self.portdb.cp_list(self.cp, mytree=self.mytree))
+
+ def keys(self):
+ """Returns keys for all packages within pkgdir"""
+ return self.portdb.cp_list(self.cp, mytree=self.mytree)
+
+ if sys.hexversion >= 0x3000000:
+ keys = __iter__
+
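+ # Example (illustrative sketch; "pkgdir" and "portdb" are assumed to be
+ # an ebuild directory and a portdbapi instance, respectively):
+ #
+ #   fetchlists = FetchlistDict(pkgdir, portage.settings, portdb)
+ #   fetchlists["sys-apps/foo-1.0"]  # distfile names for that cpv
+ #
+ # Manifest generation iterates this mapping instead of querying the
+ # dbapi directly.
+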
+def _parse_uri_map(cpv, metadata, use=None):
+
+ myuris = use_reduce(metadata.get('SRC_URI', ''),
+ uselist=use, matchall=(use is None),
+ is_src_uri=True,
+ eapi=metadata['EAPI'])
+
+ uri_map = OrderedDict()
+
+ myuris.reverse()
+ while myuris:
+ uri = myuris.pop()
+ if myuris and myuris[-1] == "->":
+ operator = myuris.pop()
+ distfile = myuris.pop()
+ else:
+ distfile = os.path.basename(uri)
+ if not distfile:
+ raise portage.exception.InvalidDependString(
+ ("getFetchMap(): '%s' SRC_URI has no file " + \
+ "name: '%s'") % (cpv, uri))
+
+ uri_set = uri_map.get(distfile)
+ if uri_set is None:
+ uri_set = set()
+ uri_map[distfile] = uri_set
+ uri_set.add(uri)
+ uri = None
+ operator = None
+
+ return uri_map
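+
+ # Example (illustrative): given a SRC_URI entry using the rename
+ # operator, e.g.
+ #
+ #   http://example.org/foo-1.0.tgz -> foo-1.0-r1.tgz
+ #
+ # the returned OrderedDict maps "foo-1.0-r1.tgz" to the set of URIs it
+ # may be fetched from; without "->", the basename of each URI is used
+ # as the distfile key.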
diff --git a/portage_with_autodep/pym/portage/dbapi/vartree.py b/portage_with_autodep/pym/portage/dbapi/vartree.py
new file mode 100644
index 0000000..7f7873b
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dbapi/vartree.py
@@ -0,0 +1,4527 @@
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = [
+ "vardbapi", "vartree", "dblink"] + \
+ ["write_contents", "tar_contents"]
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.checksum:_perform_md5_merge@perform_md5',
+ 'portage.data:portage_gid,portage_uid,secpass',
+ 'portage.dbapi.dep_expand:dep_expand',
+ 'portage.dbapi._MergeProcess:MergeProcess',
+ 'portage.dep:dep_getkey,isjustname,match_from_list,' + \
+ 'use_reduce,_slot_re',
+ 'portage.elog:collect_ebuild_messages,collect_messages,' + \
+ 'elog_process,_merge_logentries',
+ 'portage.locks:lockdir,unlockdir,lockfile,unlockfile',
+ 'portage.output:bold,colorize',
+ 'portage.package.ebuild.doebuild:doebuild_environment,' + \
+ '_spawn_phase',
+ 'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
+ 'portage.update:fixdbentries',
+ 'portage.util:apply_secpass_permissions,ConfigProtect,ensure_dirs,' + \
+ 'writemsg,writemsg_level,write_atomic,atomic_ofstream,writedict,' + \
+ 'grabdict,normalize_path,new_protect_filename',
+ 'portage.util.digraph:digraph',
+ 'portage.util.env_update:env_update',
+ 'portage.util.listdir:dircache,listdir',
+ 'portage.util._dyn_libs.PreservedLibsRegistry:PreservedLibsRegistry',
+ 'portage.util._dyn_libs.LinkageMapELF:LinkageMapELF@LinkageMap',
+ 'portage.versions:best,catpkgsplit,catsplit,cpv_getkey,pkgcmp,' + \
+ '_pkgsplit@pkgsplit',
+)
+
+from portage.const import CACHE_PATH, CONFIG_MEMORY_FILE, \
+ PORTAGE_PACKAGE_ATOM, PRIVATE_PATH, VDB_PATH
+from portage.const import _ENABLE_DYN_LINK_MAP, _ENABLE_PRESERVE_LIBS
+from portage.dbapi import dbapi
+from portage.exception import CommandNotFound, \
+ InvalidData, InvalidLocation, InvalidPackageName, \
+ FileNotFound, PermissionDenied, UnsupportedAPIException
+from portage.localization import _
+from portage.util.movefile import movefile
+
+from portage import abssymlink, _movefile, bsd_chflags
+
+# This is a special version of the os module, wrapped for unicode support.
+from portage import os
+from portage import _encodings
+from portage import _os_merge
+from portage import _selinux_merge
+from portage import _unicode_decode
+from portage import _unicode_encode
+
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.emergelog import emergelog
+from _emerge.PollScheduler import PollScheduler
+from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
+
+import errno
+import gc
+import io
+from itertools import chain
+import logging
+import os as _os
+import re
+import shutil
+import stat
+import sys
+import tempfile
+import textwrap
+import time
+import warnings
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+ long = int
+
+class vardbapi(dbapi):
+
+ _excluded_dirs = ["CVS", "lost+found"]
+ _excluded_dirs = [re.escape(x) for x in _excluded_dirs]
+ _excluded_dirs = re.compile(r'^(\..*|-MERGING-.*|' + \
+ "|".join(_excluded_dirs) + r')$')
+
+ _aux_cache_version = "1"
+ _owners_cache_version = "1"
+
+ # Number of uncached packages to trigger cache update, since
+ # it's wasteful to update it for every vdb change.
+ _aux_cache_threshold = 5
+
+ _aux_cache_keys_re = re.compile(r'^NEEDED\..*$')
+ _aux_multi_line_re = re.compile(r'^(CONTENTS|NEEDED\..*)$')
+
+ def __init__(self, _unused_param=None, categories=None, settings=None, vartree=None):
+ """
+ The categories parameter is unused since the dbapi class
+ now has a categories property that is generated from the
+ available packages.
+ """
+
+ # Used by emerge to check whether any packages
+ # have been added or removed.
+ self._pkgs_changed = False
+
+ # The _aux_cache_threshold doesn't work as designed
+ # if the cache is flushed from a subprocess, so we
+ # use this to avoid wasteful vdb cache updates.
+ self._flush_cache_enabled = True
+
+ #cache for category directory mtimes
+ self.mtdircache = {}
+
+ #cache for dependency checks
+ self.matchcache = {}
+
+ #cache for cp_list results
+ self.cpcache = {}
+
+ self.blockers = None
+ if settings is None:
+ settings = portage.settings
+ self.settings = settings
+ self.root = settings['ROOT']
+
+ if _unused_param is not None and _unused_param != self.root:
+ warnings.warn("The first parameter of the " + \
+ "portage.dbapi.vartree.vardbapi" + \
+ " constructor is now unused. Use " + \
+ "settings['ROOT'] instead.",
+ DeprecationWarning, stacklevel=2)
+
+ self._eroot = settings['EROOT']
+ self._dbroot = self._eroot + VDB_PATH
+ self._lock = None
+ self._lock_count = 0
+
+ self._conf_mem_file = self._eroot + CONFIG_MEMORY_FILE
+ self._fs_lock_obj = None
+ self._fs_lock_count = 0
+
+ if vartree is None:
+ vartree = portage.db[self.root]["vartree"]
+ self.vartree = vartree
+ self._aux_cache_keys = set(
+ ["BUILD_TIME", "CHOST", "COUNTER", "DEPEND", "DESCRIPTION",
+ "EAPI", "HOMEPAGE", "IUSE", "KEYWORDS",
+ "LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND",
+ "repository", "RESTRICT" , "SLOT", "USE", "DEFINED_PHASES",
+ "REQUIRED_USE"])
+ self._aux_cache_obj = None
+ self._aux_cache_filename = os.path.join(self._eroot,
+ CACHE_PATH, "vdb_metadata.pickle")
+ self._counter_path = os.path.join(self._eroot,
+ CACHE_PATH, "counter")
+
+ self._plib_registry = None
+ if _ENABLE_PRESERVE_LIBS:
+ self._plib_registry = PreservedLibsRegistry(self.root,
+ os.path.join(self._eroot, PRIVATE_PATH,
+ "preserved_libs_registry"))
+
+ self._linkmap = None
+ if _ENABLE_DYN_LINK_MAP:
+ self._linkmap = LinkageMap(self)
+ self._owners = self._owners_db(self)
+
+ self._cached_counter = None
+
+ def getpath(self, mykey, filename=None):
+ # This is an optimized hotspot, so don't use unicode-wrapped
+ # os module and don't use os.path.join().
+ rValue = self._eroot + VDB_PATH + _os.sep + mykey
+ if filename is not None:
+ # If filename is always relative, we can do just
+ # rValue += _os.sep + filename
+ rValue = _os.path.join(rValue, filename)
+ return rValue
+
+ def lock(self):
+ """
+ Acquire a reentrant lock, blocking, for cooperation with concurrent
+ processes. State is inherited by subprocesses, allowing subprocesses
+ to reenter a lock that was acquired by a parent process. However,
+ a lock can be released only by the same process that acquired it.
+ """
+ if self._lock_count:
+ self._lock_count += 1
+ else:
+ if self._lock is not None:
+ raise AssertionError("already locked")
+ # At least the parent needs to exist for the lock file.
+ ensure_dirs(self._dbroot)
+ self._lock = lockdir(self._dbroot)
+ self._lock_count += 1
+
+ def unlock(self):
+ """
+ Release a lock, decrementing the recursion level. Each unlock() call
+ must be matched with a prior lock() call, or else an AssertionError
+ will be raised if unlock() is called while not locked.
+ """
+ if self._lock_count > 1:
+ self._lock_count -= 1
+ else:
+ if self._lock is None:
+ raise AssertionError("not locked")
+ self._lock_count = 0
+ unlockdir(self._lock)
+ self._lock = None
+
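+ # Example (illustrative sketch; assumes a vardbapi instance named
+ # "vardb"):
+ #
+ #   vardb.lock()
+ #   try:
+ #       ...  # modify /var/db/pkg entries
+ #   finally:
+ #       vardb.unlock()
+ #
+ # The lock is reentrant within a process, so nested pairs are safe.
+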
+ def _fs_lock(self):
+ """
+ Acquire a reentrant lock, blocking, for cooperation with concurrent
+ processes.
+ """
+ if self._fs_lock_count < 1:
+ if self._fs_lock_obj is not None:
+ raise AssertionError("already locked")
+ try:
+ self._fs_lock_obj = lockfile(self._conf_mem_file)
+ except InvalidLocation:
+ self.settings._init_dirs()
+ self._fs_lock_obj = lockfile(self._conf_mem_file)
+ self._fs_lock_count += 1
+
+ def _fs_unlock(self):
+ """
+ Release a lock, decrementing the recursion level.
+ """
+ if self._fs_lock_count <= 1:
+ if self._fs_lock_obj is None:
+ raise AssertionError("not locked")
+ unlockfile(self._fs_lock_obj)
+ self._fs_lock_obj = None
+ self._fs_lock_count -= 1
+
+ def _bump_mtime(self, cpv):
+ """
+ This is called before and after any modifications, so that consumers
+ can use directory mtimes to validate caches. See bug #290428.
+ """
+ base = self._eroot + VDB_PATH
+ cat = catsplit(cpv)[0]
+ catdir = base + _os.sep + cat
+ t = time.time()
+ t = (t, t)
+ try:
+ for x in (catdir, base):
+ os.utime(x, t)
+ except OSError:
+ ensure_dirs(catdir)
+
+ def cpv_exists(self, mykey, myrepo=None):
+ "Tells us whether an actual ebuild exists on disk (no masking)"
+ return os.path.exists(self.getpath(mykey))
+
+ def cpv_counter(self, mycpv):
+ "This method will grab the COUNTER. Returns a counter value."
+ try:
+ return long(self.aux_get(mycpv, ["COUNTER"])[0])
+ except (KeyError, ValueError):
+ pass
+ writemsg_level(_("portage: COUNTER for %s was corrupted; " \
+ "resetting to value of 0\n") % (mycpv,),
+ level=logging.ERROR, noiselevel=-1)
+ return 0
+
+ def cpv_inject(self, mycpv):
+ "injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
+ ensure_dirs(self.getpath(mycpv))
+ counter = self.counter_tick(mycpv=mycpv)
+ # write local package counter so that emerge clean does the right thing
+ write_atomic(self.getpath(mycpv, filename="COUNTER"), str(counter))
+
+ def isInjected(self, mycpv):
+ if self.cpv_exists(mycpv):
+ if os.path.exists(self.getpath(mycpv, filename="INJECTED")):
+ return True
+ if not os.path.exists(self.getpath(mycpv, filename="CONTENTS")):
+ return True
+ return False
+
+ def move_ent(self, mylist, repo_match=None):
+ origcp = mylist[1]
+ newcp = mylist[2]
+
+ # sanity check
+ for atom in (origcp, newcp):
+ if not isjustname(atom):
+ raise InvalidPackageName(str(atom))
+ origmatches = self.match(origcp, use_cache=0)
+ moves = 0
+ if not origmatches:
+ return moves
+ for mycpv in origmatches:
+ mycpv_cp = cpv_getkey(mycpv)
+ if mycpv_cp != origcp:
+ # Ignore PROVIDE virtual match.
+ continue
+ if repo_match is not None \
+ and not repo_match(self.aux_get(mycpv, ['repository'])[0]):
+ continue
+ mynewcpv = mycpv.replace(mycpv_cp, str(newcp), 1)
+ mynewcat = catsplit(newcp)[0]
+ origpath = self.getpath(mycpv)
+ if not os.path.exists(origpath):
+ continue
+ moves += 1
+ if not os.path.exists(self.getpath(mynewcat)):
+ #create the directory
+ ensure_dirs(self.getpath(mynewcat))
+ newpath = self.getpath(mynewcpv)
+ if os.path.exists(newpath):
+ #dest already exists; keep this puppy where it is.
+ continue
+ _movefile(origpath, newpath, mysettings=self.settings)
+ self._clear_pkg_cache(self._dblink(mycpv))
+ self._clear_pkg_cache(self._dblink(mynewcpv))
+
+ # We need to rename the ebuild now.
+ old_pf = catsplit(mycpv)[1]
+ new_pf = catsplit(mynewcpv)[1]
+ if new_pf != old_pf:
+ try:
+ os.rename(os.path.join(newpath, old_pf + ".ebuild"),
+ os.path.join(newpath, new_pf + ".ebuild"))
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
+ write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
+ fixdbentries([mylist], newpath)
+ return moves
+
+ def cp_list(self, mycp, use_cache=1):
+ mysplit=catsplit(mycp)
+ if mysplit[0] == '*':
+ mysplit[0] = mysplit[0][1:]
+ try:
+ mystat = os.stat(self.getpath(mysplit[0])).st_mtime
+ except OSError:
+ mystat = 0
+ if use_cache and mycp in self.cpcache:
+ cpc = self.cpcache[mycp]
+ if cpc[0] == mystat:
+ return cpc[1][:]
+ cat_dir = self.getpath(mysplit[0])
+ try:
+ dir_list = os.listdir(cat_dir)
+ except EnvironmentError as e:
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied(cat_dir)
+ del e
+ dir_list = []
+
+ returnme = []
+ for x in dir_list:
+ if self._excluded_dirs.match(x) is not None:
+ continue
+ ps = pkgsplit(x)
+ if not ps:
+ self.invalidentry(os.path.join(self.getpath(mysplit[0]), x))
+ continue
+ if len(mysplit) > 1:
+ if ps[0] == mysplit[1]:
+ returnme.append(mysplit[0]+"/"+x)
+ self._cpv_sort_ascending(returnme)
+ if use_cache:
+ self.cpcache[mycp] = [mystat, returnme[:]]
+ elif mycp in self.cpcache:
+ del self.cpcache[mycp]
+ return returnme
+
+ def cpv_all(self, use_cache=1):
+ """
+ Set use_cache=0 to bypass the portage.cachedir() cache in cases
+ when the accuracy of mtime staleness checks should not be trusted
+ (generally this is only necessary in critical sections that
+ involve merge or unmerge of packages).
+ """
+ returnme = []
+ basepath = os.path.join(self._eroot, VDB_PATH) + os.path.sep
+
+ if use_cache:
+ from portage import listdir
+ else:
+ def listdir(p, **kwargs):
+ try:
+ return [x for x in os.listdir(p) \
+ if os.path.isdir(os.path.join(p, x))]
+ except EnvironmentError as e:
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied(p)
+ del e
+ return []
+
+ for x in listdir(basepath, EmptyOnError=1, ignorecvs=1, dirsonly=1):
+ if self._excluded_dirs.match(x) is not None:
+ continue
+ if not self._category_re.match(x):
+ continue
+ for y in listdir(basepath + x, EmptyOnError=1, dirsonly=1):
+ if self._excluded_dirs.match(y) is not None:
+ continue
+ subpath = x + "/" + y
+ # -MERGING- should never be a cpv, nor should files.
+ try:
+ if catpkgsplit(subpath) is None:
+ self.invalidentry(self.getpath(subpath))
+ continue
+ except InvalidData:
+ self.invalidentry(self.getpath(subpath))
+ continue
+ returnme.append(subpath)
+
+ return returnme
+
+ def cp_all(self, use_cache=1):
+ mylist = self.cpv_all(use_cache=use_cache)
+ d={}
+ for y in mylist:
+ if y[0] == '*':
+ y = y[1:]
+ try:
+ mysplit = catpkgsplit(y)
+ except InvalidData:
+ self.invalidentry(self.getpath(y))
+ continue
+ if not mysplit:
+ self.invalidentry(self.getpath(y))
+ continue
+ d[mysplit[0]+"/"+mysplit[1]] = None
+ return list(d)
+
+ def checkblockers(self, origdep):
+ pass
+
+ def _clear_cache(self):
+ self.mtdircache.clear()
+ self.matchcache.clear()
+ self.cpcache.clear()
+ self._aux_cache_obj = None
+
+ def _add(self, pkg_dblink):
+ self._pkgs_changed = True
+ self._clear_pkg_cache(pkg_dblink)
+
+ def _remove(self, pkg_dblink):
+ self._pkgs_changed = True
+ self._clear_pkg_cache(pkg_dblink)
+
+ def _clear_pkg_cache(self, pkg_dblink):
+ # Due to 1 second mtime granularity in <python-2.5, mtime checks
+ # are not always sufficient to invalidate vardbapi caches. Therefore,
+ # the caches need to be actively invalidated here.
+ self.mtdircache.pop(pkg_dblink.cat, None)
+ self.matchcache.pop(pkg_dblink.cat, None)
+ self.cpcache.pop(pkg_dblink.mysplit[0], None)
+ dircache.pop(pkg_dblink.dbcatdir, None)
+
+ def match(self, origdep, use_cache=1):
+ "caching match function"
+ mydep = dep_expand(
+ origdep, mydb=self, use_cache=use_cache, settings=self.settings)
+ mykey = dep_getkey(mydep)
+ mycat = catsplit(mykey)[0]
+ if not use_cache:
+ if mycat in self.matchcache:
+ del self.mtdircache[mycat]
+ del self.matchcache[mycat]
+ return list(self._iter_match(mydep,
+ self.cp_list(mydep.cp, use_cache=use_cache)))
+ try:
+ curmtime = os.stat(os.path.join(self._eroot, VDB_PATH, mycat)).st_mtime
+ except (IOError, OSError):
+ curmtime=0
+
+ if mycat not in self.matchcache or \
+ self.mtdircache[mycat] != curmtime:
+ # clear cache entry
+ self.mtdircache[mycat] = curmtime
+ self.matchcache[mycat] = {}
+ if mydep not in self.matchcache[mycat]:
+ mymatch = list(self._iter_match(mydep,
+ self.cp_list(mydep.cp, use_cache=use_cache)))
+ self.matchcache[mycat][mydep] = mymatch
+ return self.matchcache[mycat][mydep][:]
+
+ def findname(self, mycpv, myrepo=None):
+ return self.getpath(str(mycpv), filename=catsplit(mycpv)[1]+".ebuild")
+
+ def flush_cache(self):
+ """If the current user has permission and the internal aux_get cache has
+ been updated, save it to disk and mark it unmodified. This is called
+ by emerge after it has loaded the full vdb for use in dependency
+ calculations. Currently, the cache is only written if the user has
+ superuser privileges (since that's required to obtain a lock), but all
+ users have read access and benefit from faster metadata lookups (as
+ long as at least part of the cache is still valid)."""
+ if self._flush_cache_enabled and \
+ self._aux_cache is not None and \
+ len(self._aux_cache["modified"]) >= self._aux_cache_threshold and \
+ secpass >= 2:
+ self._owners.populate() # index any unindexed contents
+ valid_nodes = set(self.cpv_all())
+ for cpv in list(self._aux_cache["packages"]):
+ if cpv not in valid_nodes:
+ del self._aux_cache["packages"][cpv]
+ del self._aux_cache["modified"]
+ try:
+ f = atomic_ofstream(self._aux_cache_filename, 'wb')
+ pickle.dump(self._aux_cache, f, protocol=2)
+ f.close()
+ apply_secpass_permissions(
+ self._aux_cache_filename, gid=portage_gid, mode=0o644)
+ except (IOError, OSError) as e:
+ pass
+ self._aux_cache["modified"] = set()
+
+ @property
+ def _aux_cache(self):
+ if self._aux_cache_obj is None:
+ self._aux_cache_init()
+ return self._aux_cache_obj
+
+ def _aux_cache_init(self):
+ aux_cache = None
+ open_kwargs = {}
+ if sys.hexversion >= 0x3000000:
+ # Buffered io triggers extreme performance issues in
+ # Unpickler.load() (problem observed with python-3.0.1).
+ # Unfortunately, performance is still poor relative to
+ # python-2.x, but buffering makes it much worse.
+ open_kwargs["buffering"] = 0
+ try:
+ f = open(_unicode_encode(self._aux_cache_filename,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='rb', **open_kwargs)
+ mypickle = pickle.Unpickler(f)
+ try:
+ mypickle.find_global = None
+ except AttributeError:
+ # TODO: If py3k, override Unpickler.find_class().
+ pass
+ aux_cache = mypickle.load()
+ f.close()
+ del f
+ except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError) as e:
+ if isinstance(e, pickle.UnpicklingError):
+ writemsg(_unicode_decode(_("!!! Error loading '%s': %s\n")) % \
+ (self._aux_cache_filename, e), noiselevel=-1)
+ del e
+
+ if not aux_cache or \
+ not isinstance(aux_cache, dict) or \
+ aux_cache.get("version") != self._aux_cache_version or \
+ not aux_cache.get("packages"):
+ aux_cache = {"version": self._aux_cache_version}
+ aux_cache["packages"] = {}
+
+ owners = aux_cache.get("owners")
+ if owners is not None:
+ if not isinstance(owners, dict):
+ owners = None
+ elif "version" not in owners:
+ owners = None
+ elif owners["version"] != self._owners_cache_version:
+ owners = None
+ elif "base_names" not in owners:
+ owners = None
+ elif not isinstance(owners["base_names"], dict):
+ owners = None
+
+ if owners is None:
+ owners = {
+ "base_names" : {},
+ "version" : self._owners_cache_version
+ }
+ aux_cache["owners"] = owners
+
+ aux_cache["modified"] = set()
+ self._aux_cache_obj = aux_cache
+
+ def aux_get(self, mycpv, wants, myrepo = None):
+ """This automatically caches selected keys that are frequently needed
+ by emerge for dependency calculations. The cached metadata is
+ considered valid if the mtime of the package directory has not changed
+ since the data was cached. The cache is stored in a pickled dict
+ object with the following format:
+
+ {version:"1", "packages":{cpv1:(mtime,{k1,v1, k2,v2, ...}), cpv2...}}
+
+ If an error occurs while loading the cache pickle or the version is
+ unrecognized, the cache will simply be recreated from scratch (it is
+ completely disposable).
+ """
+ cache_these_wants = self._aux_cache_keys.intersection(wants)
+ for x in wants:
+ if self._aux_cache_keys_re.match(x) is not None:
+ cache_these_wants.add(x)
+
+ if not cache_these_wants:
+ return self._aux_get(mycpv, wants)
+
+ cache_these = set(self._aux_cache_keys)
+ cache_these.update(cache_these_wants)
+
+ mydir = self.getpath(mycpv)
+ mydir_stat = None
+ try:
+ mydir_stat = os.stat(mydir)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ raise KeyError(mycpv)
+ mydir_mtime = mydir_stat[stat.ST_MTIME]
+ pkg_data = self._aux_cache["packages"].get(mycpv)
+ pull_me = cache_these.union(wants)
+ mydata = {"_mtime_" : mydir_mtime}
+ cache_valid = False
+ cache_incomplete = False
+ cache_mtime = None
+ metadata = None
+ if pkg_data is not None:
+ if not isinstance(pkg_data, tuple) or len(pkg_data) != 2:
+ pkg_data = None
+ else:
+ cache_mtime, metadata = pkg_data
+ if not isinstance(cache_mtime, (long, int)) or \
+ not isinstance(metadata, dict):
+ pkg_data = None
+
+ if pkg_data:
+ cache_mtime, metadata = pkg_data
+ cache_valid = cache_mtime == mydir_mtime
+ if cache_valid:
+ # Migrate old metadata to unicode.
+ for k, v in metadata.items():
+ metadata[k] = _unicode_decode(v,
+ encoding=_encodings['repo.content'], errors='replace')
+
+ mydata.update(metadata)
+ pull_me.difference_update(mydata)
+
+ if pull_me:
+ # pull any needed data and cache it
+ aux_keys = list(pull_me)
+ for k, v in zip(aux_keys,
+ self._aux_get(mycpv, aux_keys, st=mydir_stat)):
+ mydata[k] = v
+ if not cache_valid or cache_these.difference(metadata):
+ cache_data = {}
+ if cache_valid and metadata:
+ cache_data.update(metadata)
+ for aux_key in cache_these:
+ cache_data[aux_key] = mydata[aux_key]
+ self._aux_cache["packages"][mycpv] = (mydir_mtime, cache_data)
+ self._aux_cache["modified"].add(mycpv)
+
+ if _slot_re.match(mydata['SLOT']) is None:
+ # Empty or invalid slot triggers InvalidAtom exceptions when
+ # generating slot atoms for packages, so translate it to '0' here.
+ mydata['SLOT'] = _unicode_decode('0')
+
+ return [mydata[x] for x in wants]
+
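+ # Example (illustrative sketch; assumes a vardbapi instance named
+ # "vardb"):
+ #
+ #   build_time, slot = vardb.aux_get("sys-apps/foo-1.0",
+ #       ["BUILD_TIME", "SLOT"])
+ #
+ # Keys in _aux_cache_keys are answered from (and written back to) the
+ # pickled vdb_metadata cache while the package directory mtime matches.
+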
+ def _aux_get(self, mycpv, wants, st=None):
+ mydir = self.getpath(mycpv)
+ if st is None:
+ try:
+ st = os.stat(mydir)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ raise KeyError(mycpv)
+ elif e.errno == PermissionDenied.errno:
+ raise PermissionDenied(mydir)
+ else:
+ raise
+ if not stat.S_ISDIR(st.st_mode):
+ raise KeyError(mycpv)
+ results = []
+ for x in wants:
+ if x == "_mtime_":
+ results.append(st[stat.ST_MTIME])
+ continue
+ try:
+ myf = io.open(
+ _unicode_encode(os.path.join(mydir, x),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace')
+ try:
+ myd = myf.read()
+ finally:
+ myf.close()
+ # Preserve \n for metadata that is known to
+ # contain multiple lines.
+ if self._aux_multi_line_re.match(x) is None:
+ myd = " ".join(myd.split())
+ except IOError:
+ myd = _unicode_decode('')
+ if x == "EAPI" and not myd:
+ results.append(_unicode_decode('0'))
+ else:
+ results.append(myd)
+ return results
+
+ def aux_update(self, cpv, values):
+ mylink = self._dblink(cpv)
+ if not mylink.exists():
+ raise KeyError(cpv)
+ self._bump_mtime(cpv)
+ self._clear_pkg_cache(mylink)
+ for k, v in values.items():
+ if v:
+ mylink.setfile(k, v)
+ else:
+ try:
+ os.unlink(os.path.join(self.getpath(cpv), k))
+ except EnvironmentError:
+ pass
+ self._bump_mtime(cpv)
+
+ def counter_tick(self, myroot=None, mycpv=None):
+ """
+ @param myroot: ignored, self._eroot is used instead
+ """
+ return self.counter_tick_core(incrementing=1, mycpv=mycpv)
+
+ def get_counter_tick_core(self, myroot=None, mycpv=None):
+ """
+ Use this method to retrieve the counter instead
+ of having to trust the value of a global counter
+ file that can lead to invalid COUNTER
+ generation. When cache is valid, the package COUNTER
+ files are not read and we rely on the timestamp of
+ the package directory to validate cache. The stat
+ calls should only take a short time, so performance
+ is sufficient without having to rely on a potentially
+ corrupt global counter file.
+
+ The global counter file located at
+ $CACHE_PATH/counter serves to record the
+ counter of the last installed package and
+ it also corresponds to the total number of
+ installation actions that have occurred in
+ the history of this package database.
+
+ @param myroot: ignored, self._eroot is used instead
+ """
+ myroot = None
+ new_vdb = False
+ counter = -1
+ try:
+ cfile = io.open(
+ _unicode_encode(self._counter_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace')
+ except EnvironmentError as e:
+ new_vdb = not bool(self.cpv_all())
+ if not new_vdb:
+ writemsg(_("!!! Unable to read COUNTER file: '%s'\n") % \
+ self._counter_path, noiselevel=-1)
+ writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ del e
+ else:
+ try:
+ try:
+ counter = long(cfile.readline().strip())
+ finally:
+ cfile.close()
+ except (OverflowError, ValueError) as e:
+ writemsg(_("!!! COUNTER file is corrupt: '%s'\n") % \
+ self._counter_path, noiselevel=-1)
+ writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ del e
+
+ if self._cached_counter == counter:
+ max_counter = counter
+ else:
+ # We must ensure that we return a counter
+ # value that is at least as large as the
+ # highest one from the installed packages,
+ # since having a corrupt value that is too low
+ # can trigger incorrect AUTOCLEAN behavior due
+ # to newly installed packages having lower
+ # COUNTERs than the previous version in the
+ # same slot.
+ max_counter = counter
+ for cpv in self.cpv_all():
+ try:
+ pkg_counter = int(self.aux_get(cpv, ["COUNTER"])[0])
+ except (KeyError, OverflowError, ValueError):
+ continue
+ if pkg_counter > max_counter:
+ max_counter = pkg_counter
+
+ if counter < 0 and not new_vdb:
+ writemsg(_("!!! Initializing COUNTER to " \
+ "value of %d\n") % max_counter, noiselevel=-1)
+
+ return max_counter + 1
+
+ def counter_tick_core(self, myroot=None, incrementing=1, mycpv=None):
+ """
+ This method will grab the next COUNTER value and record it back
+ to the global file. Note that every package install must have
+ a unique counter, since a slotmove update can move two packages
+ into the same SLOT and in that case it's important that both
+ packages have different COUNTER metadata.
+
+ @param myroot: ignored, self._eroot is used instead
+ @param mycpv: ignored
+ @rtype: int
+ @returns: new counter value
+ """
+ myroot = None
+ mycpv = None
+ self.lock()
+ try:
+ counter = self.get_counter_tick_core() - 1
+ if incrementing:
+ #increment counter
+ counter += 1
+ # update new global counter file
+ try:
+ write_atomic(self._counter_path, str(counter))
+ except InvalidLocation:
+ self.settings._init_dirs()
+ write_atomic(self._counter_path, str(counter))
+ self._cached_counter = counter
+
+ # Since we hold a lock, this is a good opportunity
+ # to flush the cache. Note that this will only
+ # flush the cache periodically in the main process
+ # when _aux_cache_threshold is exceeded.
+ self.flush_cache()
+ finally:
+ self.unlock()
+
+ return counter
+
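+ # Example (illustrative sketch; assumes a vardbapi instance named
+ # "vardb"):
+ #
+ #   counter = vardb.counter_tick()  # next unique COUNTER value
+ #
+ # The new value is written atomically while the vdb lock is held, so
+ # concurrent merges cannot hand out the same COUNTER twice.
+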
+ def _dblink(self, cpv):
+ category, pf = catsplit(cpv)
+ return dblink(category, pf, settings=self.settings,
+ vartree=self.vartree, treetype="vartree")
+
+ def removeFromContents(self, pkg, paths, relative_paths=True):
+ """
+ @param pkg: cpv for an installed package
+ @type pkg: string
+ @param paths: paths of files to remove from contents
+ @type paths: iterable
+ """
+ if not hasattr(pkg, "getcontents"):
+ pkg = self._dblink(pkg)
+ root = self.settings['ROOT']
+ root_len = len(root) - 1
+ new_contents = pkg.getcontents().copy()
+ removed = 0
+
+ for filename in paths:
+ filename = _unicode_decode(filename,
+ encoding=_encodings['content'], errors='strict')
+ filename = normalize_path(filename)
+ if relative_paths:
+ relative_filename = filename
+ else:
+ relative_filename = filename[root_len:]
+ contents_key = pkg._match_contents(relative_filename)
+ if contents_key:
+ del new_contents[contents_key]
+ removed += 1
+
+ if removed:
+ self._bump_mtime(pkg.mycpv)
+ f = atomic_ofstream(os.path.join(pkg.dbdir, "CONTENTS"))
+ write_contents(new_contents, root, f)
+ f.close()
+ self._bump_mtime(pkg.mycpv)
+ pkg._clear_contents_cache()
+
+ class _owners_cache(object):
+ """
+ This class maintains a hash table that serves to index package
+ contents by mapping the basename of a file to a list of possible
+ packages that own it. This is used to optimize owner lookups
+ by narrowing the search down to a smaller number of packages.
+ """
+ try:
+ from hashlib import md5 as _new_hash
+ except ImportError:
+ from md5 import new as _new_hash
+
+ _hash_bits = 16
+ _hex_chars = int(_hash_bits / 4)
+
+ def __init__(self, vardb):
+ self._vardb = vardb
+
+ def add(self, cpv):
+ eroot_len = len(self._vardb._eroot)
+ contents = self._vardb._dblink(cpv).getcontents()
+ pkg_hash = self._hash_pkg(cpv)
+ if not contents:
+ # Empty path is a code used to represent empty contents.
+ self._add_path("", pkg_hash)
+
+ for x in contents:
+ self._add_path(x[eroot_len:], pkg_hash)
+
+ self._vardb._aux_cache["modified"].add(cpv)
+
+ def _add_path(self, path, pkg_hash):
+ """
+ Empty path is a code that represents empty contents.
+ """
+ if path:
+ name = os.path.basename(path.rstrip(os.path.sep))
+ if not name:
+ return
+ else:
+ name = path
+ name_hash = self._hash_str(name)
+ base_names = self._vardb._aux_cache["owners"]["base_names"]
+ pkgs = base_names.get(name_hash)
+ if pkgs is None:
+ pkgs = {}
+ base_names[name_hash] = pkgs
+ pkgs[pkg_hash] = None
+
+ def _hash_str(self, s):
+ h = self._new_hash()
+ # Always use a constant utf_8 encoding here, since
+ # the "default" encoding can change.
+ h.update(_unicode_encode(s,
+ encoding=_encodings['repo.content'],
+ errors='backslashreplace'))
+ h = h.hexdigest()
+ h = h[-self._hex_chars:]
+ h = int(h, 16)
+ return h
+
+ def _hash_pkg(self, cpv):
+ counter, mtime = self._vardb.aux_get(
+ cpv, ["COUNTER", "_mtime_"])
+ try:
+ counter = int(counter)
+ except ValueError:
+ counter = 0
+ return (cpv, counter, mtime)
+
+ class _owners_db(object):
+
+ def __init__(self, vardb):
+ self._vardb = vardb
+
+ def populate(self):
+ self._populate()
+
+ def _populate(self):
+ owners_cache = vardbapi._owners_cache(self._vardb)
+ cached_hashes = set()
+ base_names = self._vardb._aux_cache["owners"]["base_names"]
+
+ # Take inventory of all cached package hashes.
+ for name, hash_values in list(base_names.items()):
+ if not isinstance(hash_values, dict):
+ del base_names[name]
+ continue
+ cached_hashes.update(hash_values)
+
+ # Create sets of valid package hashes and uncached packages.
+ uncached_pkgs = set()
+ hash_pkg = owners_cache._hash_pkg
+ valid_pkg_hashes = set()
+ for cpv in self._vardb.cpv_all():
+ hash_value = hash_pkg(cpv)
+ valid_pkg_hashes.add(hash_value)
+ if hash_value not in cached_hashes:
+ uncached_pkgs.add(cpv)
+
+ # Cache any missing packages.
+ for cpv in uncached_pkgs:
+ owners_cache.add(cpv)
+
+ # Delete any stale cache.
+ stale_hashes = cached_hashes.difference(valid_pkg_hashes)
+ if stale_hashes:
+ for base_name_hash, bucket in list(base_names.items()):
+ for hash_value in stale_hashes.intersection(bucket):
+ del bucket[hash_value]
+ if not bucket:
+ del base_names[base_name_hash]
+
+ return owners_cache
+
+ def get_owners(self, path_iter):
+ """
+ @return the owners as a dblink -> set(files) mapping.
+ """
+ owners = {}
+ for owner, f in self.iter_owners(path_iter):
+ owned_files = owners.get(owner)
+ if owned_files is None:
+ owned_files = set()
+ owners[owner] = owned_files
+ owned_files.add(f)
+ return owners
+
+ def getFileOwnerMap(self, path_iter):
+ owners = self.get_owners(path_iter)
+ file_owners = {}
+ for pkg_dblink, files in owners.items():
+ for f in files:
+ owner_set = file_owners.get(f)
+ if owner_set is None:
+ owner_set = set()
+ file_owners[f] = owner_set
+ owner_set.add(pkg_dblink)
+ return file_owners
+
+ def iter_owners(self, path_iter):
+ """
+ Iterate over tuples of (dblink, path). In order to avoid
+ consuming too many resources for too long, resources
+ are only allocated for the duration of a given iter_owners()
+ call. Therefore, to maximize reuse of resources when searching
+ for multiple files, it's best to search for them all in a single
+ call.
+ """
+
+ if not isinstance(path_iter, list):
+ path_iter = list(path_iter)
+ owners_cache = self._populate()
+ vardb = self._vardb
+ root = vardb._eroot
+ hash_pkg = owners_cache._hash_pkg
+ hash_str = owners_cache._hash_str
+ base_names = self._vardb._aux_cache["owners"]["base_names"]
+
+ dblink_cache = {}
+
+ def dblink(cpv):
+ x = dblink_cache.get(cpv)
+ if x is None:
+ if len(dblink_cache) > 20:
+ # Ensure that we don't run out of memory.
+ raise StopIteration()
+ x = self._vardb._dblink(cpv)
+ dblink_cache[cpv] = x
+ return x
+
+ while path_iter:
+
+ path = path_iter.pop()
+ is_basename = os.sep != path[:1]
+ if is_basename:
+ name = path
+ else:
+ name = os.path.basename(path.rstrip(os.path.sep))
+
+ if not name:
+ continue
+
+ name_hash = hash_str(name)
+ pkgs = base_names.get(name_hash)
+ owners = []
+ if pkgs is not None:
+ try:
+ for hash_value in pkgs:
+ if not isinstance(hash_value, tuple) or \
+ len(hash_value) != 3:
+ continue
+ cpv, counter, mtime = hash_value
+ if not isinstance(cpv, basestring):
+ continue
+ try:
+ current_hash = hash_pkg(cpv)
+ except KeyError:
+ continue
+
+ if current_hash != hash_value:
+ continue
+
+ if is_basename:
+ for p in dblink(cpv).getcontents():
+ if os.path.basename(p) == name:
+ owners.append((cpv, p[len(root):]))
+ else:
+ if dblink(cpv).isowner(path):
+ owners.append((cpv, path))
+
+ except StopIteration:
+ path_iter.append(path)
+ del owners[:]
+ dblink_cache.clear()
+ gc.collect()
+ for x in self._iter_owners_low_mem(path_iter):
+ yield x
+ return
+ else:
+ for cpv, p in owners:
+ yield (dblink(cpv), p)
+
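+ # Example (illustrative sketch; assumes a vardbapi instance named
+ # "vardb"):
+ #
+ #   for dblnk, relpath in vardb._owners.iter_owners(["/bin/ls"]):
+ #       ...  # dblnk.mycpv owns relpath
+ #
+ # Pass every path of interest in one call: the caches built here only
+ # live for the duration of a single iteration.
+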
+ def _iter_owners_low_mem(self, path_list):
+ """
+ This implementation will make a short-lived dblink instance (and
+ parse CONTENTS) for every single installed package. This is
+ slower but uses less memory than the method which uses the
+ basename cache.
+ """
+
+ if not path_list:
+ return
+
+ path_info_list = []
+ for path in path_list:
+ is_basename = os.sep != path[:1]
+ if is_basename:
+ name = path
+ else:
+ name = os.path.basename(path.rstrip(os.path.sep))
+ path_info_list.append((path, name, is_basename))
+
+ root = self._vardb._eroot
+ for cpv in self._vardb.cpv_all():
+ dblnk = self._vardb._dblink(cpv)
+
+ for path, name, is_basename in path_info_list:
+ if is_basename:
+ for p in dblnk.getcontents():
+ if os.path.basename(p) == name:
+ yield dblnk, p[len(root):]
+ else:
+ if dblnk.isowner(path):
+ yield dblnk, path
+
+class vartree(object):
+ "this tree will scan a var/db/pkg database located at root (passed to init)"
+ def __init__(self, root=None, virtual=None, categories=None,
+ settings=None):
+
+ if settings is None:
+ settings = portage.settings
+ self.root = settings['ROOT']
+
+ if root is not None and root != self.root:
+ warnings.warn("The 'root' parameter of the " + \
+ "portage.dbapi.vartree.vartree" + \
+ " constructor is now unused. Use " + \
+ "settings['ROOT'] instead.",
+ DeprecationWarning, stacklevel=2)
+
+ self.settings = settings
+ self.dbapi = vardbapi(settings=settings, vartree=self)
+ self.populated = 1
+
+ def getpath(self, mykey, filename=None):
+ return self.dbapi.getpath(mykey, filename=filename)
+
+ def zap(self, mycpv):
+ return
+
+ def inject(self, mycpv):
+ return
+
+ def get_provide(self, mycpv):
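+		# Hypothetical example (not part of the original code): for an
+		# installed package whose PROVIDE is "virtual/mta" and whose USE
+		# flags satisfy any conditionals, this returns ["virtual/mta"].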
+ myprovides = []
+ mylines = None
+ try:
+ mylines, myuse = self.dbapi.aux_get(mycpv, ["PROVIDE", "USE"])
+ if mylines:
+ myuse = myuse.split()
+ mylines = use_reduce(mylines, uselist=myuse, flat=True)
+ for myprovide in mylines:
+ mys = catpkgsplit(myprovide)
+ if not mys:
+ mys = myprovide.split("/")
+ myprovides += [mys[0] + "/" + mys[1]]
+ return myprovides
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ mydir = self.dbapi.getpath(mycpv)
+ writemsg(_("\nParse Error reading PROVIDE and USE in '%s'\n") % mydir,
+ noiselevel=-1)
+ if mylines:
+ writemsg(_("Possibly Invalid: '%s'\n") % str(mylines),
+ noiselevel=-1)
+ writemsg(_("Exception: %s\n\n") % str(e), noiselevel=-1)
+ return []
+
+ def get_all_provides(self):
+ myprovides = {}
+ for node in self.getallcpv():
+ for mykey in self.get_provide(node):
+ if mykey in myprovides:
+ myprovides[mykey] += [node]
+ else:
+ myprovides[mykey] = [node]
+ return myprovides
+
+ def dep_bestmatch(self, mydep, use_cache=1):
+ "compatibility method -- all matches, not just visible ones"
+ #mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
+ mymatch = best(self.dbapi.match(
+ dep_expand(mydep, mydb=self.dbapi, settings=self.settings),
+ use_cache=use_cache))
+ if mymatch is None:
+ return ""
+ else:
+ return mymatch
+
+ def dep_match(self, mydep, use_cache=1):
+ "compatibility method -- we want to see all matches, not just visible ones"
+ #mymatch = match(mydep,self.dbapi)
+ mymatch = self.dbapi.match(mydep, use_cache=use_cache)
+ if mymatch is None:
+ return []
+ else:
+ return mymatch
+
+ def exists_specific(self, cpv):
+ return self.dbapi.cpv_exists(cpv)
+
+ def getallcpv(self):
+ """temporary function, probably to be renamed --- Gets a list of all
+ category/package-versions installed on the system."""
+ return self.dbapi.cpv_all()
+
+ def getallnodes(self):
+ """new behavior: these are all *unmasked* nodes. There may or may not be available
+ masked package for nodes in this nodes list."""
+ return self.dbapi.cp_all()
+
+ def getebuildpath(self, fullpackage):
+ cat, package = catsplit(fullpackage)
+ return self.getpath(fullpackage, filename=package+".ebuild")
+
+ def getslot(self, mycatpkg):
+ "Get a slot for a catpkg; assume it exists."
+ try:
+ return self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
+ except KeyError:
+ return ""
+
+ def populate(self):
+ self.populated=1
+
+class dblink(object):
+ """
+	This class provides an interface to the installed package database.
+	At present this is implemented as a text backend in /var/db/pkg.
+ """
+
+ import re
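+	# The pattern below flags paths that need normalization: double
+	# slashes, paths that don't start with '/', trailing slashes, and
+	# '.' or '..' path components.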
+ _normalize_needed = re.compile(r'//|^[^/]|./$|(^|/)\.\.?(/|$)')
+
+ _contents_re = re.compile(r'^(' + \
+ r'(?P<dir>(dev|dir|fif) (.+))|' + \
+ r'(?P<obj>(obj) (.+) (\S+) (\d+))|' + \
+ r'(?P<sym>(sym) (.+) -> (.+) ((\d+)|(?P<oldsym>(' + \
+ r'\(\d+, \d+L, \d+L, \d+, \d+, \d+, \d+L, \d+, (\d+), \d+\)))))' + \
+ r')$'
+ )
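+	# Hypothetical CONTENTS entries matched by the pattern above (the
+	# paths, md5 and mtime values are made up for illustration):
+	#
+	#   dir /usr/bin
+	#   obj /usr/bin/foo d41d8cd98f00b204e9800998ecf8427e 1297919878
+	#   sym /usr/lib/libfoo.so -> libfoo.so.1 1297919878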
+
+ def __init__(self, cat, pkg, myroot=None, settings=None, treetype=None,
+ vartree=None, blockers=None, scheduler=None, pipe=None):
+ """
+ Creates a DBlink object for a given CPV.
+ The given CPV may not be present in the database already.
+
+ @param cat: Category
+ @type cat: String
+ @param pkg: Package (PV)
+ @type pkg: String
+ @param myroot: ignored, settings['ROOT'] is used instead
+ @type myroot: String (Path)
+ @param settings: Typically portage.settings
+ @type settings: portage.config
+ @param treetype: one of ['porttree','bintree','vartree']
+ @type treetype: String
+ @param vartree: an instance of vartree corresponding to myroot.
+ @type vartree: vartree
+ """
+
+ if settings is None:
+ raise TypeError("settings argument is required")
+
+ mysettings = settings
+ myroot = settings['ROOT']
+ self.cat = cat
+ self.pkg = pkg
+ self.mycpv = self.cat + "/" + self.pkg
+ self.mysplit = list(catpkgsplit(self.mycpv)[1:])
+ self.mysplit[0] = "%s/%s" % (self.cat, self.mysplit[0])
+ self.treetype = treetype
+ if vartree is None:
+ vartree = portage.db[myroot]["vartree"]
+ self.vartree = vartree
+ self._blockers = blockers
+ self._scheduler = scheduler
+
+ # WARNING: EROOT support is experimental and may be incomplete
+ # for cases in which EPREFIX is non-empty.
+ self._eroot = mysettings['EROOT']
+ self.dbroot = normalize_path(os.path.join(self._eroot, VDB_PATH))
+ self.dbcatdir = self.dbroot+"/"+cat
+ self.dbpkgdir = self.dbcatdir+"/"+pkg
+ self.dbtmpdir = self.dbcatdir+"/-MERGING-"+pkg
+ self.dbdir = self.dbpkgdir
+ self.settings = mysettings
+ self._verbose = self.settings.get("PORTAGE_VERBOSE") == "1"
+
+ self.myroot=myroot
+ self._installed_instance = None
+ self.contentscache = None
+ self._contents_inodes = None
+ self._contents_basenames = None
+ self._linkmap_broken = False
+ self._md5_merge_map = {}
+ self._hash_key = (self.myroot, self.mycpv)
+ self._protect_obj = None
+ self._pipe = pipe
+
+ def __hash__(self):
+ return hash(self._hash_key)
+
+ def __eq__(self, other):
+ return isinstance(other, dblink) and \
+ self._hash_key == other._hash_key
+
+ def _get_protect_obj(self):
+
+ if self._protect_obj is None:
+ self._protect_obj = ConfigProtect(self._eroot,
+ portage.util.shlex_split(
+ self.settings.get("CONFIG_PROTECT", "")),
+ portage.util.shlex_split(
+ self.settings.get("CONFIG_PROTECT_MASK", "")))
+
+ return self._protect_obj
+
+ def isprotected(self, obj):
+ return self._get_protect_obj().isprotected(obj)
+
+ def updateprotect(self):
+ self._get_protect_obj().updateprotect()
+
+ def lockdb(self):
+ self.vartree.dbapi.lock()
+
+ def unlockdb(self):
+ self.vartree.dbapi.unlock()
+
+ def getpath(self):
+ "return path to location of db information (for >>> informational display)"
+ return self.dbdir
+
+ def exists(self):
+ "does the db entry exist? boolean."
+ return os.path.exists(self.dbdir)
+
+ def delete(self):
+ """
+ Remove this entry from the database
+ """
+ if not os.path.exists(self.dbdir):
+ return
+
+ # Check validity of self.dbdir before attempting to remove it.
+ if not self.dbdir.startswith(self.dbroot):
+ writemsg(_("portage.dblink.delete(): invalid dbdir: %s\n") % \
+ self.dbdir, noiselevel=-1)
+ return
+
+ shutil.rmtree(self.dbdir)
+ # If empty, remove parent category directory.
+ try:
+ os.rmdir(os.path.dirname(self.dbdir))
+ except OSError:
+ pass
+ self.vartree.dbapi._remove(self)
+
+ def clearcontents(self):
+ """
+ For a given db entry (self), erase the CONTENTS values.
+ """
+ self.lockdb()
+ try:
+ if os.path.exists(self.dbdir+"/CONTENTS"):
+ os.unlink(self.dbdir+"/CONTENTS")
+ finally:
+ self.unlockdb()
+
+ def _clear_contents_cache(self):
+ self.contentscache = None
+ self._contents_inodes = None
+ self._contents_basenames = None
+
+ def getcontents(self):
+ """
+ Get the installed files of a given package (aka what that package installed)
+ """
+ contents_file = os.path.join(self.dbdir, "CONTENTS")
+ if self.contentscache is not None:
+ return self.contentscache
+ pkgfiles = {}
+ try:
+ myc = io.open(_unicode_encode(contents_file,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace')
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ self.contentscache = pkgfiles
+ return pkgfiles
+ mylines = myc.readlines()
+ myc.close()
+ null_byte = "\0"
+ normalize_needed = self._normalize_needed
+ contents_re = self._contents_re
+ obj_index = contents_re.groupindex['obj']
+ dir_index = contents_re.groupindex['dir']
+ sym_index = contents_re.groupindex['sym']
+ # The old symlink format may exist on systems that have packages
+ # which were installed many years ago (see bug #351814).
+ oldsym_index = contents_re.groupindex['oldsym']
+ # CONTENTS files already contain EPREFIX
+ myroot = self.settings['ROOT']
+ if myroot == os.path.sep:
+ myroot = None
+ # used to generate parent dir entries
+ dir_entry = (_unicode_decode("dir"),)
+ eroot_split_len = len(self.settings["EROOT"].split(os.sep)) - 1
+ pos = 0
+ errors = []
+ for pos, line in enumerate(mylines):
+ if null_byte in line:
+ # Null bytes are a common indication of corruption.
+ errors.append((pos + 1, _("Null byte found in CONTENTS entry")))
+ continue
+ line = line.rstrip("\n")
+ m = contents_re.match(line)
+ if m is None:
+ errors.append((pos + 1, _("Unrecognized CONTENTS entry")))
+ continue
+
+ if m.group(obj_index) is not None:
+ base = obj_index
+ #format: type, mtime, md5sum
+ data = (m.group(base+1), m.group(base+4), m.group(base+3))
+ elif m.group(dir_index) is not None:
+ base = dir_index
+ #format: type
+ data = (m.group(base+1),)
+ elif m.group(sym_index) is not None:
+ base = sym_index
+ if m.group(oldsym_index) is None:
+ mtime = m.group(base+5)
+ else:
+ mtime = m.group(base+8)
+ #format: type, mtime, dest
+ data = (m.group(base+1), mtime, m.group(base+3))
+ else:
+				# This won't happen as long as the regular expression
+				# is written to only match valid entries.
+ raise AssertionError(_("required group not found " + \
+ "in CONTENTS entry: '%s'") % line)
+
+ path = m.group(base+2)
+ if normalize_needed.search(path) is not None:
+ path = normalize_path(path)
+ if not path.startswith(os.path.sep):
+ path = os.path.sep + path
+
+ if myroot is not None:
+ path = os.path.join(myroot, path.lstrip(os.path.sep))
+
+ # Implicitly add parent directories, since we can't necessarily
+ # assume that they are explicitly listed in CONTENTS, and it's
+ # useful for callers if they can rely on parent directory entries
+ # being generated here (crucial for things like dblink.isowner()).
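+			# For example, an entry for /usr/bin/foo implicitly produces
+			# 'dir' entries for /usr and /usr/bin unless they are already
+			# present.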
+ path_split = path.split(os.sep)
+ path_split.pop()
+ while len(path_split) > eroot_split_len:
+ parent = os.sep.join(path_split)
+ if parent in pkgfiles:
+ break
+ pkgfiles[parent] = dir_entry
+ path_split.pop()
+
+ pkgfiles[path] = data
+
+ if errors:
+ writemsg(_("!!! Parse error in '%s'\n") % contents_file, noiselevel=-1)
+ for pos, e in errors:
+ writemsg(_("!!! line %d: %s\n") % (pos, e), noiselevel=-1)
+ self.contentscache = pkgfiles
+ return pkgfiles
+
+ def _prune_plib_registry(self, unmerge=False,
+ needed=None, preserve_paths=None):
+ # remove preserved libraries that don't have any consumers left
+ if not (self._linkmap_broken or
+ self.vartree.dbapi._linkmap is None or
+ self.vartree.dbapi._plib_registry is None):
+ self.vartree.dbapi._fs_lock()
+ plib_registry = self.vartree.dbapi._plib_registry
+ plib_registry.lock()
+ try:
+ plib_registry.load()
+
+ unmerge_with_replacement = \
+ unmerge and preserve_paths is not None
+ if unmerge_with_replacement:
+ # If self.mycpv is about to be unmerged and we
+ # have a replacement package, we want to exclude
+ # the irrelevant NEEDED data that belongs to
+ # files which are being unmerged now.
+ exclude_pkgs = (self.mycpv,)
+ else:
+ exclude_pkgs = None
+
+ self._linkmap_rebuild(exclude_pkgs=exclude_pkgs,
+ include_file=needed, preserve_paths=preserve_paths)
+
+ if unmerge:
+ unmerge_preserve = None
+ if not unmerge_with_replacement:
+ unmerge_preserve = \
+ self._find_libs_to_preserve(unmerge=True)
+ counter = self.vartree.dbapi.cpv_counter(self.mycpv)
+ plib_registry.unregister(self.mycpv,
+ self.settings["SLOT"], counter)
+ if unmerge_preserve:
+ for path in sorted(unmerge_preserve):
+ contents_key = self._match_contents(path)
+ if not contents_key:
+ continue
+ obj_type = self.getcontents()[contents_key][0]
+ self._display_merge(_(">>> needed %s %s\n") % \
+ (obj_type, contents_key), noiselevel=-1)
+ plib_registry.register(self.mycpv,
+ self.settings["SLOT"], counter, unmerge_preserve)
+ # Remove the preserved files from our contents
+ # so that they won't be unmerged.
+ self.vartree.dbapi.removeFromContents(self,
+ unmerge_preserve)
+
+ unmerge_no_replacement = \
+ unmerge and not unmerge_with_replacement
+ cpv_lib_map = self._find_unused_preserved_libs(
+ unmerge_no_replacement)
+ if cpv_lib_map:
+ self._remove_preserved_libs(cpv_lib_map)
+ self.vartree.dbapi.lock()
+ try:
+ for cpv, removed in cpv_lib_map.items():
+ if not self.vartree.dbapi.cpv_exists(cpv):
+ continue
+ self.vartree.dbapi.removeFromContents(cpv, removed)
+ finally:
+ self.vartree.dbapi.unlock()
+
+ plib_registry.store()
+ finally:
+ plib_registry.unlock()
+ self.vartree.dbapi._fs_unlock()
+
+ def unmerge(self, pkgfiles=None, trimworld=None, cleanup=True,
+ ldpath_mtimes=None, others_in_slot=None, needed=None,
+ preserve_paths=None):
+ """
+		Calls prerm
+		Unmerges a given package (CPV)
+		Calls postrm
+		Calls cleanrm
+		Calls env_update
+
+ @param pkgfiles: files to unmerge (generally self.getcontents() )
+ @type pkgfiles: Dictionary
+ @param trimworld: Unused
+ @type trimworld: Boolean
+ @param cleanup: cleanup to pass to doebuild (see doebuild)
+ @type cleanup: Boolean
+ @param ldpath_mtimes: mtimes to pass to env_update (see env_update)
+ @type ldpath_mtimes: Dictionary
+ @param others_in_slot: all dblink instances in this slot, excluding self
+ @type others_in_slot: list
+ @param needed: Filename containing libraries needed after unmerge.
+ @type needed: String
+ @param preserve_paths: Libraries preserved by a package instance that
+ is currently being merged. They need to be explicitly passed to the
+ LinkageMap, since they are not registered in the
+ PreservedLibsRegistry yet.
+ @type preserve_paths: set
+ @rtype: Integer
+ @returns:
+ 1. os.EX_OK if everything went well.
+ 2. return code of the failed phase (for prerm, postrm, cleanrm)
+ """
+
+ if trimworld is not None:
+ warnings.warn("The trimworld parameter of the " + \
+ "portage.dbapi.vartree.dblink.unmerge()" + \
+ " method is now unused.",
+ DeprecationWarning, stacklevel=2)
+
+ background = False
+ log_path = self.settings.get("PORTAGE_LOG_FILE")
+ if self._scheduler is None:
+ # We create a scheduler instance and use it to
+ # log unmerge output separately from merge output.
+ self._scheduler = PollScheduler().sched_iface
+ if self.settings.get("PORTAGE_BACKGROUND") == "subprocess":
+ if self.settings.get("PORTAGE_BACKGROUND_UNMERGE") == "1":
+ self.settings["PORTAGE_BACKGROUND"] = "1"
+ self.settings.backup_changes("PORTAGE_BACKGROUND")
+ background = True
+ elif self.settings.get("PORTAGE_BACKGROUND_UNMERGE") == "0":
+ self.settings["PORTAGE_BACKGROUND"] = "0"
+ self.settings.backup_changes("PORTAGE_BACKGROUND")
+ elif self.settings.get("PORTAGE_BACKGROUND") == "1":
+ background = True
+
+ self.vartree.dbapi._bump_mtime(self.mycpv)
+ showMessage = self._display_merge
+ if self.vartree.dbapi._categories is not None:
+ self.vartree.dbapi._categories = None
+ # When others_in_slot is supplied, the security check has already been
+ # done for this slot, so it shouldn't be repeated until the next
+ # replacement or unmerge operation.
+ if others_in_slot is None:
+ slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
+ slot_matches = self.vartree.dbapi.match(
+ "%s:%s" % (portage.cpv_getkey(self.mycpv), slot))
+ others_in_slot = []
+ for cur_cpv in slot_matches:
+ if cur_cpv == self.mycpv:
+ continue
+ others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
+ settings=self.settings, vartree=self.vartree,
+ treetype="vartree", pipe=self._pipe))
+
+ retval = self._security_check([self] + others_in_slot)
+ if retval:
+ return retval
+
+ contents = self.getcontents()
+ # Now, don't assume that the name of the ebuild is the same as the
+ # name of the dir; the package may have been moved.
+ myebuildpath = os.path.join(self.dbdir, self.pkg + ".ebuild")
+ failures = 0
+ ebuild_phase = "prerm"
+ mystuff = os.listdir(self.dbdir)
+ for x in mystuff:
+ if x.endswith(".ebuild"):
+ if x[:-7] != self.pkg:
+ # Clean up after vardbapi.move_ent() breakage in
+ # portage versions before 2.1.2
+ os.rename(os.path.join(self.dbdir, x), myebuildpath)
+ write_atomic(os.path.join(self.dbdir, "PF"), self.pkg+"\n")
+ break
+
+ if self.mycpv != self.settings.mycpv or \
+ "EAPI" not in self.settings.configdict["pkg"]:
+ # We avoid a redundant setcpv call here when
+ # the caller has already taken care of it.
+ self.settings.setcpv(self.mycpv, mydb=self.vartree.dbapi)
+
+ eapi_unsupported = False
+ try:
+ doebuild_environment(myebuildpath, "prerm",
+ settings=self.settings, db=self.vartree.dbapi)
+ except UnsupportedAPIException as e:
+ eapi_unsupported = e
+
+ self._prune_plib_registry(unmerge=True, needed=needed,
+ preserve_paths=preserve_paths)
+
+ builddir_lock = None
+ scheduler = self._scheduler
+ retval = os.EX_OK
+ try:
+ # Only create builddir_lock if the caller
+ # has not already acquired the lock.
+ if "PORTAGE_BUILDIR_LOCKED" not in self.settings:
+ builddir_lock = EbuildBuildDir(
+ scheduler=scheduler,
+ settings=self.settings)
+ builddir_lock.lock()
+ prepare_build_dirs(settings=self.settings, cleanup=True)
+ log_path = self.settings.get("PORTAGE_LOG_FILE")
+
+ # Log the error after PORTAGE_LOG_FILE is initialized
+ # by prepare_build_dirs above.
+ if eapi_unsupported:
+ # Sometimes this happens due to corruption of the EAPI file.
+ failures += 1
+ showMessage(_("!!! FAILED prerm: %s\n") % \
+ os.path.join(self.dbdir, "EAPI"),
+ level=logging.ERROR, noiselevel=-1)
+ showMessage(_unicode_decode("%s\n") % (eapi_unsupported,),
+ level=logging.ERROR, noiselevel=-1)
+ elif os.path.isfile(myebuildpath):
+ phase = EbuildPhase(background=background,
+ phase=ebuild_phase, scheduler=scheduler,
+ settings=self.settings)
+ phase.start()
+ retval = phase.wait()
+
+ # XXX: Decide how to handle failures here.
+ if retval != os.EX_OK:
+ failures += 1
+ showMessage(_("!!! FAILED prerm: %s\n") % retval,
+ level=logging.ERROR, noiselevel=-1)
+
+ self.vartree.dbapi._fs_lock()
+ try:
+ self._unmerge_pkgfiles(pkgfiles, others_in_slot)
+ finally:
+ self.vartree.dbapi._fs_unlock()
+ self._clear_contents_cache()
+
+ if not eapi_unsupported and os.path.isfile(myebuildpath):
+ ebuild_phase = "postrm"
+ phase = EbuildPhase(background=background,
+ phase=ebuild_phase, scheduler=scheduler,
+ settings=self.settings)
+ phase.start()
+ retval = phase.wait()
+
+ # XXX: Decide how to handle failures here.
+ if retval != os.EX_OK:
+ failures += 1
+ showMessage(_("!!! FAILED postrm: %s\n") % retval,
+ level=logging.ERROR, noiselevel=-1)
+
+ finally:
+ self.vartree.dbapi._bump_mtime(self.mycpv)
+ try:
+ if not eapi_unsupported and os.path.isfile(myebuildpath):
+ if retval != os.EX_OK:
+ msg_lines = []
+ msg = _("The '%(ebuild_phase)s' "
+ "phase of the '%(cpv)s' package "
+ "has failed with exit value %(retval)s.") % \
+ {"ebuild_phase":ebuild_phase, "cpv":self.mycpv,
+ "retval":retval}
+ from textwrap import wrap
+ msg_lines.extend(wrap(msg, 72))
+ msg_lines.append("")
+
+ ebuild_name = os.path.basename(myebuildpath)
+ ebuild_dir = os.path.dirname(myebuildpath)
+ msg = _("The problem occurred while executing "
+ "the ebuild file named '%(ebuild_name)s' "
+ "located in the '%(ebuild_dir)s' directory. "
+ "If necessary, manually remove "
+ "the environment.bz2 file and/or the "
+ "ebuild file located in that directory.") % \
+ {"ebuild_name":ebuild_name, "ebuild_dir":ebuild_dir}
+ msg_lines.extend(wrap(msg, 72))
+ msg_lines.append("")
+
+ msg = _("Removal "
+ "of the environment.bz2 file is "
+ "preferred since it may allow the "
+ "removal phases to execute successfully. "
+ "The ebuild will be "
+ "sourced and the eclasses "
+ "from the current portage tree will be used "
+ "when necessary. Removal of "
+ "the ebuild file will cause the "
+ "pkg_prerm() and pkg_postrm() removal "
+ "phases to be skipped entirely.")
+ msg_lines.extend(wrap(msg, 72))
+
+ self._eerror(ebuild_phase, msg_lines)
+
+ self._elog_process(phasefilter=("prerm", "postrm"))
+
+ if retval == os.EX_OK:
+ try:
+ doebuild_environment(myebuildpath, "cleanrm",
+ settings=self.settings, db=self.vartree.dbapi)
+ except UnsupportedAPIException:
+ pass
+ phase = EbuildPhase(background=background,
+ phase="cleanrm", scheduler=scheduler,
+ settings=self.settings)
+ phase.start()
+ retval = phase.wait()
+ finally:
+ if builddir_lock is not None:
+ builddir_lock.unlock()
+
+ if log_path is not None:
+
+ if not failures and 'unmerge-logs' not in self.settings.features:
+ try:
+ os.unlink(log_path)
+ except OSError:
+ pass
+
+ try:
+ st = os.stat(log_path)
+ except OSError:
+ pass
+ else:
+ if st.st_size == 0:
+ try:
+ os.unlink(log_path)
+ except OSError:
+ pass
+
+ if log_path is not None and os.path.exists(log_path):
+			# Restore PORTAGE_LOG_FILE, since it gets lost somewhere above
+			# and it needs to be set for _display_merge() to be able to log.
+			# Note that the log isn't necessarily supposed to exist: if
+			# PORT_LOGDIR is unset, then it's a temp file, so it gets
+			# cleaned up above.
+ self.settings["PORTAGE_LOG_FILE"] = log_path
+ else:
+ self.settings.pop("PORTAGE_LOG_FILE", None)
+
+ # Lock the config memory file to prevent symlink creation
+ # in merge_contents from overlapping with env-update.
+ self.vartree.dbapi._fs_lock()
+ try:
+ env_update(target_root=self.settings['ROOT'],
+ prev_mtimes=ldpath_mtimes,
+ contents=contents, env=self.settings.environ(),
+ writemsg_level=self._display_merge)
+ finally:
+ self.vartree.dbapi._fs_unlock()
+
+ return os.EX_OK
+
+ def _display_merge(self, msg, level=0, noiselevel=0):
+ if not self._verbose and noiselevel >= 0 and level < logging.WARN:
+ return
+ if self._scheduler is None:
+ writemsg_level(msg, level=level, noiselevel=noiselevel)
+ else:
+ log_path = None
+ if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+ log_path = self.settings.get("PORTAGE_LOG_FILE")
+ background = self.settings.get("PORTAGE_BACKGROUND") == "1"
+
+ if background and log_path is None:
+ if level >= logging.WARN:
+ writemsg_level(msg, level=level, noiselevel=noiselevel)
+ else:
+ self._scheduler.output(msg,
+ log_path=log_path, background=background,
+ level=level, noiselevel=noiselevel)
+
+ def _unmerge_pkgfiles(self, pkgfiles, others_in_slot):
+ """
+
+ Unmerges the contents of a package from the liveFS
+ Removes the VDB entry for self
+
+ @param pkgfiles: typically self.getcontents()
+ @type pkgfiles: Dictionary { filename: [ 'type', '?', 'md5sum' ] }
+ @param others_in_slot: all dblink instances in this slot, excluding self
+ @type others_in_slot: list
+ @rtype: None
+ """
+
+ os = _os_merge
+ perf_md5 = perform_md5
+ showMessage = self._display_merge
+
+ if not pkgfiles:
+ showMessage(_("No package files given... Grabbing a set.\n"))
+ pkgfiles = self.getcontents()
+
+ if others_in_slot is None:
+ others_in_slot = []
+ slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
+ slot_matches = self.vartree.dbapi.match(
+ "%s:%s" % (portage.cpv_getkey(self.mycpv), slot))
+ for cur_cpv in slot_matches:
+ if cur_cpv == self.mycpv:
+ continue
+ others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
+ settings=self.settings,
+ vartree=self.vartree, treetype="vartree", pipe=self._pipe))
+
+ dest_root = self._eroot
+ dest_root_len = len(dest_root) - 1
+
+ cfgfiledict = grabdict(self.vartree.dbapi._conf_mem_file)
+ stale_confmem = []
+ protected_symlinks = {}
+
+ unmerge_orphans = "unmerge-orphans" in self.settings.features
+ calc_prelink = "prelink-checksums" in self.settings.features
+
+ if pkgfiles:
+ self.updateprotect()
+ mykeys = list(pkgfiles)
+ mykeys.sort()
+ mykeys.reverse()
+
+			# Process symlinks second-to-last, directories last.
+ mydirs = set()
+ ignored_unlink_errnos = (
+ errno.EBUSY, errno.ENOENT,
+ errno.ENOTDIR, errno.EISDIR)
+ ignored_rmdir_errnos = (
+ errno.EEXIST, errno.ENOTEMPTY,
+ errno.EBUSY, errno.ENOENT,
+ errno.ENOTDIR, errno.EISDIR,
+ errno.EPERM)
+ modprotect = os.path.join(self._eroot, "lib/modules/")
+
+ def unlink(file_name, lstatobj):
+ if bsd_chflags:
+ if lstatobj.st_flags != 0:
+ bsd_chflags.lchflags(file_name, 0)
+ parent_name = os.path.dirname(file_name)
+ # Use normal stat/chflags for the parent since we want to
+ # follow any symlinks to the real parent directory.
+ pflags = os.stat(parent_name).st_flags
+ if pflags != 0:
+ bsd_chflags.chflags(parent_name, 0)
+ try:
+ if not stat.S_ISLNK(lstatobj.st_mode):
+ # Remove permissions to ensure that any hardlinks to
+ # suid/sgid files are rendered harmless.
+ os.chmod(file_name, 0)
+ os.unlink(file_name)
+ except OSError as ose:
+ # If the chmod or unlink fails, you are in trouble.
+ # With Prefix this can be because the file is owned
+ # by someone else (a screwup by root?), on a normal
+ # system maybe filesystem corruption. In any case,
+ # if we backtrace and die here, we leave the system
+ # in a totally undefined state, hence we just bleed
+ # like hell and continue to hopefully finish all our
+ # administrative and pkg_postinst stuff.
+ self._eerror("postrm",
+ ["Could not chmod or unlink '%s': %s" % \
+ (file_name, ose)])
+ finally:
+ if bsd_chflags and pflags != 0:
+ # Restore the parent flags we saved before unlinking
+ bsd_chflags.chflags(parent_name, pflags)
+
+ def show_unmerge(zing, desc, file_type, file_name):
+ showMessage("%s %s %s %s\n" % \
+ (zing, desc.ljust(8), file_type, file_name))
+
+ unmerge_desc = {}
+ unmerge_desc["cfgpro"] = _("cfgpro")
+ unmerge_desc["replaced"] = _("replaced")
+ unmerge_desc["!dir"] = _("!dir")
+ unmerge_desc["!empty"] = _("!empty")
+ unmerge_desc["!fif"] = _("!fif")
+ unmerge_desc["!found"] = _("!found")
+ unmerge_desc["!md5"] = _("!md5")
+ unmerge_desc["!mtime"] = _("!mtime")
+ unmerge_desc["!obj"] = _("!obj")
+ unmerge_desc["!sym"] = _("!sym")
+
+ real_root = self.settings['ROOT']
+ real_root_len = len(real_root) - 1
+ eroot_split_len = len(self.settings["EROOT"].split(os.sep)) - 1
+
+ # These files are generated by emerge, so we need to remove
+ # them when they are the only thing left in a directory.
+ infodir_cleanup = frozenset(["dir", "dir.old"])
+ infodirs = frozenset(infodir for infodir in chain(
+ self.settings.get("INFOPATH", "").split(":"),
+ self.settings.get("INFODIR", "").split(":")) if infodir)
+ infodirs_inodes = set()
+ for infodir in infodirs:
+ infodir = os.path.join(real_root, infodir.lstrip(os.sep))
+ try:
+ statobj = os.stat(infodir)
+ except OSError:
+ pass
+ else:
+ infodirs_inodes.add((statobj.st_dev, statobj.st_ino))
+
+ for i, objkey in enumerate(mykeys):
+
+ obj = normalize_path(objkey)
+ if os is _os_merge:
+ try:
+ _unicode_encode(obj,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeEncodeError:
+ # The package appears to have been merged with a
+ # different value of sys.getfilesystemencoding(),
+ # so fall back to utf_8 if appropriate.
+ try:
+ _unicode_encode(obj,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeEncodeError:
+ pass
+ else:
+ os = portage.os
+ perf_md5 = portage.checksum.perform_md5
+
+ file_data = pkgfiles[objkey]
+ file_type = file_data[0]
+ statobj = None
+ try:
+ statobj = os.stat(obj)
+ except OSError:
+ pass
+ lstatobj = None
+ try:
+ lstatobj = os.lstat(obj)
+ except (OSError, AttributeError):
+ pass
+ islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
+ if lstatobj is None:
+ show_unmerge("---", unmerge_desc["!found"], file_type, obj)
+ continue
+ # don't use EROOT, CONTENTS entries already contain EPREFIX
+ if obj.startswith(real_root):
+ relative_path = obj[real_root_len:]
+ is_owned = False
+ for dblnk in others_in_slot:
+ if dblnk.isowner(relative_path):
+ is_owned = True
+ break
+
+ if file_type == "sym" and is_owned and \
+ (islink and statobj and stat.S_ISDIR(statobj.st_mode)):
+ # A new instance of this package claims the file, so
+ # don't unmerge it. If the file is symlink to a
+ # directory and the unmerging package installed it as
+ # a symlink, but the new owner has it listed as a
+ # directory, then we'll produce a warning since the
+ # symlink is a sort of orphan in this case (see
+ # bug #326685).
+ symlink_orphan = False
+ for dblnk in others_in_slot:
+ parent_contents_key = \
+ dblnk._match_contents(relative_path)
+ if not parent_contents_key:
+ continue
+ if not parent_contents_key.startswith(
+ real_root):
+ continue
+ if dblnk.getcontents()[
+ parent_contents_key][0] == "dir":
+ symlink_orphan = True
+ break
+
+ if symlink_orphan:
+ protected_symlinks.setdefault(
+ (statobj.st_dev, statobj.st_ino),
+ []).append(relative_path)
+
+ if is_owned:
+ show_unmerge("---", unmerge_desc["replaced"], file_type, obj)
+ continue
+ elif relative_path in cfgfiledict:
+ stale_confmem.append(relative_path)
+ # next line includes a tweak to protect modules from being unmerged,
+ # but we don't protect modules from being overwritten if they are
+ # upgraded. We effectively only want one half of the config protection
+ # functionality for /lib/modules. For portage-ng both capabilities
+ # should be able to be independently specified.
+ # TODO: For rebuilds, re-parent previous modules to the new
+ # installed instance (so they are not orphans). For normal
+ # uninstall (not rebuild/reinstall), remove the modules along
+ # with all other files (leave no orphans).
+ if obj.startswith(modprotect):
+ show_unmerge("---", unmerge_desc["cfgpro"], file_type, obj)
+ continue
+
+ # Don't unlink symlinks to directories here since that can
+ # remove /lib and /usr/lib symlinks.
+ if unmerge_orphans and \
+ lstatobj and not stat.S_ISDIR(lstatobj.st_mode) and \
+ not (islink and statobj and stat.S_ISDIR(statobj.st_mode)) and \
+ not self.isprotected(obj):
+ try:
+ unlink(obj, lstatobj)
+ except EnvironmentError as e:
+ if e.errno not in ignored_unlink_errnos:
+ raise
+ del e
+ show_unmerge("<<<", "", file_type, obj)
+ continue
+
+ lmtime = str(lstatobj[stat.ST_MTIME])
+ if (pkgfiles[objkey][0] not in ("dir", "fif", "dev")) and (lmtime != pkgfiles[objkey][1]):
+ show_unmerge("---", unmerge_desc["!mtime"], file_type, obj)
+ continue
+
+ if pkgfiles[objkey][0] == "dir":
+ if lstatobj is None or not stat.S_ISDIR(lstatobj.st_mode):
+ show_unmerge("---", unmerge_desc["!dir"], file_type, obj)
+ continue
+ mydirs.add((obj, (lstatobj.st_dev, lstatobj.st_ino)))
+ elif pkgfiles[objkey][0] == "sym":
+ if not islink:
+ show_unmerge("---", unmerge_desc["!sym"], file_type, obj)
+ continue
+
+ # If this symlink points to a directory then we don't want
+ # to unmerge it if there are any other packages that
+ # installed files into the directory via this symlink
+ # (see bug #326685).
+ # TODO: Resolving a symlink to a directory will require
+ # simulation if $ROOT != / and the link is not relative.
+ if islink and statobj and stat.S_ISDIR(statobj.st_mode) \
+ and obj.startswith(real_root):
+
+ relative_path = obj[real_root_len:]
+ try:
+ target_dir_contents = os.listdir(obj)
+ except OSError:
+ pass
+ else:
+ if target_dir_contents:
+ # If all the children are regular files owned
+ # by this package, then the symlink should be
+ # safe to unmerge.
+ all_owned = True
+ for child in target_dir_contents:
+ child = os.path.join(relative_path, child)
+ if not self.isowner(child):
+ all_owned = False
+ break
+ try:
+ child_lstat = os.lstat(os.path.join(
+ real_root, child.lstrip(os.sep)))
+ except OSError:
+ continue
+
+ if not stat.S_ISREG(child_lstat.st_mode):
+ # Nested symlinks or directories make
+ # the issue very complex, so just
+ # preserve the symlink in order to be
+ # on the safe side.
+ all_owned = False
+ break
+
+ if not all_owned:
+ protected_symlinks.setdefault(
+ (statobj.st_dev, statobj.st_ino),
+ []).append(relative_path)
+ show_unmerge("---", unmerge_desc["!empty"],
+ file_type, obj)
+ continue
+
+ # Go ahead and unlink symlinks to directories here when
+ # they're actually recorded as symlinks in the contents.
+ # Normally, symlinks such as /lib -> lib64 are not recorded
+ # as symlinks in the contents of a package. If a package
+ # installs something into ${D}/lib/, it is recorded in the
+ # contents as a directory even if it happens to correspond
+ # to a symlink when it's merged to the live filesystem.
+ try:
+ unlink(obj, lstatobj)
+ show_unmerge("<<<", "", file_type, obj)
+ except (OSError, IOError) as e:
+ if e.errno not in ignored_unlink_errnos:
+ raise
+ del e
+ show_unmerge("!!!", "", file_type, obj)
+ elif pkgfiles[objkey][0] == "obj":
+ if statobj is None or not stat.S_ISREG(statobj.st_mode):
+ show_unmerge("---", unmerge_desc["!obj"], file_type, obj)
+ continue
+ mymd5 = None
+ try:
+ mymd5 = perf_md5(obj, calc_prelink=calc_prelink)
+ except FileNotFound as e:
+ # the file has disappeared between now and our stat call
+ show_unmerge("---", unmerge_desc["!obj"], file_type, obj)
+ continue
+
+					# The recorded md5 is lower-cased because db entries
+					# used to be in upper-case; lower-casing preserves
+					# backwards compatibility.
+ if mymd5 != pkgfiles[objkey][2].lower():
+ show_unmerge("---", unmerge_desc["!md5"], file_type, obj)
+ continue
+ try:
+ unlink(obj, lstatobj)
+ except (OSError, IOError) as e:
+ if e.errno not in ignored_unlink_errnos:
+ raise
+ del e
+ show_unmerge("<<<", "", file_type, obj)
+ elif pkgfiles[objkey][0] == "fif":
+ if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
+ show_unmerge("---", unmerge_desc["!fif"], file_type, obj)
+ continue
+ show_unmerge("---", "", file_type, obj)
+ elif pkgfiles[objkey][0] == "dev":
+ show_unmerge("---", "", file_type, obj)
+
+ mydirs = sorted(mydirs)
+ mydirs.reverse()
+
+ for obj, inode_key in mydirs:
+ # Treat any directory named "info" as a candidate here,
+ # since it might have been in INFOPATH previously even
+ # though it may not be there now.
+ if inode_key in infodirs_inodes or \
+ os.path.basename(obj) == "info":
+ try:
+ remaining = os.listdir(obj)
+ except OSError:
+ pass
+ else:
+ cleanup_info_dir = ()
+ if remaining and \
+ len(remaining) <= len(infodir_cleanup):
+ if not set(remaining).difference(infodir_cleanup):
+ cleanup_info_dir = remaining
+
+ for child in cleanup_info_dir:
+ child = os.path.join(obj, child)
+ try:
+ lstatobj = os.lstat(child)
+ if stat.S_ISREG(lstatobj.st_mode):
+ unlink(child, lstatobj)
+ show_unmerge("<<<", "", "obj", child)
+ except EnvironmentError as e:
+ if e.errno not in ignored_unlink_errnos:
+ raise
+ del e
+ show_unmerge("!!!", "", "obj", child)
+ try:
+ if bsd_chflags:
+ lstatobj = os.lstat(obj)
+ if lstatobj.st_flags != 0:
+ bsd_chflags.lchflags(obj, 0)
+ parent_name = os.path.dirname(obj)
+ # Use normal stat/chflags for the parent since we want to
+ # follow any symlinks to the real parent directory.
+ pflags = os.stat(parent_name).st_flags
+ if pflags != 0:
+ bsd_chflags.chflags(parent_name, 0)
+ try:
+ os.rmdir(obj)
+ finally:
+ if bsd_chflags and pflags != 0:
+ # Restore the parent flags we saved before unlinking
+ bsd_chflags.chflags(parent_name, pflags)
+ show_unmerge("<<<", "", "dir", obj)
+ except EnvironmentError as e:
+ if e.errno not in ignored_rmdir_errnos:
+ raise
+ if e.errno != errno.ENOENT:
+ show_unmerge("---", unmerge_desc["!empty"], "dir", obj)
+ del e
+ else:
+ # When a directory is successfully removed, there's
+ # no need to protect symlinks that point to it.
+ unmerge_syms = protected_symlinks.pop(inode_key, None)
+ if unmerge_syms is not None:
+ for relative_path in unmerge_syms:
+ obj = os.path.join(real_root,
+ relative_path.lstrip(os.sep))
+ try:
+ unlink(obj, os.lstat(obj))
+ show_unmerge("<<<", "", "sym", obj)
+ except (OSError, IOError) as e:
+ if e.errno not in ignored_unlink_errnos:
+ raise
+ del e
+ show_unmerge("!!!", "", "sym", obj)
+
+ if protected_symlinks:
+ msg = "One or more symlinks to directories have been " + \
+ "preserved in order to ensure that files installed " + \
+ "via these symlinks remain accessible:"
+ lines = textwrap.wrap(msg, 72)
+ lines.append("")
+ flat_list = set()
+ flat_list.update(*protected_symlinks.values())
+ flat_list = sorted(flat_list)
+ for f in flat_list:
+ lines.append("\t%s" % (os.path.join(real_root,
+ f.lstrip(os.sep))))
+ lines.append("")
+ self._elog("eerror", "postrm", lines)
+
+ # Remove stale entries from config memory.
+ if stale_confmem:
+ for filename in stale_confmem:
+ del cfgfiledict[filename]
+ writedict(cfgfiledict, self.vartree.dbapi._conf_mem_file)
+
+		# Remove self from the vartree database so that our own virtual
+		# gets zapped if we're the last node.
+ self.vartree.zap(self.mycpv)
+
+ def isowner(self, filename, destroot=None):
+ """
+ Check if a file belongs to this package. This may
+ result in a stat call for the parent directory of
+ every installed file, since the inode numbers are
+ used to work around the problem of ambiguous paths
+ caused by symlinked directories. The results of
+ stat calls are cached to optimize multiple calls
+ to this method.
+
+ @param filename:
+ @type filename:
+ @param destroot:
+ @type destroot:
+ @rtype: Boolean
+ @returns:
+ 1. True if this package owns the file.
+ 2. False if this package does not own the file.
+ """
+
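+		# Illustrative sketch (not part of the original code); the path
+		# below is hypothetical:
+		#
+		#   if dblnk.isowner('/usr/bin/foo'):
+		#       print('owned by %s' % (dblnk.mycpv,))
+		#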
+ if destroot is not None and destroot != self._eroot:
+ warnings.warn("The second parameter of the " + \
+ "portage.dbapi.vartree.dblink.isowner()" + \
+ " is now unused. Instead " + \
+ "self.settings['EROOT'] will be used.",
+ DeprecationWarning, stacklevel=2)
+
+ return bool(self._match_contents(filename))
+
+ def _match_contents(self, filename, destroot=None):
+ """
+ The matching contents entry is returned, which is useful
+ since the path may differ from the one given by the caller,
+ due to symlinks.
+
+ @rtype: String
+ @return: the contents entry corresponding to the given path, or False
+ if the file is not owned by this package.
+ """
+
+ filename = _unicode_decode(filename,
+ encoding=_encodings['content'], errors='strict')
+
+ if destroot is not None and destroot != self._eroot:
+ warnings.warn("The second parameter of the " + \
+ "portage.dbapi.vartree.dblink._match_contents()" + \
+ " is now unused. Instead " + \
+ "self.settings['ROOT'] will be used.",
+ DeprecationWarning, stacklevel=2)
+
+ # don't use EROOT here, image already contains EPREFIX
+ destroot = self.settings['ROOT']
+
+		# The given filename argument might have a different encoding than
+		# the filenames contained in the contents, so use separate wrapped os
+ # modules for each. The basename is more likely to contain non-ascii
+ # characters than the directory path, so use os_filename_arg for all
+ # operations involving the basename of the filename arg.
+ os_filename_arg = _os_merge
+ os = _os_merge
+
+ try:
+ _unicode_encode(filename,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeEncodeError:
+ # The package appears to have been merged with a
+ # different value of sys.getfilesystemencoding(),
+ # so fall back to utf_8 if appropriate.
+ try:
+ _unicode_encode(filename,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeEncodeError:
+ pass
+ else:
+ os_filename_arg = portage.os
+
+ destfile = normalize_path(
+ os_filename_arg.path.join(destroot,
+ filename.lstrip(os_filename_arg.path.sep)))
+
+ pkgfiles = self.getcontents()
+ if pkgfiles and destfile in pkgfiles:
+ return destfile
+ if pkgfiles:
+ basename = os_filename_arg.path.basename(destfile)
+ if self._contents_basenames is None:
+
+ try:
+ for x in pkgfiles:
+ _unicode_encode(x,
+ encoding=_encodings['merge'],
+ errors='strict')
+ except UnicodeEncodeError:
+ # The package appears to have been merged with a
+ # different value of sys.getfilesystemencoding(),
+ # so fall back to utf_8 if appropriate.
+ try:
+ for x in pkgfiles:
+ _unicode_encode(x,
+ encoding=_encodings['fs'],
+ errors='strict')
+ except UnicodeEncodeError:
+ pass
+ else:
+ os = portage.os
+
+ self._contents_basenames = set(
+ os.path.basename(x) for x in pkgfiles)
+ if basename not in self._contents_basenames:
+ # This is a shortcut that, in most cases, allows us to
+ # eliminate this package as an owner without the need
+ # to examine inode numbers of parent directories.
+ return False
+
+ # Use stat rather than lstat since we want to follow
+ # any symlinks to the real parent directory.
+ parent_path = os_filename_arg.path.dirname(destfile)
+ try:
+ parent_stat = os_filename_arg.stat(parent_path)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ return False
+ if self._contents_inodes is None:
+
+ if os is _os_merge:
+ try:
+ for x in pkgfiles:
+ _unicode_encode(x,
+ encoding=_encodings['merge'],
+ errors='strict')
+ except UnicodeEncodeError:
+ # The package appears to have been merged with a
+ # different value of sys.getfilesystemencoding(),
+ # so fall back to utf_8 if appropriate.
+ try:
+ for x in pkgfiles:
+ _unicode_encode(x,
+ encoding=_encodings['fs'],
+ errors='strict')
+ except UnicodeEncodeError:
+ pass
+ else:
+ os = portage.os
+
+ self._contents_inodes = {}
+ parent_paths = set()
+ for x in pkgfiles:
+ p_path = os.path.dirname(x)
+ if p_path in parent_paths:
+ continue
+ parent_paths.add(p_path)
+ try:
+ s = os.stat(p_path)
+ except OSError:
+ pass
+ else:
+ inode_key = (s.st_dev, s.st_ino)
+ # Use lists of paths in case multiple
+ # paths reference the same inode.
+ p_path_list = self._contents_inodes.get(inode_key)
+ if p_path_list is None:
+ p_path_list = []
+ self._contents_inodes[inode_key] = p_path_list
+ if p_path not in p_path_list:
+ p_path_list.append(p_path)
+
+ p_path_list = self._contents_inodes.get(
+ (parent_stat.st_dev, parent_stat.st_ino))
+ if p_path_list:
+ for p_path in p_path_list:
+ x = os_filename_arg.path.join(p_path, basename)
+ if x in pkgfiles:
+ return x
+
+ return False
+
+ def _linkmap_rebuild(self, **kwargs):
+ """
+ Rebuild the self._linkmap if it's not broken due to missing
+ scanelf binary. Also, return early if preserve-libs is disabled
+ and the preserve-libs registry is empty.
+ """
+ if self._linkmap_broken or \
+ self.vartree.dbapi._linkmap is None or \
+ self.vartree.dbapi._plib_registry is None or \
+ ("preserve-libs" not in self.settings.features and \
+ not self.vartree.dbapi._plib_registry.hasEntries()):
+ return
+ try:
+ self.vartree.dbapi._linkmap.rebuild(**kwargs)
+ except CommandNotFound as e:
+ self._linkmap_broken = True
+ self._display_merge(_("!!! Disabling preserve-libs " \
+ "due to error: Command Not Found: %s\n") % (e,),
+ level=logging.ERROR, noiselevel=-1)
+
+ def _find_libs_to_preserve(self, unmerge=False):
+ """
+ Get set of relative paths for libraries to be preserved. When
+ unmerge is False, file paths to preserve are selected from
+ self._installed_instance. Otherwise, paths are selected from
+ self.
+ """
+ if self._linkmap_broken or \
+ self.vartree.dbapi._linkmap is None or \
+ self.vartree.dbapi._plib_registry is None or \
+ (not unmerge and self._installed_instance is None) or \
+ "preserve-libs" not in self.settings.features:
+ return set()
+
+ os = _os_merge
+ linkmap = self.vartree.dbapi._linkmap
+ if unmerge:
+ installed_instance = self
+ else:
+ installed_instance = self._installed_instance
+ old_contents = installed_instance.getcontents()
+ root = self.settings['ROOT']
+ root_len = len(root) - 1
+ lib_graph = digraph()
+ path_node_map = {}
+
+ def path_to_node(path):
+ node = path_node_map.get(path)
+ if node is None:
+ node = LinkageMap._LibGraphNode(linkmap._obj_key(path))
+ alt_path_node = lib_graph.get(node)
+ if alt_path_node is not None:
+ node = alt_path_node
+ node.alt_paths.add(path)
+ path_node_map[path] = node
+ return node
+
+ consumer_map = {}
+ provider_nodes = set()
+ # Create provider nodes and add them to the graph.
+ for f_abs in old_contents:
+
+ if os is _os_merge:
+ try:
+ _unicode_encode(f_abs,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeEncodeError:
+ # The package appears to have been merged with a
+ # different value of sys.getfilesystemencoding(),
+ # so fall back to utf_8 if appropriate.
+ try:
+ _unicode_encode(f_abs,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeEncodeError:
+ pass
+ else:
+ os = portage.os
+
+ f = f_abs[root_len:]
+ if not unmerge and self.isowner(f):
+				# We have an identically named replacement file,
+ # so we don't try to preserve the old copy.
+ continue
+ try:
+ consumers = linkmap.findConsumers(f,
+ exclude_providers=(installed_instance.isowner,))
+ except KeyError:
+ continue
+ if not consumers:
+ continue
+ provider_node = path_to_node(f)
+ lib_graph.add(provider_node, None)
+ provider_nodes.add(provider_node)
+ consumer_map[provider_node] = consumers
+
+ # Create consumer nodes and add them to the graph.
+ # Note that consumers can also be providers.
+ for provider_node, consumers in consumer_map.items():
+ for c in consumers:
+ consumer_node = path_to_node(c)
+ if installed_instance.isowner(c) and \
+ consumer_node not in provider_nodes:
+ # This is not a provider, so it will be uninstalled.
+ continue
+ lib_graph.add(provider_node, consumer_node)
+
+ # Locate nodes which should be preserved. They consist of all
+ # providers that are reachable from consumers that are not
+ # providers themselves.
+ preserve_nodes = set()
+ for consumer_node in lib_graph.root_nodes():
+ if consumer_node in provider_nodes:
+ continue
+ # Preserve all providers that are reachable from this consumer.
+ node_stack = lib_graph.child_nodes(consumer_node)
+ while node_stack:
+ provider_node = node_stack.pop()
+ if provider_node in preserve_nodes:
+ continue
+ preserve_nodes.add(provider_node)
+ node_stack.extend(lib_graph.child_nodes(provider_node))
+
+ preserve_paths = set()
+ for preserve_node in preserve_nodes:
+ # Preserve the library itself, and also preserve the
+ # soname symlink which is the only symlink that is
+ # strictly required.
+ hardlinks = set()
+ soname_symlinks = set()
+ soname = linkmap.getSoname(next(iter(preserve_node.alt_paths)))
+ for f in preserve_node.alt_paths:
+ f_abs = os.path.join(root, f.lstrip(os.sep))
+ try:
+ if stat.S_ISREG(os.lstat(f_abs).st_mode):
+ hardlinks.add(f)
+ elif os.path.basename(f) == soname:
+ soname_symlinks.add(f)
+ except OSError:
+ pass
+
+ if hardlinks:
+ preserve_paths.update(hardlinks)
+ preserve_paths.update(soname_symlinks)
+
+ return preserve_paths
+
+ def _add_preserve_libs_to_contents(self, preserve_paths):
+ """
+ Preserve libs returned from _find_libs_to_preserve().
+ """
+
+ if not preserve_paths:
+ return
+
+ os = _os_merge
+ showMessage = self._display_merge
+ root = self.settings['ROOT']
+
+ # Copy contents entries from the old package to the new one.
+ new_contents = self.getcontents().copy()
+ old_contents = self._installed_instance.getcontents()
+ for f in sorted(preserve_paths):
+ f = _unicode_decode(f,
+ encoding=_encodings['content'], errors='strict')
+ f_abs = os.path.join(root, f.lstrip(os.sep))
+ contents_entry = old_contents.get(f_abs)
+ if contents_entry is None:
+ # This will probably never happen, but it might if one of the
+ # paths returned from findConsumers() refers to one of the libs
+ # that should be preserved yet the path is not listed in the
+ # contents. Such a path might belong to some other package, so
+ # it shouldn't be preserved here.
+ showMessage(_("!!! File '%s' will not be preserved "
+ "due to missing contents entry\n") % (f_abs,),
+ level=logging.ERROR, noiselevel=-1)
+ preserve_paths.remove(f)
+ continue
+ new_contents[f_abs] = contents_entry
+ obj_type = contents_entry[0]
+ showMessage(_(">>> needed %s %s\n") % (obj_type, f_abs),
+ noiselevel=-1)
+ # Add parent directories to contents if necessary.
+ parent_dir = os.path.dirname(f_abs)
+ while len(parent_dir) > len(root):
+ new_contents[parent_dir] = ["dir"]
+ prev = parent_dir
+ parent_dir = os.path.dirname(parent_dir)
+ if prev == parent_dir:
+ break
+ outfile = atomic_ofstream(os.path.join(self.dbtmpdir, "CONTENTS"))
+ write_contents(new_contents, root, outfile)
+ outfile.close()
+ self._clear_contents_cache()
+
+ def _find_unused_preserved_libs(self, unmerge_no_replacement):
+ """
+ Find preserved libraries that don't have any consumers left.
+ """
+
+ if self._linkmap_broken or \
+ self.vartree.dbapi._linkmap is None or \
+ self.vartree.dbapi._plib_registry is None or \
+ not self.vartree.dbapi._plib_registry.hasEntries():
+ return {}
+
+ # Since preserved libraries can be consumers of other preserved
+ # libraries, use a graph to track consumer relationships.
+ plib_dict = self.vartree.dbapi._plib_registry.getPreservedLibs()
+ linkmap = self.vartree.dbapi._linkmap
+ lib_graph = digraph()
+ preserved_nodes = set()
+ preserved_paths = set()
+ path_cpv_map = {}
+ path_node_map = {}
+ root = self.settings['ROOT']
+
+ def path_to_node(path):
+ node = path_node_map.get(path)
+ if node is None:
+ node = LinkageMap._LibGraphNode(linkmap._obj_key(path))
+ alt_path_node = lib_graph.get(node)
+ if alt_path_node is not None:
+ node = alt_path_node
+ node.alt_paths.add(path)
+ path_node_map[path] = node
+ return node
+
+ for cpv, plibs in plib_dict.items():
+ for f in plibs:
+ path_cpv_map[f] = cpv
+ preserved_node = path_to_node(f)
+ if not preserved_node.file_exists():
+ continue
+ lib_graph.add(preserved_node, None)
+ preserved_paths.add(f)
+ preserved_nodes.add(preserved_node)
+ for c in self.vartree.dbapi._linkmap.findConsumers(f):
+ consumer_node = path_to_node(c)
+ if not consumer_node.file_exists():
+ continue
+ # Note that consumers may also be providers.
+ lib_graph.add(preserved_node, consumer_node)
+
+ # Eliminate consumers having providers with the same soname as an
+ # installed library that is not preserved. This eliminates
+ # libraries that are erroneously preserved due to a move from one
+ # directory to another.
+ # Also eliminate consumers that are going to be unmerged if
+ # unmerge_no_replacement is True.
+ provider_cache = {}
+ for preserved_node in preserved_nodes:
+ soname = linkmap.getSoname(preserved_node)
+ for consumer_node in lib_graph.parent_nodes(preserved_node):
+ if consumer_node in preserved_nodes:
+ continue
+ if unmerge_no_replacement:
+ will_be_unmerged = True
+ for path in consumer_node.alt_paths:
+ if not self.isowner(path):
+ will_be_unmerged = False
+ break
+ if will_be_unmerged:
+ # This consumer is not preserved and it is
+ # being unmerged, so drop this edge.
+ lib_graph.remove_edge(preserved_node, consumer_node)
+ continue
+
+ providers = provider_cache.get(consumer_node)
+ if providers is None:
+ providers = linkmap.findProviders(consumer_node)
+ provider_cache[consumer_node] = providers
+ providers = providers.get(soname)
+ if providers is None:
+ continue
+ for provider in providers:
+ if provider in preserved_paths:
+ continue
+ provider_node = path_to_node(provider)
+ if not provider_node.file_exists():
+ continue
+ if provider_node in preserved_nodes:
+ continue
+ # An alternative provider seems to be
+ # installed, so drop this edge.
+ lib_graph.remove_edge(preserved_node, consumer_node)
+ break
+
+ cpv_lib_map = {}
+ while lib_graph:
+ root_nodes = preserved_nodes.intersection(lib_graph.root_nodes())
+ if not root_nodes:
+ break
+ lib_graph.difference_update(root_nodes)
+ unlink_list = set()
+ for node in root_nodes:
+ unlink_list.update(node.alt_paths)
+ unlink_list = sorted(unlink_list)
+ for obj in unlink_list:
+ cpv = path_cpv_map.get(obj)
+ if cpv is None:
+ # This means that a symlink is in the preserved libs
+ # registry, but the actual lib it points to is not.
+ self._display_merge(_("!!! symlink to lib is preserved, "
+ "but not the lib itself:\n!!! '%s'\n") % (obj,),
+ level=logging.ERROR, noiselevel=-1)
+ continue
+ removed = cpv_lib_map.get(cpv)
+ if removed is None:
+ removed = set()
+ cpv_lib_map[cpv] = removed
+ removed.add(obj)
+
+ return cpv_lib_map
+
+ def _remove_preserved_libs(self, cpv_lib_map):
+ """
+ Remove files returned from _find_unused_preserved_libs().
+ """
+
+ os = _os_merge
+
+ files_to_remove = set()
+ for files in cpv_lib_map.values():
+ files_to_remove.update(files)
+ files_to_remove = sorted(files_to_remove)
+ showMessage = self._display_merge
+ root = self.settings['ROOT']
+
+ parent_dirs = set()
+ for obj in files_to_remove:
+ obj = os.path.join(root, obj.lstrip(os.sep))
+ parent_dirs.add(os.path.dirname(obj))
+ if os.path.islink(obj):
+ obj_type = _("sym")
+ else:
+ obj_type = _("obj")
+ try:
+ os.unlink(obj)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ else:
+ showMessage(_("<<< !needed %s %s\n") % (obj_type, obj),
+ noiselevel=-1)
+
+ # Remove empty parent directories if possible.
+ while parent_dirs:
+ x = parent_dirs.pop()
+ while True:
+ try:
+ os.rmdir(x)
+ except OSError:
+ break
+ prev = x
+ x = os.path.dirname(x)
+ if x == prev:
+ break
+
+ self.vartree.dbapi._plib_registry.pruneNonExisting()
+
+ def _collision_protect(self, srcroot, destroot, mypkglist,
+ file_list, symlink_list):
+
+ os = _os_merge
+
+ collision_ignore = set([normalize_path(myignore) for myignore in \
+ portage.util.shlex_split(
+ self.settings.get("COLLISION_IGNORE", ""))])
+
+ # For collisions with preserved libraries, the current package
+ # will assume ownership and the libraries will be unregistered.
+ if self.vartree.dbapi._plib_registry is None:
+ # preserve-libs is entirely disabled
+ plib_cpv_map = None
+ plib_paths = None
+ plib_inodes = {}
+ else:
+ plib_dict = self.vartree.dbapi._plib_registry.getPreservedLibs()
+ plib_cpv_map = {}
+ plib_paths = set()
+ for cpv, paths in plib_dict.items():
+ plib_paths.update(paths)
+ for f in paths:
+ plib_cpv_map[f] = cpv
+ plib_inodes = self._lstat_inode_map(plib_paths)
+
+ plib_collisions = {}
+
+ showMessage = self._display_merge
+ stopmerge = False
+ collisions = []
+ symlink_collisions = []
+ destroot = self.settings['ROOT']
+ showMessage(_(" %s checking %d files for package collisions\n") % \
+ (colorize("GOOD", "*"), len(file_list) + len(symlink_list)))
+ for i, (f, f_type) in enumerate(chain(
+ ((f, "reg") for f in file_list),
+ ((f, "sym") for f in symlink_list))):
+ if i % 1000 == 0 and i != 0:
+ showMessage(_("%d files checked ...\n") % i)
+
+ dest_path = normalize_path(
+ os.path.join(destroot, f.lstrip(os.path.sep)))
+ try:
+ dest_lstat = os.lstat(dest_path)
+ except EnvironmentError as e:
+ if e.errno == errno.ENOENT:
+ del e
+ continue
+ elif e.errno == errno.ENOTDIR:
+ del e
+ # A non-directory is in a location where this package
+ # expects to have a directory.
+ dest_lstat = None
+ parent_path = dest_path
+ while len(parent_path) > len(destroot):
+ parent_path = os.path.dirname(parent_path)
+ try:
+ dest_lstat = os.lstat(parent_path)
+ break
+ except EnvironmentError as e:
+ if e.errno != errno.ENOTDIR:
+ raise
+ del e
+ if not dest_lstat:
+ raise AssertionError(
+ "unable to find non-directory " + \
+ "parent for '%s'" % dest_path)
+ dest_path = parent_path
+ f = os.path.sep + dest_path[len(destroot):]
+ if f in collisions:
+ continue
+ else:
+ raise
+ if f[0] != "/":
+ f="/"+f
+
+ if stat.S_ISDIR(dest_lstat.st_mode):
+ if f_type == "sym":
+ # This case is explicitly banned
+ # by PMS (see bug #326685).
+ symlink_collisions.append(f)
+ collisions.append(f)
+ continue
+
+ plibs = plib_inodes.get((dest_lstat.st_dev, dest_lstat.st_ino))
+ if plibs:
+ for path in plibs:
+ cpv = plib_cpv_map[path]
+ paths = plib_collisions.get(cpv)
+ if paths is None:
+ paths = set()
+ plib_collisions[cpv] = paths
+ paths.add(path)
+ # The current package will assume ownership and the
+ # libraries will be unregistered, so exclude this
+ # path from the normal collisions.
+ continue
+
+ isowned = False
+ full_path = os.path.join(destroot, f.lstrip(os.path.sep))
+ for ver in mypkglist:
+ if ver.isowner(f):
+ isowned = True
+ break
+ if not isowned and self.isprotected(full_path):
+ isowned = True
+ if not isowned:
+ stopmerge = True
+ if collision_ignore:
+ if f in collision_ignore:
+ stopmerge = False
+ else:
+ for myignore in collision_ignore:
+ if f.startswith(myignore + os.path.sep):
+ stopmerge = False
+ break
+ if stopmerge:
+ collisions.append(f)
+ return collisions, symlink_collisions, plib_collisions
+
+ def _lstat_inode_map(self, path_iter):
+ """
+ Use lstat to create a map of the form:
+ {(st_dev, st_ino) : set([path1, path2, ...])}
+ Multiple paths may reference the same inode due to hardlinks.
+		All lstat() calls are relative to self.settings['ROOT'].
+ """
+
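+		# Hypothetical result shape: two hardlinked paths mapping to one
+		# inode, e.g. {(2049, 12345): set(['/bin/foo', '/bin/foo-compat'])}.
+		#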
+ os = _os_merge
+
+ root = self.settings['ROOT']
+ inode_map = {}
+ for f in path_iter:
+ path = os.path.join(root, f.lstrip(os.sep))
+ try:
+ st = os.lstat(path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ENOTDIR):
+ raise
+ del e
+ continue
+ key = (st.st_dev, st.st_ino)
+ paths = inode_map.get(key)
+ if paths is None:
+ paths = set()
+ inode_map[key] = paths
+ paths.add(f)
+ return inode_map
+
+ def _security_check(self, installed_instances):
+ if not installed_instances:
+ return 0
+
+ os = _os_merge
+
+ showMessage = self._display_merge
+
+ file_paths = set()
+ for dblnk in installed_instances:
+ file_paths.update(dblnk.getcontents())
+ inode_map = {}
+ real_paths = set()
+ for i, path in enumerate(file_paths):
+
+ if os is _os_merge:
+ try:
+ _unicode_encode(path,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeEncodeError:
+ # The package appears to have been merged with a
+ # different value of sys.getfilesystemencoding(),
+ # so fall back to utf_8 if appropriate.
+ try:
+ _unicode_encode(path,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeEncodeError:
+ pass
+ else:
+ os = portage.os
+
+ try:
+ s = os.lstat(path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ENOTDIR):
+ raise
+ del e
+ continue
+ if not stat.S_ISREG(s.st_mode):
+ continue
+ path = os.path.realpath(path)
+ if path in real_paths:
+ continue
+ real_paths.add(path)
+ if s.st_nlink > 1 and \
+ s.st_mode & (stat.S_ISUID | stat.S_ISGID):
+ k = (s.st_dev, s.st_ino)
+ inode_map.setdefault(k, []).append((path, s))
+ suspicious_hardlinks = []
+ for path_list in inode_map.values():
+ path, s = path_list[0]
+ if len(path_list) == s.st_nlink:
+ # All hardlinks seem to be owned by this package.
+ continue
+ suspicious_hardlinks.append(path_list)
+ if not suspicious_hardlinks:
+ return 0
+
+ msg = []
+ msg.append(_("suid/sgid file(s) "
+ "with suspicious hardlink(s):"))
+ msg.append("")
+ for path_list in suspicious_hardlinks:
+ for path, s in path_list:
+ msg.append("\t%s" % path)
+ msg.append("")
+ msg.append(_("See the Gentoo Security Handbook "
+ "guide for advice on how to proceed."))
+
+ self._eerror("preinst", msg)
+
+ return 1
+
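+ # The loop above reduces to this test (a sketch; path_list holds the
+ # hardlinks that this package accounts for on one suid/sgid inode):
+ #
+ #   path, s = path_list[0]
+ #   if len(path_list) != s.st_nlink:
+ #       pass  # some hardlink to this suid/sgid file is unaccounted for
+ #
+ # A stray link like that could keep a vulnerable suid binary alive
+ # after an upgrade, hence the preinst warning.
+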
+ def _eqawarn(self, phase, lines):
+ self._elog("eqawarn", phase, lines)
+
+ def _eerror(self, phase, lines):
+ self._elog("eerror", phase, lines)
+
+ def _elog(self, funcname, phase, lines):
+ func = getattr(portage.elog.messages, funcname)
+ if self._scheduler is None:
+ for l in lines:
+ func(l, phase=phase, key=self.mycpv)
+ else:
+ background = self.settings.get("PORTAGE_BACKGROUND") == "1"
+ log_path = None
+ if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+ log_path = self.settings.get("PORTAGE_LOG_FILE")
+ out = io.StringIO()
+ for line in lines:
+ func(line, phase=phase, key=self.mycpv, out=out)
+ msg = out.getvalue()
+ self._scheduler.output(msg,
+ background=background, log_path=log_path)
+
+ def _elog_process(self, phasefilter=None):
+ cpv = self.mycpv
+ if self._pipe is None:
+ elog_process(cpv, self.settings, phasefilter=phasefilter)
+ else:
+ logdir = os.path.join(self.settings["T"], "logging")
+ ebuild_logentries = collect_ebuild_messages(logdir)
+ py_logentries = collect_messages(key=cpv).get(cpv, {})
+ logentries = _merge_logentries(py_logentries, ebuild_logentries)
+ funcnames = {
+ "INFO": "einfo",
+ "LOG": "elog",
+ "WARN": "ewarn",
+ "QA": "eqawarn",
+ "ERROR": "eerror"
+ }
+ str_buffer = []
+ for phase, messages in logentries.items():
+ for key, lines in messages:
+ funcname = funcnames[key]
+ if isinstance(lines, basestring):
+ lines = [lines]
+ for line in lines:
+ fields = (funcname, phase, cpv, line.rstrip('\n'))
+ str_buffer.append(' '.join(fields))
+ str_buffer.append('\n')
+ if str_buffer:
+ os.write(self._pipe, _unicode_encode(''.join(str_buffer)))
+
+ def _emerge_log(self, msg):
+ emergelog(False, msg)
+
+ def treewalk(self, srcroot, destroot, inforoot, myebuild, cleanup=0,
+ mydbapi=None, prev_mtimes=None, counter=None):
+ """
+
+ This function does the following:
+
+ calls self._preserve_libs if FEATURES=preserve-libs
+ calls self._collision_protect if FEATURES=collision-protect
+ calls doebuild(mydo=pkg_preinst)
+ Merges the package to the livefs
+ unmerges old version (if required)
+ calls doebuild(mydo=pkg_postinst)
+ calls env_update
+
+ @param srcroot: Typically this is ${D}
+ @type srcroot: String (Path)
+ @param destroot: ignored, self.settings['ROOT'] is used instead
+ @type destroot: String (Path)
+ @param inforoot: root of the vardb entry (directory containing metadata files such as SLOT and CHOST)
+ @type inforoot: String (Path)
+ @param myebuild: path to the ebuild that we are processing
+ @type myebuild: String (Path)
+ @param mydbapi: dbapi which is handed to doebuild.
+ @type mydbapi: portdbapi instance
+ @param prev_mtimes: { Filename:mtime } mapping for env_update
+ @type prev_mtimes: Dictionary
+ @rtype: Boolean
+ @returns:
+ 1. 0 on success
+ 2. 1 on failure
+
+ secondhand is a list of symlinks that have been skipped due to their target
+ not existing; we will merge these symlinks at a later time.
+ """
+
+ os = _os_merge
+
+ srcroot = _unicode_decode(srcroot,
+ encoding=_encodings['content'], errors='strict')
+ destroot = self.settings['ROOT']
+ inforoot = _unicode_decode(inforoot,
+ encoding=_encodings['content'], errors='strict')
+ myebuild = _unicode_decode(myebuild,
+ encoding=_encodings['content'], errors='strict')
+
+ showMessage = self._display_merge
+ srcroot = normalize_path(srcroot).rstrip(os.path.sep) + os.path.sep
+
+ if not os.path.isdir(srcroot):
+ showMessage(_("!!! Directory Not Found: D='%s'\n") % srcroot,
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ slot = ''
+ for var_name in ('CHOST', 'SLOT'):
+ if var_name == 'CHOST' and self.cat == 'virtual':
+ try:
+ os.unlink(os.path.join(inforoot, var_name))
+ except OSError:
+ pass
+ continue
+
+ try:
+ val = io.open(_unicode_encode(
+ os.path.join(inforoot, var_name),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace').readline().strip()
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ val = ''
+
+ if var_name == 'SLOT':
+ slot = val
+
+ if not slot.strip():
+ slot = self.settings.get(var_name, '')
+ if not slot.strip():
+ showMessage(_("!!! SLOT is undefined\n"),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+ write_atomic(os.path.join(inforoot, var_name), slot + '\n')
+
+ if val != self.settings.get(var_name, ''):
+ self._eqawarn('preinst',
+ [_("QA Notice: Expected %(var_name)s='%(expected_value)s', got '%(actual_value)s'\n") % \
+ {"var_name":var_name, "expected_value":self.settings.get(var_name, ''), "actual_value":val}])
+
+ def eerror(lines):
+ self._eerror("preinst", lines)
+
+ if not os.path.exists(self.dbcatdir):
+ ensure_dirs(self.dbcatdir)
+
+ otherversions = []
+ for v in self.vartree.dbapi.cp_list(self.mysplit[0]):
+ otherversions.append(v.split("/")[1])
+
+ cp = self.mysplit[0]
+ slot_atom = "%s:%s" % (cp, slot)
+
+ # filter any old-style virtual matches
+ slot_matches = [cpv for cpv in self.vartree.dbapi.match(slot_atom) \
+ if cpv_getkey(cpv) == cp]
+
+ if self.mycpv not in slot_matches and \
+ self.vartree.dbapi.cpv_exists(self.mycpv):
+ # handle multislot or unapplied slotmove
+ slot_matches.append(self.mycpv)
+
+ others_in_slot = []
+ from portage import config
+ for cur_cpv in slot_matches:
+ # Clone the config in case one of these has to be unmerged since
+ # we need it to have private ${T} etc... for things like elog.
+ settings_clone = config(clone=self.settings)
+ settings_clone.pop("PORTAGE_BUILDIR_LOCKED", None)
+ settings_clone.reset()
+ others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
+ settings=settings_clone,
+ vartree=self.vartree, treetype="vartree",
+ scheduler=self._scheduler, pipe=self._pipe))
+
+ retval = self._security_check(others_in_slot)
+ if retval:
+ return retval
+
+ if slot_matches:
+ # Used by self.isprotected().
+ max_dblnk = None
+ max_counter = -1
+ for dblnk in others_in_slot:
+ cur_counter = self.vartree.dbapi.cpv_counter(dblnk.mycpv)
+ if cur_counter > max_counter:
+ max_counter = cur_counter
+ max_dblnk = dblnk
+ self._installed_instance = max_dblnk
+
+ # We check for unicode encoding issues after src_install. However,
+ # the check must be repeated here for binary packages (it's
+ # inexpensive since we call os.walk() here anyway).
+ unicode_errors = []
+
+ while True:
+
+ unicode_error = False
+
+ myfilelist = []
+ mylinklist = []
+ paths_with_newlines = []
+ srcroot_len = len(srcroot)
+ def onerror(e):
+ raise
+ for parent, dirs, files in os.walk(srcroot, onerror=onerror):
+ try:
+ parent = _unicode_decode(parent,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeDecodeError:
+ new_parent = _unicode_decode(parent,
+ encoding=_encodings['merge'], errors='replace')
+ new_parent = _unicode_encode(new_parent,
+ encoding=_encodings['merge'], errors='backslashreplace')
+ new_parent = _unicode_decode(new_parent,
+ encoding=_encodings['merge'], errors='replace')
+ os.rename(parent, new_parent)
+ unicode_error = True
+ unicode_errors.append(new_parent[srcroot_len:])
+ break
+
+ for fname in files:
+ try:
+ fname = _unicode_decode(fname,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeDecodeError:
+ fpath = portage._os.path.join(
+ parent.encode(_encodings['merge']), fname)
+ new_fname = _unicode_decode(fname,
+ encoding=_encodings['merge'], errors='replace')
+ new_fname = _unicode_encode(new_fname,
+ encoding=_encodings['merge'], errors='backslashreplace')
+ new_fname = _unicode_decode(new_fname,
+ encoding=_encodings['merge'], errors='replace')
+ new_fpath = os.path.join(parent, new_fname)
+ os.rename(fpath, new_fpath)
+ unicode_error = True
+ unicode_errors.append(new_fpath[srcroot_len:])
+ fname = new_fname
+ fpath = new_fpath
+ else:
+ fpath = os.path.join(parent, fname)
+
+ relative_path = fpath[srcroot_len:]
+
+ if "\n" in relative_path:
+ paths_with_newlines.append(relative_path)
+
+ file_mode = os.lstat(fpath).st_mode
+ if stat.S_ISREG(file_mode):
+ myfilelist.append(relative_path)
+ elif stat.S_ISLNK(file_mode):
+ # Note: os.walk puts symlinks to directories in the "dirs"
+ # list and it does not traverse them since that could lead
+ # to an infinite recursion loop.
+ mylinklist.append(relative_path)
+
+ if unicode_error:
+ break
+
+ if not unicode_error:
+ break
+
+ if unicode_errors:
+ eerror(portage._merge_unicode_error(unicode_errors))
+
+ if paths_with_newlines:
+ msg = []
+ msg.append(_("This package installs one or more files containing a newline (\\n) character:"))
+ msg.append("")
+ paths_with_newlines.sort()
+ for f in paths_with_newlines:
+ msg.append("\t/%s" % (f.replace("\n", "\\n")))
+ msg.append("")
+ msg.append(_("package %s NOT merged") % self.mycpv)
+ msg.append("")
+ eerror(msg)
+ return 1
+
+ # If there are no files to merge, and an installed package in the same
+ # slot has files, it probably means that something went wrong.
+ if self.settings.get("PORTAGE_PACKAGE_EMPTY_ABORT") == "1" and \
+ not myfilelist and not mylinklist and others_in_slot:
+ installed_files = None
+ for other_dblink in others_in_slot:
+ installed_files = other_dblink.getcontents()
+ if not installed_files:
+ continue
+ from textwrap import wrap
+ wrap_width = 72
+ msg = []
+ d = {
+ "new_cpv":self.mycpv,
+ "old_cpv":other_dblink.mycpv
+ }
+ msg.extend(wrap(_("The '%(new_cpv)s' package will not install "
+ "any files, but the currently installed '%(old_cpv)s'"
+ " package has the following files: ") % d, wrap_width))
+ msg.append("")
+ msg.extend(sorted(installed_files))
+ msg.append("")
+ msg.append(_("package %s NOT merged") % self.mycpv)
+ msg.append("")
+ msg.extend(wrap(
+ _("Manually run `emerge --unmerge =%s` if you "
+ "really want to remove the above files. Set "
+ "PORTAGE_PACKAGE_EMPTY_ABORT=\"0\" in "
+ "/etc/make.conf if you do not want to "
+ "abort in cases like this.") % other_dblink.mycpv,
+ wrap_width))
+ eerror(msg)
+ if installed_files:
+ return 1
+
+ # check for package collisions
+ blockers = self._blockers
+ if blockers is None:
+ blockers = []
+ collisions, symlink_collisions, plib_collisions = \
+ self._collision_protect(srcroot, destroot,
+ others_in_slot + blockers, myfilelist, mylinklist)
+
+ # Make sure the ebuild environment is initialized and that ${T}/elog
+ # exists for logging of collision-protect eerror messages.
+ if myebuild is None:
+ myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
+ doebuild_environment(myebuild, "preinst",
+ settings=self.settings, db=mydbapi)
+ self.settings["REPLACING_VERSIONS"] = " ".join(
+ [portage.versions.cpv_getversion(other.mycpv)
+ for other in others_in_slot])
+ prepare_build_dirs(settings=self.settings, cleanup=cleanup)
+
+ if collisions:
+ collision_protect = "collision-protect" in self.settings.features
+ protect_owned = "protect-owned" in self.settings.features
+ msg = _("This package will overwrite one or more files that"
+ " may belong to other packages (see list below).")
+ if not (collision_protect or protect_owned):
+ msg += _(" Add either \"collision-protect\" or"
+ " \"protect-owned\" to FEATURES in"
+ " make.conf if you would like the merge to abort"
+ " in cases like this. See the make.conf man page for"
+ " more information about these features.")
+ if self.settings.get("PORTAGE_QUIET") != "1":
+ msg += _(" You can use a command such as"
+ " `portageq owners / <filename>` to identify the"
+ " installed package that owns a file. If portageq"
+ " reports that only one package owns a file then do NOT"
+ " file a bug report. A bug report is only useful if it"
+ " identifies at least two or more packages that are known"
+ " to install the same file(s)."
+ " If a collision occurs and you"
+ " can not explain where the file came from then you"
+ " should simply ignore the collision since there is not"
+ " enough information to determine if a real problem"
+ " exists. Please do NOT file a bug report at"
+ " http://bugs.gentoo.org unless you report exactly which"
+ " two packages install the same file(s). Once again,"
+ " please do NOT file a bug report unless you have"
+ " completely understood the above message.")
+
+ self.settings["EBUILD_PHASE"] = "preinst"
+ from textwrap import wrap
+ msg = wrap(msg, 70)
+ if collision_protect:
+ msg.append("")
+ msg.append(_("package %s NOT merged") % self.settings.mycpv)
+ msg.append("")
+ msg.append(_("Detected file collision(s):"))
+ msg.append("")
+
+ for f in collisions:
+ msg.append("\t%s" % \
+ os.path.join(destroot, f.lstrip(os.path.sep)))
+
+ eerror(msg)
+
+ owners = None
+ if collision_protect or protect_owned or symlink_collisions:
+ msg = []
+ msg.append("")
+ msg.append(_("Searching all installed"
+ " packages for file collisions..."))
+ msg.append("")
+ msg.append(_("Press Ctrl-C to Stop"))
+ msg.append("")
+ eerror(msg)
+
+ if len(collisions) > 20:
+ # get_owners is slow for large numbers of files, so
+ # don't look them all up.
+ collisions = collisions[:20]
+ self.lockdb()
+ try:
+ owners = self.vartree.dbapi._owners.get_owners(collisions)
+ self.vartree.dbapi.flush_cache()
+ finally:
+ self.unlockdb()
+
+ for pkg, owned_files in owners.items():
+ cpv = pkg.mycpv
+ msg = []
+ msg.append("%s" % cpv)
+ for f in sorted(owned_files):
+ msg.append("\t%s" % os.path.join(destroot,
+ f.lstrip(os.path.sep)))
+ msg.append("")
+ eerror(msg)
+
+ if not owners:
+ eerror([_("None of the installed"
+ " packages claim the file(s)."), ""])
+
+ # The explanation about the collision and how to solve
+ # it may not be visible via a scrollback buffer, especially
+ # if the number of file collisions is large. Therefore,
+ # show a summary at the end.
+ abort = False
+ if collision_protect:
+ abort = True
+ msg = _("Package '%s' NOT merged due to file collisions.") % \
+ self.settings.mycpv
+ elif protect_owned and owners:
+ abort = True
+ msg = _("Package '%s' NOT merged due to file collisions.") % \
+ self.settings.mycpv
+ elif symlink_collisions:
+ abort = True
+ msg = _("Package '%s' NOT merged due to collision " + \
+ "between a symlink and a directory which is explicitly " + \
+ "forbidden by PMS (see bug #326685).") % \
+ (self.settings.mycpv,)
+ else:
+ msg = _("Package '%s' merged despite file collisions.") % \
+ self.settings.mycpv
+ msg += _(" If necessary, refer to your elog "
+ "messages for the whole content of the above message.")
+ eerror(wrap(msg, 70))
+
+ if abort:
+ return 1
+
+ # The merge process may move files out of the image directory,
+ # which causes invalidation of the .installed flag.
+ try:
+ os.unlink(os.path.join(
+ os.path.dirname(normalize_path(srcroot)), ".installed"))
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+
+ self.dbdir = self.dbtmpdir
+ self.delete()
+ ensure_dirs(self.dbtmpdir)
+
+ # run preinst script
+ showMessage(_(">>> Merging %(cpv)s to %(destroot)s\n") % \
+ {"cpv":self.mycpv, "destroot":destroot})
+ phase = EbuildPhase(background=False, phase="preinst",
+ scheduler=self._scheduler, settings=self.settings)
+ phase.start()
+ a = phase.wait()
+
+ # XXX: Decide how to handle failures here.
+ if a != os.EX_OK:
+ showMessage(_("!!! FAILED preinst: ")+str(a)+"\n",
+ level=logging.ERROR, noiselevel=-1)
+ return a
+
+ # copy "info" files (like SLOT, CFLAGS, etc.) into the database
+ for x in os.listdir(inforoot):
+ self.copyfile(inforoot+"/"+x)
+
+ # write local package counter for recording
+ if counter is None:
+ counter = self.vartree.dbapi.counter_tick(mycpv=self.mycpv)
+ io.open(_unicode_encode(os.path.join(self.dbtmpdir, 'COUNTER'),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='backslashreplace').write(_unicode_decode(str(counter)))
+
+ self.updateprotect()
+
+ #if we have a file containing previously-merged config file md5sums, grab it.
+ self.vartree.dbapi._fs_lock()
+ try:
+ cfgfiledict = grabdict(self.vartree.dbapi._conf_mem_file)
+ if "NOCONFMEM" in self.settings:
+ cfgfiledict["IGNORE"]=1
+ else:
+ cfgfiledict["IGNORE"]=0
+
+ # Always behave like --noconfmem is enabled for downgrades
+ # so that people who don't know about this option are less
+ # likely to get confused when doing upgrade/downgrade cycles.
+ pv_split = catpkgsplit(self.mycpv)[1:]
+ for other in others_in_slot:
+ if pkgcmp(pv_split, catpkgsplit(other.mycpv)[1:]) < 0:
+ cfgfiledict["IGNORE"] = 1
+ break
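+ # Sketch of the comparison used above, with hypothetical versions:
+ #
+ #   from portage.versions import catpkgsplit, pkgcmp
+ #   new = catpkgsplit("sys-apps/foo-1.0")[1:]  # ('foo', '1.0', 'r0')
+ #   old = catpkgsplit("sys-apps/foo-1.1")[1:]
+ #   pkgcmp(new, old) < 0  # True: merging 1.0 over 1.1 is a downgrade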
+
+ rval = self._merge_contents(srcroot, destroot, cfgfiledict)
+ if rval != os.EX_OK:
+ return rval
+ finally:
+ self.vartree.dbapi._fs_unlock()
+
+ # These caches are populated during collision-protect and the data
+ # they contain is now invalid. It's very important to invalidate
+ # the contents_inodes cache so that FEATURES=unmerge-orphans
+ # doesn't unmerge anything that belongs to this package that has
+ # just been merged.
+ for dblnk in others_in_slot:
+ dblnk._clear_contents_cache()
+ self._clear_contents_cache()
+
+ linkmap = self.vartree.dbapi._linkmap
+ plib_registry = self.vartree.dbapi._plib_registry
+ # We initialize preserve_paths to an empty set rather
+ # than None here because it plays an important role
+ # in prune_plib_registry logic by serving to indicate
+ # that we have a replacement for a package that's
+ # being unmerged.
+
+ preserve_paths = set()
+ needed = None
+ if not (self._linkmap_broken or linkmap is None or
+ plib_registry is None):
+ self.vartree.dbapi._fs_lock()
+ plib_registry.lock()
+ try:
+ plib_registry.load()
+ needed = os.path.join(inforoot, linkmap._needed_aux_key)
+ self._linkmap_rebuild(include_file=needed)
+
+ # Preserve old libs if they are still in use
+ # TODO: Handle cases where the previous instance
+ # has already been uninstalled but it still has some
+ # preserved libraries in the registry that we may
+ # want to preserve here.
+ preserve_paths = self._find_libs_to_preserve()
+ finally:
+ plib_registry.unlock()
+ self.vartree.dbapi._fs_unlock()
+
+ if preserve_paths:
+ self._add_preserve_libs_to_contents(preserve_paths)
+
+ # If portage is reinstalling itself, remove the old
+ # version now since we want to use the temporary
+ # PORTAGE_BIN_PATH that will be removed when we return.
+ reinstall_self = False
+ if self.myroot == "/" and \
+ match_from_list(PORTAGE_PACKAGE_ATOM, [self.mycpv]):
+ reinstall_self = True
+
+ emerge_log = self._emerge_log
+
+ # If we have any preserved libraries then autoclean
+ # is forced so that preserve-libs logic doesn't have
+ # to account for the additional complexity of the
+ # AUTOCLEAN=no mode.
+ autoclean = self.settings.get("AUTOCLEAN", "yes") == "yes" \
+ or preserve_paths
+
+ if autoclean:
+ emerge_log(_(" >>> AUTOCLEAN: %s") % (slot_atom,))
+
+ others_in_slot.append(self) # self has just been merged
+ for dblnk in list(others_in_slot):
+ if dblnk is self:
+ continue
+ if not (autoclean or dblnk.mycpv == self.mycpv or reinstall_self):
+ continue
+ showMessage(_(">>> Safely unmerging already-installed instance...\n"))
+ emerge_log(_(" === Unmerging... (%s)") % (dblnk.mycpv,))
+ others_in_slot.remove(dblnk) # dblnk will unmerge itself now
+ dblnk._linkmap_broken = self._linkmap_broken
+ dblnk.settings["REPLACED_BY_VERSION"] = portage.versions.cpv_getversion(self.mycpv)
+ dblnk.settings.backup_changes("REPLACED_BY_VERSION")
+ unmerge_rval = dblnk.unmerge(ldpath_mtimes=prev_mtimes,
+ others_in_slot=others_in_slot, needed=needed,
+ preserve_paths=preserve_paths)
+ dblnk.settings.pop("REPLACED_BY_VERSION", None)
+
+ if unmerge_rval == os.EX_OK:
+ emerge_log(_(" >>> unmerge success: %s") % (dblnk.mycpv,))
+ else:
+ emerge_log(_(" !!! unmerge FAILURE: %s") % (dblnk.mycpv,))
+
+ self.lockdb()
+ try:
+ # TODO: Check status and abort if necessary.
+ dblnk.delete()
+ finally:
+ self.unlockdb()
+ showMessage(_(">>> Original instance of package unmerged safely.\n"))
+
+ if len(others_in_slot) > 1:
+ showMessage(colorize("WARN", _("WARNING:"))
+ + _(" AUTOCLEAN is disabled. This can cause serious"
+ " problems due to overlapping packages.\n"),
+ level=logging.WARN, noiselevel=-1)
+
+ # We hold both directory locks.
+ self.dbdir = self.dbpkgdir
+ self.lockdb()
+ try:
+ self.delete()
+ _movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
+ finally:
+ self.unlockdb()
+
+ # Check for file collisions with blocking packages
+ # and remove any colliding files from their CONTENTS
+ # since they now belong to this package.
+ self._clear_contents_cache()
+ contents = self.getcontents()
+ destroot_len = len(destroot) - 1
+ self.lockdb()
+ try:
+ for blocker in blockers:
+ self.vartree.dbapi.removeFromContents(blocker, iter(contents),
+ relative_paths=False)
+ finally:
+ self.unlockdb()
+
+ plib_registry = self.vartree.dbapi._plib_registry
+ if plib_registry:
+ self.vartree.dbapi._fs_lock()
+ plib_registry.lock()
+ try:
+ plib_registry.load()
+
+ if preserve_paths:
+ # keep track of the libs we preserved
+ plib_registry.register(self.mycpv, slot, counter,
+ sorted(preserve_paths))
+
+ # Unregister any preserved libs that this package has overwritten
+ # and update the contents of the packages that owned them.
+ plib_dict = plib_registry.getPreservedLibs()
+ for cpv, paths in plib_collisions.items():
+ if cpv not in plib_dict:
+ continue
+ has_vdb_entry = False
+ if cpv != self.mycpv:
+ # If we've replaced another instance with the
+ # same cpv then the vdb entry no longer belongs
+ # to it, so we'll have to get the slot and counter
+ # from plib_registry._data instead.
+ self.vartree.dbapi.lock()
+ try:
+ try:
+ slot, counter = self.vartree.dbapi.aux_get(
+ cpv, ["SLOT", "COUNTER"])
+ except KeyError:
+ pass
+ else:
+ has_vdb_entry = True
+ self.vartree.dbapi.removeFromContents(
+ cpv, paths)
+ finally:
+ self.vartree.dbapi.unlock()
+
+ if not has_vdb_entry:
+ # It's possible for previously unmerged packages
+ # to have preserved libs in the registry, so try
+ # to retrieve the slot and counter from there.
+ has_registry_entry = False
+ for plib_cps, (plib_cpv, plib_counter, plib_paths) in \
+ plib_registry._data.items():
+ if plib_cpv != cpv:
+ continue
+ try:
+ cp, slot = plib_cps.split(":", 1)
+ except ValueError:
+ continue
+ counter = plib_counter
+ has_registry_entry = True
+ break
+
+ if not has_registry_entry:
+ continue
+
+ remaining = [f for f in plib_dict[cpv] if f not in paths]
+ plib_registry.register(cpv, slot, counter, remaining)
+
+ plib_registry.store()
+ finally:
+ plib_registry.unlock()
+ self.vartree.dbapi._fs_unlock()
+
+ self.vartree.dbapi._add(self)
+ contents = self.getcontents()
+
+ #do postinst script
+ self.settings["PORTAGE_UPDATE_ENV"] = \
+ os.path.join(self.dbpkgdir, "environment.bz2")
+ self.settings.backup_changes("PORTAGE_UPDATE_ENV")
+ try:
+ phase = EbuildPhase(background=False, phase="postinst",
+ scheduler=self._scheduler, settings=self.settings)
+ phase.start()
+ a = phase.wait()
+ if a == os.EX_OK:
+ showMessage(_(">>> %s merged.\n") % self.mycpv)
+ finally:
+ self.settings.pop("PORTAGE_UPDATE_ENV", None)
+
+ if a != os.EX_OK:
+ # It's stupid to bail out here, so keep going regardless of
+ # phase return code.
+ showMessage(_("!!! FAILED postinst: ")+str(a)+"\n",
+ level=logging.ERROR, noiselevel=-1)
+
+ downgrade = False
+ for v in otherversions:
+ if pkgcmp(catpkgsplit(self.pkg)[1:], catpkgsplit(v)[1:]) < 0:
+ downgrade = True
+
+ # Lock the config memory file to prevent symlink creation
+ # in merge_contents from overlapping with env-update.
+ self.vartree.dbapi._fs_lock()
+ try:
+ #update environment settings, library paths. DO NOT change symlinks.
+ env_update(makelinks=(not downgrade),
+ target_root=self.settings['ROOT'], prev_mtimes=prev_mtimes,
+ contents=contents, env=self.settings.environ(),
+ writemsg_level=self._display_merge)
+ finally:
+ self.vartree.dbapi._fs_unlock()
+
+ # For gcc upgrades, preserved libs have to be removed after the
+ # library path has been updated.
+ self._prune_plib_registry()
+
+ return os.EX_OK
+
+ def _new_backup_path(self, p):
+ """
+ This works for any type of path, such as a regular file, symlink,
+ or directory. The parent directory is assumed to exist.
+ The returned filename is of the form p + '.backup.' + x, where
+ x guarantees that the returned path does not exist yet.
+ """
+ os = _os_merge
+
+ x = -1
+ while True:
+ x += 1
+ backup_p = p + '.backup.' + str(x).rjust(4, '0')
+ try:
+ os.lstat(backup_p)
+ except OSError:
+ break
+
+ return backup_p
+
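+ # For example (assuming no backup files exist yet), a hypothetical
+ # call would yield:
+ #
+ #   self._new_backup_path('/etc/foo')  # -> '/etc/foo.backup.0000'
+ #   # and '/etc/foo.backup.0001' on the next call if 0000 now exists.
+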
+ def _merge_contents(self, srcroot, destroot, cfgfiledict):
+
+ cfgfiledict_orig = cfgfiledict.copy()
+
+ # open CONTENTS file (possibly overwriting old one) for recording
+ # Use atomic_ofstream for automatic coercion of raw bytes to
+ # unicode, in order to prevent TypeError when writing raw bytes
+ # to TextIOWrapper with python2.
+ outfile = atomic_ofstream(_unicode_encode(
+ os.path.join(self.dbtmpdir, 'CONTENTS'),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='backslashreplace')
+
+ # Don't bump mtimes on merge since some applications require
+ # preservation of timestamps. This means that the unmerge phase must
+ # check to see if a file belongs to an installed instance in the same
+ # slot.
+ mymtime = None
+
+ # set umask to 0 for merging; save the previous value in prevmask (since this is a global change)
+ prevmask = os.umask(0)
+ secondhand = []
+
+ # we do a first merge; this will recurse through all files in our srcroot but also build up a
+ # "second hand" of symlinks to merge later
+ if self.mergeme(srcroot, destroot, outfile, secondhand, "", cfgfiledict, mymtime):
+ return 1
+
+ # now, it's time to deal with our second hand; we'll loop until we can't merge anymore. The rest are
+ # broken symlinks. We'll merge them too.
+ lastlen = 0
+ while len(secondhand) and len(secondhand)!=lastlen:
+ # clear the thirdhand. Anything from our second hand that
+ # couldn't get merged will be added to thirdhand.
+
+ thirdhand = []
+ if self.mergeme(srcroot, destroot, outfile, thirdhand,
+ secondhand, cfgfiledict, mymtime):
+ return 1
+
+ #swap hands
+ lastlen = len(secondhand)
+
+ # our thirdhand now becomes our secondhand. It's ok to throw
+ # away secondhand since thirdhand contains all the stuff that
+ # couldn't be merged.
+ secondhand = thirdhand
+
+ if len(secondhand):
+ # force merge of remaining symlinks (broken or circular; oh well)
+ if self.mergeme(srcroot, destroot, outfile, None,
+ secondhand, cfgfiledict, mymtime):
+ return 1
+
+ #restore umask
+ os.umask(prevmask)
+
+ # flush and close the CONTENTS file
+ outfile.flush()
+ outfile.close()
+
+ # write out our collection of md5sums
+ if cfgfiledict != cfgfiledict_orig:
+ cfgfiledict.pop("IGNORE", None)
+ try:
+ writedict(cfgfiledict, self.vartree.dbapi._conf_mem_file)
+ except InvalidLocation:
+ self.settings._init_dirs()
+ writedict(cfgfiledict, self.vartree.dbapi._conf_mem_file)
+
+ return os.EX_OK
+
+ def mergeme(self, srcroot, destroot, outfile, secondhand, stufftomerge, cfgfiledict, thismtime):
+ """
+
+ This function handles actual merging of the package contents to the livefs.
+ It also handles config protection.
+
+ @param srcroot: Where are we copying files from (usually ${D})
+ @type srcroot: String (Path)
+ @param destroot: Typically ${ROOT}
+ @type destroot: String (Path)
+ @param outfile: File to log operations to
+ @type outfile: File Object
+ @param secondhand: A set of items to merge in pass two (usually
+ symlinks that point to non-existent files and may get merged later)
+ @type secondhand: List
+ @param stufftomerge: Either a directory to merge, or a list of items.
+ @type stufftomerge: String or List
+ @param cfgfiledict: { File:mtime } mapping for config_protected files
+ @type cfgfiledict: Dictionary
+ @param thismtime: The current time (typically long(time.time()))
+ @type thismtime: Long
+ @rtype: None or Boolean
+ @returns:
+ 1. True on failure
+ 2. None otherwise
+
+ """
+
+ showMessage = self._display_merge
+ writemsg = self._display_merge
+
+ os = _os_merge
+ sep = os.sep
+ join = os.path.join
+ srcroot = normalize_path(srcroot).rstrip(sep) + sep
+ destroot = normalize_path(destroot).rstrip(sep) + sep
+ calc_prelink = "prelink-checksums" in self.settings.features
+
+ # this function merges a list of files. There are two forms of argument passing.
+ if isinstance(stufftomerge, basestring):
+ #A directory is specified. Figure out protection paths, listdir() it and process it.
+ mergelist = os.listdir(join(srcroot, stufftomerge))
+ offset = stufftomerge
+ else:
+ mergelist = stufftomerge
+ offset = ""
+
+ for i, x in enumerate(mergelist):
+
+ mysrc = join(srcroot, offset, x)
+ mydest = join(destroot, offset, x)
+ # myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
+ myrealdest = join(sep, offset, x)
+ # stat file once, test using S_* macros many times (faster that way)
+ mystat = os.lstat(mysrc)
+ mymode = mystat[stat.ST_MODE]
+ # handy variables; mydest is the target object on the live filesystem;
+ # mysrc is the source object in the temporary install dir
+ try:
+ mydstat = os.lstat(mydest)
+ mydmode = mydstat.st_mode
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ #dest file doesn't exist
+ mydstat = None
+ mydmode = None
+
+ if stat.S_ISLNK(mymode):
+ # we are merging a symbolic link
+ myabsto = abssymlink(mysrc)
+ if myabsto.startswith(srcroot):
+ myabsto = myabsto[len(srcroot):]
+ myabsto = myabsto.lstrip(sep)
+ myto = os.readlink(mysrc)
+ if self.settings and self.settings["D"]:
+ if myto.startswith(self.settings["D"]):
+ myto = myto[len(self.settings["D"]):]
+ # myrealto contains the path of the real file to which this symlink points.
+ # we can simply test for existence of this file to see if the target has been merged yet
+ myrealto = normalize_path(os.path.join(destroot, myabsto))
+ if mydmode!=None:
+ #destination exists
+ if stat.S_ISDIR(mydmode):
+ # we can't merge a symlink over a directory
+ newdest = self._new_backup_path(mydest)
+ msg = []
+ msg.append("")
+ msg.append(_("Installation of a symlink is blocked by a directory:"))
+ msg.append(" '%s'" % mydest)
+ msg.append(_("This symlink will be merged with a different name:"))
+ msg.append(" '%s'" % newdest)
+ msg.append("")
+ self._eerror("preinst", msg)
+ mydest = newdest
+
+ elif not stat.S_ISLNK(mydmode):
+ if os.path.exists(mysrc) and stat.S_ISDIR(os.stat(mysrc)[stat.ST_MODE]):
+ # Kill file blocking installation of symlink to dir #71787
+ pass
+ elif self.isprotected(mydest):
+ # Use md5 of the target in ${D} if it exists...
+ try:
+ newmd5 = perform_md5(join(srcroot, myabsto))
+ except FileNotFound:
+ # Maybe the target is merged already.
+ try:
+ newmd5 = perform_md5(myrealto)
+ except FileNotFound:
+ newmd5 = None
+ mydest = new_protect_filename(mydest, newmd5=newmd5)
+
+ # if secondhand is None it means we're operating in "force" mode and should not create a second hand.
+ if (secondhand != None) and (not os.path.exists(myrealto)):
+ # either the target directory doesn't exist yet or the target file doesn't exist -- or
+ # the target is a broken symlink. We will add this file to our "second hand" and merge
+ # it later.
+ secondhand.append(mysrc[len(srcroot):])
+ continue
+ # unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
+ mymtime = movefile(mysrc, mydest, newmtime=thismtime,
+ sstat=mystat, mysettings=self.settings,
+ encoding=_encodings['merge'])
+ if mymtime != None:
+ showMessage(">>> %s -> %s\n" % (mydest, myto))
+ outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
+ else:
+ showMessage(_("!!! Failed to move file.\n"),
+ level=logging.ERROR, noiselevel=-1)
+ showMessage("!!! %s -> %s\n" % (mydest, myto),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+ elif stat.S_ISDIR(mymode):
+ # we are merging a directory
+ if mydmode != None:
+ # destination exists
+
+ if bsd_chflags:
+ # Save then clear flags on dest.
+ dflags = mydstat.st_flags
+ if dflags != 0:
+ bsd_chflags.lchflags(mydest, 0)
+
+ if not os.access(mydest, os.W_OK):
+ pkgstuff = pkgsplit(self.pkg)
+ writemsg(_("\n!!! Cannot write to '%s'.\n") % mydest, noiselevel=-1)
+ writemsg(_("!!! Please check permissions and directories for broken symlinks.\n"))
+ writemsg(_("!!! You may start the merge process again by using ebuild:\n"))
+ writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
+ writemsg(_("!!! And finish by running this: env-update\n\n"))
+ return 1
+
+ if stat.S_ISDIR(mydmode) or \
+ (stat.S_ISLNK(mydmode) and os.path.isdir(mydest)):
+ # a symlink to an existing directory will work for us; keep it:
+ showMessage("--- %s/\n" % mydest)
+ if bsd_chflags:
+ bsd_chflags.lchflags(mydest, dflags)
+ else:
+ # a non-directory and non-symlink-to-directory. Won't work for us. Move out of the way.
+ backup_dest = self._new_backup_path(mydest)
+ msg = []
+ msg.append("")
+ msg.append(_("Installation of a directory is blocked by a file:"))
+ msg.append(" '%s'" % mydest)
+ msg.append(_("This file will be renamed to a different name:"))
+ msg.append(" '%s'" % backup_dest)
+ msg.append("")
+ self._eerror("preinst", msg)
+ if movefile(mydest, backup_dest,
+ mysettings=self.settings,
+ encoding=_encodings['merge']) is None:
+ return 1
+ showMessage(_("bak %s %s.backup\n") % (mydest, mydest),
+ level=logging.ERROR, noiselevel=-1)
+ #now create our directory
+ try:
+ if self.settings.selinux_enabled():
+ _selinux_merge.mkdir(mydest, mysrc)
+ else:
+ os.mkdir(mydest)
+ except OSError as e:
+ # Error handling should be equivalent to
+ # portage.util.ensure_dirs() for cases
+ # like bug #187518.
+ if e.errno in (errno.EEXIST,):
+ pass
+ elif os.path.isdir(mydest):
+ pass
+ else:
+ raise
+ del e
+
+ if bsd_chflags:
+ bsd_chflags.lchflags(mydest, dflags)
+ os.chmod(mydest, mystat[0])
+ os.chown(mydest, mystat[4], mystat[5])
+ showMessage(">>> %s/\n" % mydest)
+ else:
+ try:
+ #destination doesn't exist
+ if self.settings.selinux_enabled():
+ _selinux_merge.mkdir(mydest, mysrc)
+ else:
+ os.mkdir(mydest)
+ except OSError as e:
+ # Error handling should be equivalent to
+ # portage.util.ensure_dirs() for cases
+ # like bug #187518.
+ if e.errno in (errno.EEXIST,):
+ pass
+ elif os.path.isdir(mydest):
+ pass
+ else:
+ raise
+ del e
+ os.chmod(mydest, mystat[0])
+ os.chown(mydest, mystat[4], mystat[5])
+ showMessage(">>> %s/\n" % mydest)
+ outfile.write("dir "+myrealdest+"\n")
+ # recurse and merge this directory
+ if self.mergeme(srcroot, destroot, outfile, secondhand,
+ join(offset, x), cfgfiledict, thismtime):
+ return 1
+ elif stat.S_ISREG(mymode):
+ # we are merging a regular file
+ mymd5 = perform_md5(mysrc, calc_prelink=calc_prelink)
+ # calculate config file protection stuff
+ mydestdir = os.path.dirname(mydest)
+ moveme = 1
+ zing = "!!!"
+ mymtime = None
+ protected = self.isprotected(mydest)
+ if mydmode != None:
+ # destination file exists
+
+ if stat.S_ISDIR(mydmode):
+ # install of destination is blocked by an existing directory with the same name
+ newdest = self._new_backup_path(mydest)
+ msg = []
+ msg.append("")
+ msg.append(_("Installation of a regular file is blocked by a directory:"))
+ msg.append(" '%s'" % mydest)
+ msg.append(_("This file will be merged with a different name:"))
+ msg.append(" '%s'" % newdest)
+ msg.append("")
+ self._eerror("preinst", msg)
+ mydest = newdest
+
+ elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
+ # install of destination is blocked by an existing regular file,
+ # or by a symlink to an existing regular file;
+ # now, config file management may come into play.
+ # we only need to tweak mydest if cfg file management is in play.
+ if protected:
+ # we have a protection path; enable config file management.
+ cfgprot = 0
+ destmd5 = perform_md5(mydest, calc_prelink=calc_prelink)
+ if mymd5 == destmd5:
+ #file already in place; simply update mtimes of destination
+ moveme = 1
+ else:
+ if mymd5 == cfgfiledict.get(myrealdest, [None])[0]:
+ """ An identical update has previously been
+ merged. Skip it unless the user has chosen
+ --noconfmem."""
+ moveme = cfgfiledict["IGNORE"]
+ cfgprot = cfgfiledict["IGNORE"]
+ if not moveme:
+ zing = "---"
+ mymtime = mystat[stat.ST_MTIME]
+ else:
+ moveme = 1
+ cfgprot = 1
+ if moveme:
+ # Merging a new file, so update confmem.
+ cfgfiledict[myrealdest] = [mymd5]
+ elif destmd5 == cfgfiledict.get(myrealdest, [None])[0]:
+ """A previously remembered update has been
+ accepted, so it is removed from confmem."""
+ del cfgfiledict[myrealdest]
+
+ if cfgprot:
+ mydest = new_protect_filename(mydest, newmd5=mymd5)
+
+ # whether config protection applies or not, we merge the new file the
+ # same way, unless moveme=0 (blocking directory).
+ if moveme:
+ # Create hardlinks only for source files that already exist
+ # as hardlinks (having identical st_dev and st_ino).
+ hardlink_key = (mystat.st_dev, mystat.st_ino)
+
+ hardlink_candidates = self._md5_merge_map.get(hardlink_key)
+ if hardlink_candidates is None:
+ hardlink_candidates = []
+ self._md5_merge_map[hardlink_key] = hardlink_candidates
+
+ mymtime = movefile(mysrc, mydest, newmtime=thismtime,
+ sstat=mystat, mysettings=self.settings,
+ hardlink_candidates=hardlink_candidates,
+ encoding=_encodings['merge'])
+ if mymtime is None:
+ return 1
+ if hardlink_candidates is not None:
+ hardlink_candidates.append(mydest)
+ zing = ">>>"
+
+ if mymtime != None:
+ outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
+ showMessage("%s %s\n" % (zing,mydest))
+ else:
+ # we are merging a fifo or device node
+ zing = "!!!"
+ if mydmode is None:
+ # destination doesn't exist
+ if movefile(mysrc, mydest, newmtime=thismtime,
+ sstat=mystat, mysettings=self.settings,
+ encoding=_encodings['merge']) is not None:
+ zing = ">>>"
+ else:
+ return 1
+ if stat.S_ISFIFO(mymode):
+ outfile.write("fif %s\n" % myrealdest)
+ else:
+ outfile.write("dev %s\n" % myrealdest)
+ showMessage(zing + " " + mydest + "\n")
+
+ def merge(self, mergeroot, inforoot, myroot=None, myebuild=None, cleanup=0,
+ mydbapi=None, prev_mtimes=None, counter=None):
+ """
+ @param myroot: ignored, self._eroot is used instead
+ """
+ myroot = None
+ retval = -1
+ parallel_install = "parallel-install" in self.settings.features
+ if not parallel_install:
+ self.lockdb()
+ self.vartree.dbapi._bump_mtime(self.mycpv)
+ if self._scheduler is None:
+ self._scheduler = PollScheduler().sched_iface
+ try:
+ retval = self.treewalk(mergeroot, myroot, inforoot, myebuild,
+ cleanup=cleanup, mydbapi=mydbapi, prev_mtimes=prev_mtimes,
+ counter=counter)
+
+ # If PORTAGE_BUILDDIR doesn't exist, then it probably means
+ # fail-clean is enabled, and the success/die hooks have
+ # already been called by EbuildPhase.
+ if os.path.isdir(self.settings['PORTAGE_BUILDDIR']):
+
+ if retval == os.EX_OK:
+ phase = 'success_hooks'
+ else:
+ phase = 'die_hooks'
+
+ ebuild_phase = MiscFunctionsProcess(
+ background=False, commands=[phase],
+ scheduler=self._scheduler, settings=self.settings)
+ ebuild_phase.start()
+ ebuild_phase.wait()
+ self._elog_process()
+
+ if 'noclean' not in self.settings.features and \
+ (retval == os.EX_OK or \
+ 'fail-clean' in self.settings.features):
+ if myebuild is None:
+ myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
+
+ doebuild_environment(myebuild, "clean",
+ settings=self.settings, db=mydbapi)
+ phase = EbuildPhase(background=False, phase="clean",
+ scheduler=self._scheduler, settings=self.settings)
+ phase.start()
+ phase.wait()
+ finally:
+ self.settings.pop('REPLACING_VERSIONS', None)
+ if self.vartree.dbapi._linkmap is None:
+ # preserve-libs is entirely disabled
+ pass
+ else:
+ self.vartree.dbapi._linkmap._clear_cache()
+ self.vartree.dbapi._bump_mtime(self.mycpv)
+ if not parallel_install:
+ self.unlockdb()
+ return retval
+
+ def getstring(self,name):
+ "returns contents of a file with whitespace converted to spaces"
+ if not os.path.exists(self.dbdir+"/"+name):
+ return ""
+ mydata = io.open(
+ _unicode_encode(os.path.join(self.dbdir, name),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'], errors='replace'
+ ).read().split()
+ return " ".join(mydata)
+
+ def copyfile(self,fname):
+ shutil.copyfile(fname,self.dbdir+"/"+os.path.basename(fname))
+
+ def getfile(self,fname):
+ if not os.path.exists(self.dbdir+"/"+fname):
+ return ""
+ return io.open(_unicode_encode(os.path.join(self.dbdir, fname),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'], errors='replace'
+ ).read()
+
+ def setfile(self,fname,data):
+ kwargs = {}
+ if fname == 'environment.bz2' or not isinstance(data, basestring):
+ kwargs['mode'] = 'wb'
+ else:
+ kwargs['mode'] = 'w'
+ kwargs['encoding'] = _encodings['repo.content']
+ write_atomic(os.path.join(self.dbdir, fname), data, **kwargs)
+
+ def getelements(self,ename):
+ if not os.path.exists(self.dbdir+"/"+ename):
+ return []
+ mylines = io.open(_unicode_encode(
+ os.path.join(self.dbdir, ename),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'], errors='replace'
+ ).readlines()
+ myreturn = []
+ for x in mylines:
+ for y in x[:-1].split():
+ myreturn.append(y)
+ return myreturn
+
+ def setelements(self,mylist,ename):
+ myelement = io.open(_unicode_encode(
+ os.path.join(self.dbdir, ename),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='backslashreplace')
+ for x in mylist:
+ myelement.write(_unicode_decode(x+"\n"))
+ myelement.close()
+
+ def isregular(self):
+ "Is this a regular package (does it have a CATEGORY file? A dblink can be virtual *and* regular)"
+ return os.path.exists(os.path.join(self.dbdir, "CATEGORY"))
+
+def merge(mycat, mypkg, pkgloc, infloc,
+ myroot=None, settings=None, myebuild=None,
+ mytree=None, mydbapi=None, vartree=None, prev_mtimes=None, blockers=None,
+ scheduler=None):
+ """
+ @param myroot: ignored, settings['EROOT'] is used instead
+ """
+ myroot = None
+ if settings is None:
+ raise TypeError("settings argument is required")
+ if not os.access(settings['EROOT'], os.W_OK):
+ writemsg(_("Permission denied: access('%s', W_OK)\n") % settings['EROOT'],
+ noiselevel=-1)
+ return errno.EACCES
+ background = (settings.get('PORTAGE_BACKGROUND') == '1')
+ merge_task = MergeProcess(
+ mycat=mycat, mypkg=mypkg, settings=settings,
+ treetype=mytree, vartree=vartree,
+ scheduler=(scheduler or PollScheduler().sched_iface),
+ background=background, blockers=blockers, pkgloc=pkgloc,
+ infloc=infloc, myebuild=myebuild, mydbapi=mydbapi,
+ prev_mtimes=prev_mtimes, logfile=settings.get('PORTAGE_LOG_FILE'))
+ merge_task.start()
+ retcode = merge_task.wait()
+ return retcode
+
+def unmerge(cat, pkg, myroot=None, settings=None,
+ mytrimworld=None, vartree=None,
+ ldpath_mtimes=None, scheduler=None):
+ """
+ @param myroot: ignored, settings['EROOT'] is used instead
+ @param mytrimworld: ignored
+ """
+ myroot = None
+ if settings is None:
+ raise TypeError("settings argument is required")
+ mylink = dblink(cat, pkg, settings=settings, treetype="vartree",
+ vartree=vartree, scheduler=scheduler)
+ vartree = mylink.vartree
+ parallel_install = "parallel-install" in settings.features
+ if not parallel_install:
+ mylink.lockdb()
+ try:
+ if mylink.exists():
+ retval = mylink.unmerge(ldpath_mtimes=ldpath_mtimes)
+ if retval == os.EX_OK:
+ mylink.lockdb()
+ try:
+ mylink.delete()
+ finally:
+ mylink.unlockdb()
+ return retval
+ return os.EX_OK
+ finally:
+ if vartree.dbapi._linkmap is None:
+ # preserve-libs is entirely disabled
+ pass
+ else:
+ vartree.dbapi._linkmap._clear_cache()
+ if not parallel_install:
+ mylink.unlockdb()
+
+def write_contents(contents, root, f):
+ """
+ Write contents to any file-like object. The file will be left open.
+ """
+ root_len = len(root) - 1
+ for filename in sorted(contents):
+ entry_data = contents[filename]
+ entry_type = entry_data[0]
+ relative_filename = filename[root_len:]
+ if entry_type == "obj":
+ entry_type, mtime, md5sum = entry_data
+ line = "%s %s %s %s\n" % \
+ (entry_type, relative_filename, md5sum, mtime)
+ elif entry_type == "sym":
+ entry_type, mtime, link = entry_data
+ line = "%s %s -> %s %s\n" % \
+ (entry_type, relative_filename, link, mtime)
+ else: # dir, dev, fif
+ line = "%s %s\n" % (entry_type, relative_filename)
+ f.write(line)
+
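+# A usage sketch (the dblink instance "mylink" and the use of StringIO
+# are assumptions for illustration, not a fixed API contract):
+#
+#   import io
+#   contents = mylink.getcontents()
+#   buf = io.StringIO()
+#   write_contents(contents, mylink.settings['ROOT'], buf)
+#   # buf now holds "obj /usr/bin/foo <md5> <mtime>" style lines.
+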
+def tar_contents(contents, root, tar, protect=None, onProgress=None):
+ os = _os_merge
+
+ try:
+ for x in contents:
+ _unicode_encode(x,
+ encoding=_encodings['merge'],
+ errors='strict')
+ except UnicodeEncodeError:
+ # The package appears to have been merged with a
+ # different value of sys.getfilesystemencoding(),
+ # so fall back to utf_8 if appropriate.
+ try:
+ for x in contents:
+ _unicode_encode(x,
+ encoding=_encodings['fs'],
+ errors='strict')
+ except UnicodeEncodeError:
+ pass
+ else:
+ os = portage.os
+
+ root = normalize_path(root).rstrip(os.path.sep) + os.path.sep
+ id_strings = {}
+ maxval = len(contents)
+ curval = 0
+ if onProgress:
+ onProgress(maxval, 0)
+ paths = list(contents)
+ paths.sort()
+ for path in paths:
+ curval += 1
+ try:
+ lst = os.lstat(path)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ if onProgress:
+ onProgress(maxval, curval)
+ continue
+ contents_type = contents[path][0]
+ if path.startswith(root):
+ arcname = path[len(root):]
+ else:
+ raise ValueError("invalid root argument: '%s'" % root)
+ live_path = path
+ if 'dir' == contents_type and \
+ not stat.S_ISDIR(lst.st_mode) and \
+ os.path.isdir(live_path):
+ # Even though this was a directory in the original ${D}, it exists
+ # as a symlink to a directory in the live filesystem. It must be
+ # recorded as a real directory in the tar file to ensure that tar
+ # can properly extract its children.
+ live_path = os.path.realpath(live_path)
+ tarinfo = tar.gettarinfo(live_path, arcname)
+
+ if stat.S_ISREG(lst.st_mode):
+ if protect and protect(path):
+ # Create an empty file as a placeholder in order to avoid
+ # potential collision-protect issues.
+ f = tempfile.TemporaryFile()
+ f.write(_unicode_encode(
+ "# empty file because --include-config=n " + \
+ "when `quickpkg` was used\n"))
+ f.flush()
+ f.seek(0)
+ tarinfo.size = os.fstat(f.fileno()).st_size
+ tar.addfile(tarinfo, f)
+ f.close()
+ else:
+ f = open(_unicode_encode(path,
+ encoding=object.__getattribute__(os, '_encoding'),
+ errors='strict'), 'rb')
+ try:
+ tar.addfile(tarinfo, f)
+ finally:
+ f.close()
+ else:
+ tar.addfile(tarinfo)
+ if onProgress:
+ onProgress(maxval, curval)
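+
+# A usage sketch (illustrative; quickpkg drives this function in
+# practice, and "mylink" is an assumed dblink instance):
+#
+#   import tarfile
+#   contents = mylink.getcontents()
+#   tar = tarfile.open("/tmp/foo-1.0.tar.bz2", "w:bz2")
+#   try:
+#       tar_contents(contents, mylink.settings['ROOT'], tar)
+#   finally:
+#       tar.close()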
diff --git a/portage_with_autodep/pym/portage/dbapi/virtual.py b/portage_with_autodep/pym/portage/dbapi/virtual.py
new file mode 100644
index 0000000..ec97ffe
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dbapi/virtual.py
@@ -0,0 +1,131 @@
+# Copyright 1998-2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+
+from portage.dbapi import dbapi
+from portage import cpv_getkey
+
+class fakedbapi(dbapi):
+ """A fake dbapi that allows consumers to inject/remove packages to/from it
+ portage.settings is required to maintain the dbAPI.
+ """
+ def __init__(self, settings=None, exclusive_slots=True):
+ """
+ @param exclusive_slots: When True, injecting a package with SLOT
+ metadata causes an existing package in the same slot to be
+ automatically removed (default is True).
+ @type exclusive_slots: Boolean
+ """
+ self._exclusive_slots = exclusive_slots
+ self.cpvdict = {}
+ self.cpdict = {}
+ if settings is None:
+ from portage import settings
+ self.settings = settings
+ self._match_cache = {}
+
+ def _clear_cache(self):
+ if self._categories is not None:
+ self._categories = None
+ if self._match_cache:
+ self._match_cache = {}
+
+ def match(self, origdep, use_cache=1):
+ result = self._match_cache.get(origdep, None)
+ if result is not None:
+ return result[:]
+ result = dbapi.match(self, origdep, use_cache=use_cache)
+ self._match_cache[origdep] = result
+ return result[:]
+
+ def cpv_exists(self, mycpv, myrepo=None):
+ return mycpv in self.cpvdict
+
+ def cp_list(self, mycp, use_cache=1, myrepo=None):
+ cachelist = self._match_cache.get(mycp)
+ # cp_list() doesn't expand old-style virtuals
+ if cachelist and cachelist[0].startswith(mycp):
+ return cachelist[:]
+ cpv_list = self.cpdict.get(mycp)
+ if cpv_list is None:
+ cpv_list = []
+ self._cpv_sort_ascending(cpv_list)
+ if not (not cpv_list and mycp.startswith("virtual/")):
+ self._match_cache[mycp] = cpv_list
+ return cpv_list[:]
+
+ def cp_all(self):
+ return list(self.cpdict)
+
+ def cpv_all(self):
+ return list(self.cpvdict)
+
+ def cpv_inject(self, mycpv, metadata=None):
+ """Adds a cpv to the list of available packages. See the
+ exclusive_slots constructor parameter for behavior with
+ respect to SLOT metadata.
+ @param mycpv: cpv for the package to inject
+ @type mycpv: str
+ @param metadata: dictionary of raw metadata for aux_get() calls
+ @type metadata: dict
+ """
+ self._clear_cache()
+ mycp = cpv_getkey(mycpv)
+ self.cpvdict[mycpv] = metadata
+ myslot = None
+ if self._exclusive_slots and metadata:
+ myslot = metadata.get("SLOT", None)
+ if myslot and mycp in self.cpdict:
+ # If necessary, remove another package in the same SLOT.
+ for cpv in self.cpdict[mycp]:
+ if mycpv != cpv:
+ other_metadata = self.cpvdict[cpv]
+ if other_metadata:
+ if myslot == other_metadata.get("SLOT", None):
+ self.cpv_remove(cpv)
+ break
+ if mycp not in self.cpdict:
+ self.cpdict[mycp] = []
+ if mycpv not in self.cpdict[mycp]:
+ self.cpdict[mycp].append(mycpv)
+
+ def cpv_remove(self,mycpv):
+ """Removes a cpv from the list of available packages."""
+ self._clear_cache()
+ mycp = cpv_getkey(mycpv)
+ if mycpv in self.cpvdict:
+ del self.cpvdict[mycpv]
+ if mycp not in self.cpdict:
+ return
+ while mycpv in self.cpdict[mycp]:
+ del self.cpdict[mycp][self.cpdict[mycp].index(mycpv)]
+ if not len(self.cpdict[mycp]):
+ del self.cpdict[mycp]
+
+ def aux_get(self, mycpv, wants, myrepo=None):
+ if not self.cpv_exists(mycpv):
+ raise KeyError(mycpv)
+ metadata = self.cpvdict[mycpv]
+ if not metadata:
+ return ["" for x in wants]
+ return [metadata.get(x, "") for x in wants]
+
+ def aux_update(self, cpv, values):
+ self._clear_cache()
+ self.cpvdict[cpv].update(values)
+
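+# A usage sketch for fakedbapi (cpv and metadata are made-up values):
+#
+#   fake = fakedbapi(settings=mysettings)
+#   fake.cpv_inject("sys-apps/foo-1.0", metadata={"SLOT": "0"})
+#   fake.cp_list("sys-apps/foo")                # ['sys-apps/foo-1.0']
+#   fake.aux_get("sys-apps/foo-1.0", ["SLOT"])  # ['0']
+#   # Injecting foo-2.0 with SLOT="0" would evict foo-1.0, since
+#   # exclusive_slots defaults to True.
+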
+class testdbapi(object):
+ """A dbapi instance with completely fake functions to get by hitting disk
+ TODO(antarus):
+ This class really needs to be rewritten to have better stubs; but these work for now.
+ The dbapi classes themselves need unit tests...and that will be a lot of work.
+ """
+
+ def __init__(self):
+ self.cpvs = {}
+ def f(*args, **kwargs):
+ return True
+ fake_api = dir(dbapi)
+ for call in fake_api:
+ if not hasattr(self, call):
+ setattr(self, call, f)
diff --git a/portage_with_autodep/pym/portage/debug.py b/portage_with_autodep/pym/portage/debug.py
new file mode 100644
index 0000000..ce642fe
--- /dev/null
+++ b/portage_with_autodep/pym/portage/debug.py
@@ -0,0 +1,120 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+import sys
+
+try:
+ import threading
+except ImportError:
+ import dummy_threading as threading
+
+import portage.const
+from portage.util import writemsg
+
+def set_trace(on=True):
+ if on:
+ t = trace_handler()
+ threading.settrace(t.event_handler)
+ sys.settrace(t.event_handler)
+ else:
+ sys.settrace(None)
+ threading.settrace(None)
+
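+# A usage sketch: wrap a suspect call with tracing enabled (output is
+# emitted through portage.util.writemsg); "suspect_call" is a
+# hypothetical function used only for illustration:
+#
+#   import portage.debug
+#   portage.debug.set_trace(True)
+#   suspect_call()
+#   portage.debug.set_trace(False)
+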
+class trace_handler(object):
+
+ def __init__(self):
+ python_system_paths = []
+ for x in sys.path:
+ if os.path.basename(x).startswith("python2."):
+ python_system_paths.append(x)
+
+ self.ignore_prefixes = []
+ for x in python_system_paths:
+ self.ignore_prefixes.append(x + os.sep)
+
+ self.trim_filename = prefix_trimmer(os.path.join(portage.const.PORTAGE_BASE_PATH, "pym") + os.sep).trim
+ self.show_local_lines = False
+ self.max_repr_length = 200
+
+ def event_handler(self, *args):
+ frame, event, arg = args
+ if "line" == event:
+ if self.show_local_lines:
+ self.trace_line(*args)
+ else:
+ if not self.ignore_filename(frame.f_code.co_filename):
+ self.trace_event(*args)
+ return self.event_handler
+
+ def trace_event(self, frame, event, arg):
+ writemsg("%s line=%d name=%s event=%s %slocals=%s\n" % \
+ (self.trim_filename(frame.f_code.co_filename),
+ frame.f_lineno,
+ frame.f_code.co_name,
+ event,
+ self.arg_repr(frame, event, arg),
+ self.locals_repr(frame, event, arg)))
+
+ def arg_repr(self, frame, event, arg):
+ my_repr = None
+ if "return" == event:
+ my_repr = repr(arg)
+ if len(my_repr) > self.max_repr_length:
+ my_repr = "'omitted'"
+ return "value=%s " % my_repr
+ elif "exception" == event:
+ my_repr = repr(arg[1])
+ if len(my_repr) > self.max_repr_length:
+ my_repr = "'omitted'"
+ return "type=%s value=%s " % (arg[0], my_repr)
+
+ return ""
+
+ def trace_line(self, frame, event, arg):
+ writemsg("%s line=%d\n" % (self.trim_filename(frame.f_code.co_filename), frame.f_lineno))
+
+ def ignore_filename(self, filename):
+ if filename:
+ for x in self.ignore_prefixes:
+ if filename.startswith(x):
+ return True
+ return False
+
+ def locals_repr(self, frame, event, arg):
+ """Create a representation of the locals dict that is suitable for
+ tracing output."""
+
+ my_locals = frame.f_locals.copy()
+
+ # prevent unsafe __repr__ call on self when __init__ is called
+ # (method calls aren't safe until after __init__ has completed).
+ if frame.f_code.co_name == "__init__" and "self" in my_locals:
+ my_locals["self"] = "omitted"
+
+ # We omit items that will lead to unreasonable bloat of the trace
+ # output (and resulting log file).
+ for k, v in my_locals.items():
+ my_repr = repr(v)
+ if len(my_repr) > self.max_repr_length:
+ my_locals[k] = "omitted"
+ return my_locals
+
+class prefix_trimmer(object):
+ def __init__(self, prefix):
+ self.prefix = prefix
+ self.cut_index = len(prefix)
+ self.previous = None
+ self.previous_trimmed = None
+
+ def trim(self, s):
+ """Remove a prefix from the string and return the result.
+ The previous result is automatically cached."""
+ if s == self.previous:
+ return self.previous_trimmed
+ else:
+ if s.startswith(self.prefix):
+ self.previous_trimmed = s[self.cut_index:]
+ else:
+ self.previous_trimmed = s
+ return self.previous_trimmed
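+
+# For example, with a hypothetical prefix and inputs:
+#
+#   trim = prefix_trimmer("/usr/lib/portage/pym/").trim
+#   trim("/usr/lib/portage/pym/portage/dep.py")  # -> 'portage/dep.py'
+#   trim("/etc/make.conf")                       # -> '/etc/make.conf'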
diff --git a/portage_with_autodep/pym/portage/dep/__init__.py b/portage_with_autodep/pym/portage/dep/__init__.py
new file mode 100644
index 0000000..fd5ad30
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dep/__init__.py
@@ -0,0 +1,2432 @@
+# deps.py -- Portage dependency resolution functions
+# Copyright 2003-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = [
+ 'Atom', 'best_match_to_list', 'cpvequal',
+ 'dep_getcpv', 'dep_getkey', 'dep_getslot',
+ 'dep_getusedeps', 'dep_opconvert', 'flatten',
+ 'get_operator', 'isjustname', 'isspecific',
+ 'isvalidatom', 'match_from_list', 'match_to_list',
+ 'paren_enclose', 'paren_normalize', 'paren_reduce',
+ 'remove_slot', 'strip_empty', 'use_reduce',
+ '_repo_separator', '_slot_separator',
+]
+
+# DEPEND SYNTAX:
+#
+# 'use?' only affects the immediately following word!
+# Nesting is the only legal way to form multiple '[!]use?' requirements.
+#
+# Where: 'a' and 'b' are use flags, and 'z' is a depend atom.
+#
+# "a? z" -- If 'a' in [use], then b is valid.
+# "a? ( z )" -- Syntax with parenthesis.
+# "a? b? z" -- Deprecated.
+# "a? ( b? z )" -- Valid
+# "a? ( b? ( z ) ) -- Valid
+#
+
+import re, sys
+import warnings
+from itertools import chain
+from portage import _unicode_decode
+from portage.eapi import eapi_has_slot_deps, eapi_has_src_uri_arrows, \
+ eapi_has_use_deps, eapi_has_strong_blocks, eapi_has_use_dep_defaults
+from portage.exception import InvalidAtom, InvalidData, InvalidDependString
+from portage.localization import _
+from portage.versions import catpkgsplit, catsplit, \
+ pkgcmp, ververify, _cp, _cpv
+import portage.cache.mappings
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+# API consumers included in portage should set this to True.
+# Once the relevant api changes are in a portage release with
+# stable keywords, make these warnings unconditional.
+_internal_warnings = False
+
+def cpvequal(cpv1, cpv2):
+ """
+
+ @param cpv1: CategoryPackageVersion (no operators) Example: "sys-apps/portage-2.1"
+ @type cpv1: String
+ @param cpv2: CategoryPackageVersion (no operators) Example: "sys-apps/portage-2.1"
+ @type cpv2: String
+ @rtype: Boolean
+ @return:
+ 1. True if cpv1 = cpv2
+ 2. False otherwise
+ 3. Throws PortageException if cpv1 or cpv2 is not a CPV
+
+ Example Usage:
+ >>> from portage.dep import cpvequal
+ >>> cpvequal("sys-apps/portage-2.1","sys-apps/portage-2.1")
+ True
+
+ """
+
+ split1 = catpkgsplit(cpv1)
+ split2 = catpkgsplit(cpv2)
+
+ if not split1 or not split2:
+ raise portage.exception.PortageException(_("Invalid data '%s, %s', parameter was not a CPV") % (cpv1, cpv2))
+
+ if split1[0] != split2[0]:
+ return False
+
+ return (pkgcmp(split1[1:], split2[1:]) == 0)
+
+def strip_empty(myarr):
+ """
+ Strip all empty elements from an array
+
+ @param myarr: The list of elements
+ @type myarr: List
+ @rtype: Array
+ @return: The array with empty elements removed
+ """
+ warnings.warn(_("%s is deprecated and will be removed without replacement.") % \
+ ('portage.dep.strip_empty',), DeprecationWarning, stacklevel=2)
+ return [x for x in myarr if x]
+
+def paren_reduce(mystr):
+ """
+ Take a string and convert all paren enclosed entities into sublists and
+ split the list elements by spaces. All redundant brackets are removed.
+
+ Example usage:
+ >>> paren_reduce('foobar foo? ( bar baz )')
+ ['foobar', 'foo?', ['bar', 'baz']]
+
+ @param mystr: The string to reduce
+ @type mystr: String
+ @rtype: Array
+ @return: The reduced string in an array
+ """
+ if _internal_warnings:
+ warnings.warn(_("%s is deprecated and will be removed without replacement.") % \
+ ('portage.dep.paren_reduce',), DeprecationWarning, stacklevel=2)
+ mysplit = mystr.split()
+ level = 0
+ stack = [[]]
+ need_bracket = False
+
+ for token in mysplit:
+ if token == "(":
+ need_bracket = False
+ stack.append([])
+ level += 1
+ elif token == ")":
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+ if level > 0:
+ level -= 1
+ l = stack.pop()
+ is_single = (len(l) == 1 or (len(l)==2 and (l[0] == "||" or l[0][-1] == "?")))
+
+ def ends_in_any_of_dep(k):
+ return k>=0 and stack[k] and stack[k][-1] == "||"
+
+ def ends_in_operator(k):
+ return k>=0 and stack[k] and (stack[k][-1] == "||" or stack[k][-1][-1] == "?")
+
+ def special_append():
+ """
+ Use extend instead of append if possible. This kills all redundant brackets.
+ """
+ if is_single and (not stack[level] or not stack[level][-1][-1] == "?"):
+ if len(l) == 1 and isinstance(l[0], list):
+ # l = [[...]]
+ stack[level].extend(l[0])
+ else:
+ stack[level].extend(l)
+ else:
+ stack[level].append(l)
+
+ if l:
+ if not ends_in_any_of_dep(level-1) and not ends_in_operator(level):
+ #Optimize: ( ( ... ) ) -> ( ... ). Make sure there is no '||' hanging around.
+ stack[level].extend(l)
+ elif not stack[level]:
+ #An '||' in the level above forces us to keep the brackets.
+ special_append()
+ elif len(l) == 1 and ends_in_any_of_dep(level):
+ #Optimize: || ( A ) -> A
+ stack[level].pop()
+ special_append()
+ elif len(l) == 2 and (l[0] == "||" or l[0][-1] == "?") and stack[level][-1] in (l[0], "||"):
+ #Optimize: || ( || ( ... ) ) -> || ( ... )
+ # foo? ( foo? ( ... ) ) -> foo? ( ... )
+ # || ( foo? ( ... ) ) -> foo? ( ... )
+ stack[level].pop()
+ special_append()
+ else:
+ special_append()
+ else:
+ if stack[level] and (stack[level][-1] == "||" or stack[level][-1][-1] == "?"):
+ stack[level].pop()
+ else:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+ elif token == "||":
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+ need_bracket = True
+ stack[level].append(token)
+ else:
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+
+ if token[-1] == "?":
+ need_bracket = True
+
+ stack[level].append(token)
+
+ if level != 0 or need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+
+ return stack[0]
+
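+# A short sketch of the bracket reduction (illustrative doctest):
+#
+# >>> paren_reduce('|| ( ( a b ) )')
+# ['a', 'b']
+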
+class paren_normalize(list):
+ """Take a dependency structure as returned by paren_reduce or use_reduce
+ and generate an equivalent structure that has no redundant lists."""
+ def __init__(self, src):
+ if _internal_warnings:
+ warnings.warn(_("%s is deprecated and will be removed without replacement.") % \
+ ('portage.dep.paren_normalize',), DeprecationWarning, stacklevel=2)
+ list.__init__(self)
+ self._zap_parens(src, self)
+
+ def _zap_parens(self, src, dest, disjunction=False):
+ if not src:
+ return dest
+ i = iter(src)
+ for x in i:
+ if isinstance(x, basestring):
+ if x in ('||', '^^'):
+ y = self._zap_parens(next(i), [], disjunction=True)
+ if len(y) == 1:
+ dest.append(y[0])
+ else:
+ dest.append(x)
+ dest.append(y)
+ elif x.endswith("?"):
+ dest.append(x)
+ dest.append(self._zap_parens(next(i), []))
+ else:
+ dest.append(x)
+ else:
+ if disjunction:
+ x = self._zap_parens(x, [])
+ if len(x) == 1:
+ dest.append(x[0])
+ else:
+ dest.append(x)
+ else:
+ self._zap_parens(x, dest)
+ return dest
+
+def paren_enclose(mylist, unevaluated_atom=False):
+ """
+ Convert a list to a string with sublists enclosed with parens.
+
+ Example usage:
+ >>> test = ['foobar','foo',['bar','baz']]
+ >>> paren_enclose(test)
+ 'foobar foo ( bar baz )'
+
+ @param mylist: The list
+ @type mylist: List
+ @rtype: String
+ @return: The paren enclosed string
+ """
+ mystrparts = []
+ for x in mylist:
+ if isinstance(x, list):
+ mystrparts.append("( "+paren_enclose(x)+" )")
+ else:
+ if unevaluated_atom:
+ x = getattr(x, 'unevaluated_atom', x)
+ mystrparts.append(x)
+ return " ".join(mystrparts)
+
+def use_reduce(depstr, uselist=[], masklist=[], matchall=False, excludeall=[], is_src_uri=False, \
+ eapi=None, opconvert=False, flat=False, is_valid_flag=None, token_class=None, matchnone=False):
+ """
+ Takes a dep string and reduces the use? conditionals out, leaving an array
+ with subarrays. All redundant brackets are removed.
+
+ @param depstr: depstring
+ @type depstr: String
+ @param uselist: List of use enabled flags
+ @type uselist: List
+ @param masklist: List of masked flags (always treated as disabled)
+ @type masklist: List
+ @param matchall: Treat all conditionals as active. Used by repoman.
+ @type matchall: Bool
+ @param excludeall: List of flags for which negated conditionals are always treated as inactive.
+ @type excludeall: List
+ @param is_src_uri: Indicates if depstr represents a SRC_URI
+ @type is_src_uri: Bool
+ @param eapi: Indicates the EAPI the dep string has to comply to
+ @type eapi: String
+ @param opconvert: Put every operator as the first element into its argument list
+ @type opconvert: Bool
+ @param flat: Create a flat list of all tokens
+ @type flat: Bool
+ @param is_valid_flag: Function that decides if a given use flag might be used in use conditionals
+ @type is_valid_flag: Function
+ @param token_class: Convert all non operator tokens into this class
+ @type token_class: Class
+ @param matchnone: Treat all conditionals as inactive. Used by digestgen().
+ @type matchnone: Bool
+ @rtype: List
+ @return: The use reduced depend array
+ """
+ if isinstance(depstr, list):
+ if _internal_warnings:
+ warnings.warn(_("Passing paren_reduced dep arrays to %s is deprecated. " + \
+ "Pass the original dep string instead.") % \
+ ('portage.dep.use_reduce',), DeprecationWarning, stacklevel=2)
+ depstr = paren_enclose(depstr)
+
+ if opconvert and flat:
+ raise ValueError("portage.dep.use_reduce: 'opconvert' and 'flat' are mutually exclusive")
+
+ if matchall and matchnone:
+ raise ValueError("portage.dep.use_reduce: 'matchall' and 'matchnone' are mutually exclusive")
+
+ useflag_re = _get_useflag_re(eapi)
+
+ def is_active(conditional):
+ """
+ Decides if a given use conditional is active.
+ """
+ if conditional.startswith("!"):
+ flag = conditional[1:-1]
+ is_negated = True
+ else:
+ flag = conditional[:-1]
+ is_negated = False
+
+ if is_valid_flag:
+ if not is_valid_flag(flag):
+ msg = _("USE flag '%s' referenced in " + \
+ "conditional '%s' is not in IUSE") \
+ % (flag, conditional)
+ e = InvalidData(msg, category='IUSE.missing')
+ raise InvalidDependString(msg, errors=(e,))
+ else:
+ if useflag_re.match(flag) is None:
+ raise InvalidDependString(
+ _("invalid use flag '%s' in conditional '%s'") % (flag, conditional))
+
+ if is_negated and flag in excludeall:
+ return False
+
+ if flag in masklist:
+ return is_negated
+
+ if matchall:
+ return True
+
+ if matchnone:
+ return False
+
+ return (flag in uselist and not is_negated) or \
+ (flag not in uselist and is_negated)
+
+ def missing_white_space_check(token, pos):
+ """
+ Used to generate good error messages for invalid tokens.
+ """
+ for x in (")", "(", "||"):
+ if token.startswith(x) or token.endswith(x):
+ raise InvalidDependString(
+ _("missing whitespace around '%s' at '%s', token %s") % (x, token, pos+1))
+
+ mysplit = depstr.split()
+ #Count the bracket level.
+ level = 0
+ #We parse into a stack. Every time we hit a '(', a new empty list is appended to the stack.
+ #When we hit a ')', the last list in the stack is merged with list one level up.
+ stack = [[]]
+ #Set need_bracket to True after use conditionals or ||. Other tokens need to ensure
+ #that need_bracket is not True.
+ need_bracket = False
+ #Set need_simple_token to True after a SRC_URI arrow. Other tokens need to ensure
+ #that need_simple_token is not True.
+ need_simple_token = False
+
+ for pos, token in enumerate(mysplit):
+ if token == "(":
+ if need_simple_token:
+ raise InvalidDependString(
+ _("expected: file name, got: '%s', token %s") % (token, pos+1))
+ if len(mysplit) >= pos+2 and mysplit[pos+1] == ")":
+ raise InvalidDependString(
+ _("expected: dependency string, got: ')', token %s") % (pos+1,))
+ need_bracket = False
+ stack.append([])
+ level += 1
+ elif token == ")":
+ if need_bracket:
+ raise InvalidDependString(
+ _("expected: '(', got: '%s', token %s") % (token, pos+1))
+ if need_simple_token:
+ raise InvalidDependString(
+ _("expected: file name, got: '%s', token %s") % (token, pos+1))
+ if level > 0:
+ level -= 1
+ l = stack.pop()
+
+ is_single = len(l) == 1 or \
+ (opconvert and l and l[0] == "||") or \
+ (not opconvert and len(l)==2 and l[0] == "||")
+ ignore = False
+
+ if flat:
+ #In 'flat' mode, we simply merge all lists into a single large one.
+ if stack[level] and stack[level][-1][-1] == "?":
+ #The last token before the '(' that matches the current ')'
+ #was a use conditional. The conditional is removed in any case.
+ #Merge the current list if needed.
+ if is_active(stack[level][-1]):
+ stack[level].pop()
+ stack[level].extend(l)
+ else:
+ stack[level].pop()
+ else:
+ stack[level].extend(l)
+ continue
+
+ if stack[level]:
+ if stack[level][-1] == "||" and not l:
+ #Optimize: || ( ) -> .
+ stack[level].pop()
+ elif stack[level][-1][-1] == "?":
+ #The last token before the '(' that matches the current ')'
+ #was a use conditional, remove it and decide if we
+ #have to keep the current list.
+ if not is_active(stack[level][-1]):
+ ignore = True
+ stack[level].pop()
+
+ def ends_in_any_of_dep(k):
+ return k>=0 and stack[k] and stack[k][-1] == "||"
+
+ def starts_with_any_of_dep(k):
+ #'ends_in_any_of_dep' for opconvert
+ return k>=0 and stack[k] and stack[k][0] == "||"
+
+ def last_any_of_operator_level(k):
+ #Returns the level of the last || operator if it is in effect for
+ #the current level. It is not in effect if there is a level that
+ #ends in a non-operator. This is almost equivalent to stack[level][-1]=="||",
+ #except that it skips empty levels.
+ while k>=0:
+ if stack[k]:
+ if stack[k][-1] == "||":
+ return k
+ elif stack[k][-1][-1] != "?":
+ return -1
+ k -= 1
+ return -1
+
+ def special_append():
+ """
+ Use extend instead of append if possible. This kills all redundant brackets.
+ """
+ if is_single:
+ #Either [A], [[...]] or [|| [...]]
+ if l[0] == "||" and ends_in_any_of_dep(level-1):
+ if opconvert:
+ stack[level].extend(l[1:])
+ else:
+ stack[level].extend(l[1])
+ elif len(l) == 1 and isinstance(l[0], list):
+ # l = [[...]]
+ last = last_any_of_operator_level(level-1)
+ if last == -1:
+ if opconvert and isinstance(l[0], list) \
+ and l[0] and l[0][0] == '||':
+ stack[level].append(l[0])
+ else:
+ stack[level].extend(l[0])
+ else:
+ if opconvert and l[0] and l[0][0] == "||":
+ stack[level].extend(l[0][1:])
+ else:
+ stack[level].append(l[0])
+ else:
+ stack[level].extend(l)
+ else:
+ if opconvert and stack[level] and stack[level][-1] == '||':
+ stack[level][-1] = ['||'] + l
+ else:
+ stack[level].append(l)
+
+ if l and not ignore:
+ #The current list is not empty and we don't want to ignore it because
+ #of an inactive use conditional.
+ if not ends_in_any_of_dep(level-1) and not ends_in_any_of_dep(level):
+ #Optimize: ( ( ... ) ) -> ( ... ). Make sure there is no '||' hanging around.
+ stack[level].extend(l)
+ elif not stack[level]:
+ #An '||' in the level above forces us to keep the brackets.
+ special_append()
+ elif is_single and ends_in_any_of_dep(level):
+ #Optimize: || ( A ) -> A, || ( || ( ... ) ) -> || ( ... )
+ stack[level].pop()
+ special_append()
+ elif ends_in_any_of_dep(level) and ends_in_any_of_dep(level-1):
+ #Optimize: || ( A || ( B C ) ) -> || ( A B C )
+ stack[level].pop()
+ stack[level].extend(l)
+ else:
+ if opconvert and ends_in_any_of_dep(level):
+ #In opconvert mode, we have to move the operator from the level
+ #above into the current list.
+ stack[level].pop()
+ stack[level].append(["||"] + l)
+ else:
+ special_append()
+
+ else:
+ raise InvalidDependString(
+ _("no matching '%s' for '%s', token %s") % ("(", ")", pos+1))
+ elif token == "||":
+ if is_src_uri:
+ raise InvalidDependString(
+ _("any-of dependencies are not allowed in SRC_URI: token %s") % (pos+1,))
+ if need_bracket:
+ raise InvalidDependString(
+ _("expected: '(', got: '%s', token %s") % (token, pos+1))
+ need_bracket = True
+ stack[level].append(token)
+ elif token == "->":
+ if need_simple_token:
+ raise InvalidDependString(
+ _("expected: file name, got: '%s', token %s") % (token, pos+1))
+ if not is_src_uri:
+ raise InvalidDependString(
+ _("SRC_URI arrow are only allowed in SRC_URI: token %s") % (pos+1,))
+ if eapi is None or not eapi_has_src_uri_arrows(eapi):
+ raise InvalidDependString(
+ _("SRC_URI arrow not allowed in EAPI %s: token %s") % (eapi, pos+1))
+ need_simple_token = True
+ stack[level].append(token)
+ else:
+ missing_white_space_check(token, pos)
+
+ if need_bracket:
+ raise InvalidDependString(
+ _("expected: '(', got: '%s', token %s") % (token, pos+1))
+
+ if need_simple_token and "/" in token:
+ #The last token was a SRC_URI arrow, make sure we have a simple file name.
+ raise InvalidDependString(
+ _("expected: file name, got: '%s', token %s") % (token, pos+1))
+
+ if token[-1] == "?":
+ need_bracket = True
+ else:
+ need_simple_token = False
+ if token_class and not is_src_uri:
+ #Add a hack for SRC_URI here, to avoid conditional code at the consumer level
+ try:
+ token = token_class(token, eapi=eapi,
+ is_valid_flag=is_valid_flag)
+ except InvalidAtom as e:
+ raise InvalidDependString(
+ _("Invalid atom (%s), token %s") \
+ % (e, pos+1), errors=(e,))
+ except SystemExit:
+ raise
+ except Exception as e:
+ raise InvalidDependString(
+ _("Invalid token '%s', token %s") % (token, pos+1))
+
+ if not matchall and \
+ hasattr(token, 'evaluate_conditionals'):
+ token = token.evaluate_conditionals(uselist)
+
+ stack[level].append(token)
+
+ if level != 0:
+ raise InvalidDependString(
+ _("Missing '%s' at end of string") % (")",))
+
+ if need_bracket:
+ raise InvalidDependString(
+ _("Missing '%s' at end of string") % ("(",))
+
+ if need_simple_token:
+ raise InvalidDependString(
+ _("Missing file name at end of string"))
+
+ return stack[0]
+
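+# A brief sketch of the main output modes (illustrative doctests):
+#
+# >>> use_reduce("|| ( a b ) c")
+# ['||', ['a', 'b'], 'c']
+# >>> use_reduce("|| ( a b ) c", opconvert=True)
+# [['||', 'a', 'b'], 'c']
+# >>> use_reduce("|| ( a b ) c", flat=True)
+# ['||', 'a', 'b', 'c']
+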
+def dep_opconvert(deplist):
+ """
+ Iterate recursively through a list of deps; if the
+ dep is a '||' or '&&' operator, combine it with the
+ list of deps that follows.
+
+ Example usage:
+ >>> test = ["blah", "||", ["foo", "bar", "baz"]]
+ >>> dep_opconvert(test)
+ ['blah', ['||', 'foo', 'bar', 'baz']]
+
+ @param deplist: A list of deps to format
+ @type deplist: List
+ @rtype: List
+ @return:
+ The new list with the new ordering
+ """
+ if _internal_warnings:
+ warnings.warn(_("%s is deprecated. Use %s with the opconvert parameter set to True instead.") % \
+ ('portage.dep.dep_opconvert', 'portage.dep.use_reduce'), DeprecationWarning, stacklevel=2)
+
+ retlist = []
+ x = 0
+ while x != len(deplist):
+ if isinstance(deplist[x], list):
+ retlist.append(dep_opconvert(deplist[x]))
+ elif deplist[x] == "||":
+ retlist.append([deplist[x]] + dep_opconvert(deplist[x+1]))
+ x += 1
+ else:
+ retlist.append(deplist[x])
+ x += 1
+ return retlist
+
+def flatten(mylist):
+ """
+ Recursively traverse nested lists and return a single list containing
+ all non-list elements that are found.
+
+ Example usage:
+ >>> flatten([1, [2, 3, [4]]])
+ [1, 2, 3, 4]
+
+ @param mylist: A list containing nested lists and non-list elements.
+ @type mylist: List
+ @rtype: List
+ @return: A single list containing only non-list elements.
+ """
+ if _internal_warnings:
+ warnings.warn(_("%s is deprecated and will be removed without replacement.") % \
+ ('portage.dep.flatten',), DeprecationWarning, stacklevel=2)
+
+ newlist = []
+ for x in mylist:
+ if isinstance(x, list):
+ newlist.extend(flatten(x))
+ else:
+ newlist.append(x)
+ return newlist
+
+
+_usedep_re = {
+ "0": re.compile("^(?P<prefix>[!-]?)(?P<flag>[A-Za-z0-9][A-Za-z0-9+_@-]*)(?P<default>(\(\+\)|\(\-\))?)(?P<suffix>[?=]?)$"),
+ "4-python": re.compile("^(?P<prefix>[!-]?)(?P<flag>[A-Za-z0-9][A-Za-z0-9+_@.-]*)(?P<default>(\(\+\)|\(\-\))?)(?P<suffix>[?=]?)$"),
+}
+
+def _get_usedep_re(eapi):
+ """
+ When eapi is None then validation is not as strict, since we want the
+ same regex to work for multiple EAPIs that may have slightly different rules.
+ @param eapi: The EAPI
+ @type eapi: String or None
+ @rtype: regular expression object
+ @return: A regular expression object that matches valid USE deps for the
+ given eapi.
+ """
+ if eapi in (None, "4-python",):
+ return _usedep_re["4-python"]
+ else:
+ return _usedep_re["0"]
+
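+# For instance (illustrative):
+#
+# >>> m = _get_usedep_re("0").match("!bar?")
+# >>> (m.group("prefix"), m.group("flag"), m.group("suffix"))
+# ('!', 'bar', '?')
+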
+class _use_dep(object):
+
+ __slots__ = ("__weakref__", "eapi", "conditional", "missing_enabled", "missing_disabled",
+ "disabled", "enabled", "tokens", "required")
+
+ class _conditionals_class(object):
+ __slots__ = ("enabled", "disabled", "equal", "not_equal")
+
+ def items(self):
+ for k in self.__slots__:
+ v = getattr(self, k, None)
+ if v:
+ yield (k, v)
+
+ def values(self):
+ for k in self.__slots__:
+ v = getattr(self, k, None)
+ if v:
+ yield v
+
+ # used in InvalidAtom messages
+ _conditional_strings = {
+ 'enabled' : '%s?',
+ 'disabled': '!%s?',
+ 'equal': '%s=',
+ 'not_equal': '!%s=',
+ }
+
+ def __init__(self, use, eapi, enabled_flags=None, disabled_flags=None, missing_enabled=None, \
+ missing_disabled=None, conditional=None, required=None):
+
+ self.eapi = eapi
+
+ if enabled_flags is not None:
+ #A shortcut for the class's own methods.
+ self.tokens = use
+ if not isinstance(self.tokens, tuple):
+ self.tokens = tuple(self.tokens)
+
+ self.required = frozenset(required)
+ self.enabled = frozenset(enabled_flags)
+ self.disabled = frozenset(disabled_flags)
+ self.missing_enabled = frozenset(missing_enabled)
+ self.missing_disabled = frozenset(missing_disabled)
+ self.conditional = None
+
+ if conditional:
+ self.conditional = self._conditionals_class()
+ for k in "enabled", "disabled", "equal", "not_equal":
+ setattr(self.conditional, k, frozenset(conditional.get(k, [])))
+
+ return
+
+ enabled_flags = set()
+ disabled_flags = set()
+ missing_enabled = set()
+ missing_disabled = set()
+ no_default = set()
+
+ conditional = {}
+ usedep_re = _get_usedep_re(self.eapi)
+
+ for x in use:
+ m = usedep_re.match(x)
+ if m is None:
+ raise InvalidAtom(_("Invalid use dep: '%s'") % (x,))
+
+ operator = m.group("prefix") + m.group("suffix")
+ flag = m.group("flag")
+ default = m.group("default")
+
+ if not operator:
+ enabled_flags.add(flag)
+ elif operator == "-":
+ disabled_flags.add(flag)
+ elif operator == "?":
+ conditional.setdefault("enabled", set()).add(flag)
+ elif operator == "=":
+ conditional.setdefault("equal", set()).add(flag)
+ elif operator == "!=":
+ conditional.setdefault("not_equal", set()).add(flag)
+ elif operator == "!?":
+ conditional.setdefault("disabled", set()).add(flag)
+ else:
+ raise InvalidAtom(_("Invalid use dep: '%s'") % (x,))
+
+ if default:
+ if default == "(+)":
+ if flag in missing_disabled or flag in no_default:
+ raise InvalidAtom(_("Invalid use dep: '%s'") % (x,))
+ missing_enabled.add(flag)
+ else:
+ if flag in missing_enabled or flag in no_default:
+ raise InvalidAtom(_("Invalid use dep: '%s'") % (x,))
+ missing_disabled.add(flag)
+ else:
+ if flag in missing_enabled or flag in missing_disabled:
+ raise InvalidAtom(_("Invalid use dep: '%s'") % (x,))
+ no_default.add(flag)
+
+ self.tokens = use
+ if not isinstance(self.tokens, tuple):
+ self.tokens = tuple(self.tokens)
+
+ self.required = frozenset(no_default)
+
+ self.enabled = frozenset(enabled_flags)
+ self.disabled = frozenset(disabled_flags)
+ self.missing_enabled = frozenset(missing_enabled)
+ self.missing_disabled = frozenset(missing_disabled)
+ self.conditional = None
+
+ if conditional:
+ self.conditional = self._conditionals_class()
+ for k in "enabled", "disabled", "equal", "not_equal":
+ setattr(self.conditional, k, frozenset(conditional.get(k, [])))
+
+ def __bool__(self):
+ return bool(self.tokens)
+
+ if sys.hexversion < 0x3000000:
+ __nonzero__ = __bool__
+
+ def __str__(self):
+ if not self.tokens:
+ return ""
+ return "[%s]" % (",".join(self.tokens),)
+
+ def __repr__(self):
+ return "portage.dep._use_dep(%s)" % repr(self.tokens)
+
+ def evaluate_conditionals(self, use):
+ """
+ Create a new instance with conditionals evaluated.
+
+ Conditional evaluation behavior:
+
+ parent state conditional result
+
+ x x? x
+ -x x?
+ x !x?
+ -x !x? -x
+
+ x x= x
+ -x x= -x
+ x !x= -x
+ -x !x= x
+
+ Conditional syntax examples:
+
+ Compact Form Equivalent Expanded Form
+
+ foo[bar?] bar? ( foo[bar] ) !bar? ( foo )
+ foo[!bar?] bar? ( foo ) !bar? ( foo[-bar] )
+ foo[bar=] bar? ( foo[bar] ) !bar? ( foo[-bar] )
+ foo[!bar=] bar? ( foo[-bar] ) !bar? ( foo[bar] )
+
+ """
+ enabled_flags = set(self.enabled)
+ disabled_flags = set(self.disabled)
+
+ tokens = []
+ usedep_re = _get_usedep_re(self.eapi)
+
+ for x in self.tokens:
+ m = usedep_re.match(x)
+
+ operator = m.group("prefix") + m.group("suffix")
+ flag = m.group("flag")
+ default = m.group("default")
+ if default is None:
+ default = ""
+
+ if operator == "?":
+ if flag in use:
+ enabled_flags.add(flag)
+ tokens.append(flag+default)
+ elif operator == "=":
+ if flag in use:
+ enabled_flags.add(flag)
+ tokens.append(flag+default)
+ else:
+ disabled_flags.add(flag)
+ tokens.append("-"+flag+default)
+ elif operator == "!=":
+ if flag in use:
+ disabled_flags.add(flag)
+ tokens.append("-"+flag+default)
+ else:
+ enabled_flags.add(flag)
+ tokens.append(flag+default)
+ elif operator == "!?":
+ if flag not in use:
+ disabled_flags.add(flag)
+ tokens.append("-"+flag+default)
+ else:
+ tokens.append(x)
+
+ return _use_dep(tokens, self.eapi, enabled_flags=enabled_flags, disabled_flags=disabled_flags, \
+ missing_enabled=self.missing_enabled, missing_disabled=self.missing_disabled, required=self.required)
+
+ def violated_conditionals(self, other_use, is_valid_flag, parent_use=None):
+ """
+ Create a new instance with satisfied use deps removed.
+ """
+ if parent_use is None and self.conditional:
+ raise InvalidAtom("violated_conditionals needs 'parent_use'" + \
+ " parameter for conditional flags.")
+
+ enabled_flags = set()
+ disabled_flags = set()
+
+ conditional = {}
+ tokens = []
+
+ all_defaults = frozenset(chain(self.missing_enabled, self.missing_disabled))
+
+ def validate_flag(flag):
+ return is_valid_flag(flag) or flag in all_defaults
+
+ usedep_re = _get_usedep_re(self.eapi)
+
+ for x in self.tokens:
+ m = usedep_re.match(x)
+
+ operator = m.group("prefix") + m.group("suffix")
+ flag = m.group("flag")
+
+ if not validate_flag(flag):
+ tokens.append(x)
+ if not operator:
+ enabled_flags.add(flag)
+ elif operator == "-":
+ disabled_flags.add(flag)
+ elif operator == "?":
+ conditional.setdefault("enabled", set()).add(flag)
+ elif operator == "=":
+ conditional.setdefault("equal", set()).add(flag)
+ elif operator == "!=":
+ conditional.setdefault("not_equal", set()).add(flag)
+ elif operator == "!?":
+ conditional.setdefault("disabled", set()).add(flag)
+
+ continue
+
+ if not operator:
+ if flag not in other_use:
+ if is_valid_flag(flag) or flag in self.missing_disabled:
+ tokens.append(x)
+ enabled_flags.add(flag)
+ elif operator == "-":
+ if flag not in other_use:
+ if not is_valid_flag(flag):
+ if flag in self.missing_enabled:
+ tokens.append(x)
+ disabled_flags.add(flag)
+ else:
+ tokens.append(x)
+ disabled_flags.add(flag)
+ elif operator == "?":
+ if flag not in parent_use or flag in other_use:
+ continue
+
+ if is_valid_flag(flag) or flag in self.missing_disabled:
+ tokens.append(x)
+ conditional.setdefault("enabled", set()).add(flag)
+ elif operator == "=":
+ if flag in parent_use and flag not in other_use:
+ if is_valid_flag(flag):
+ tokens.append(x)
+ conditional.setdefault("equal", set()).add(flag)
+ else:
+ if flag in self.missing_disabled:
+ tokens.append(x)
+ conditional.setdefault("equal", set()).add(flag)
+ elif flag not in parent_use:
+ if flag not in other_use:
+ if not is_valid_flag(flag):
+ if flag in self.missing_enabled:
+ tokens.append(x)
+ conditional.setdefault("equal", set()).add(flag)
+ else:
+ tokens.append(x)
+ conditional.setdefault("equal", set()).add(flag)
+ elif operator == "!=":
+ if flag not in parent_use and flag not in other_use:
+ if is_valid_flag(flag):
+ tokens.append(x)
+ conditional.setdefault("not_equal", set()).add(flag)
+ else:
+ if flag in self.missing_disabled:
+ tokens.append(x)
+ conditional.setdefault("not_equal", set()).add(flag)
+ elif flag in parent_use:
+ if flag not in other_use:
+ if not is_valid_flag(flag):
+ if flag in self.missing_enabled:
+ tokens.append(x)
+ conditional.setdefault("not_equal", set()).add(flag)
+ else:
+ tokens.append(x)
+ conditional.setdefault("not_equal", set()).add(flag)
+ elif operator == "!?":
+ if flag not in parent_use:
+ if flag not in other_use:
+ if not is_valid_flag(flag) and flag in self.missing_enabled:
+ tokens.append(x)
+ conditional.setdefault("disabled", set()).add(flag)
+ else:
+ tokens.append(x)
+ conditional.setdefault("disabled", set()).add(flag)
+
+ return _use_dep(tokens, self.eapi, enabled_flags=enabled_flags, disabled_flags=disabled_flags, \
+ missing_enabled=self.missing_enabled, missing_disabled=self.missing_disabled, \
+ conditional=conditional, required=self.required)
+
+ def _eval_qa_conditionals(self, use_mask, use_force):
+ """
+ For repoman, evaluate all possible combinations within the constraints
+ of the given use.force and use.mask settings. The result may seem
+ ambiguous in the sense that the same flag can be in both the enabled
+ and disabled sets, but this is useful within the context of how its
+ intended to be used by repoman. It is assumed that the caller has
+ already ensured that there is no intersection between the given
+ use_mask and use_force sets when necessary.
+ """
+ enabled_flags = set(self.enabled)
+ disabled_flags = set(self.disabled)
+ missing_enabled = self.missing_enabled
+ missing_disabled = self.missing_disabled
+
+ tokens = []
+ usedep_re = _get_usedep_re(self.eapi)
+
+ for x in self.tokens:
+ m = usedep_re.match(x)
+
+ operator = m.group("prefix") + m.group("suffix")
+ flag = m.group("flag")
+ default = m.group("default")
+ if default is None:
+ default = ""
+
+ if operator == "?":
+ if flag not in use_mask:
+ enabled_flags.add(flag)
+ tokens.append(flag+default)
+ elif operator == "=":
+ if flag not in use_mask:
+ enabled_flags.add(flag)
+ tokens.append(flag+default)
+ if flag not in use_force:
+ disabled_flags.add(flag)
+ tokens.append("-"+flag+default)
+ elif operator == "!=":
+ if flag not in use_force:
+ enabled_flags.add(flag)
+ tokens.append(flag+default)
+ if flag not in use_mask:
+ disabled_flags.add(flag)
+ tokens.append("-"+flag+default)
+ elif operator == "!?":
+ if flag not in use_force:
+ disabled_flags.add(flag)
+ tokens.append("-"+flag+default)
+ else:
+ tokens.append(x)
+
+ return _use_dep(tokens, self.eapi, enabled_flags=enabled_flags, disabled_flags=disabled_flags, \
+ missing_enabled=missing_enabled, missing_disabled=missing_disabled, required=self.required)
+
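+# A small sketch of conditional evaluation, using the internal helper class
+# directly (consumers normally reach it via Atom; illustrative doctests):
+#
+# >>> str(_use_dep(["a?", "b"], None).evaluate_conditionals(["a"]))
+# '[a,b]'
+# >>> str(_use_dep(["a?", "b"], None).evaluate_conditionals([]))
+# '[b]'
+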
+if sys.hexversion < 0x3000000:
+ _atom_base = unicode
+else:
+ _atom_base = str
+
+class Atom(_atom_base):
+
+ """
+ For compatibility with existing atom string manipulation code, this
+ class emulates most of the str methods that are useful with atoms.
+ """
+
+ class _blocker(object):
+ __slots__ = ("overlap",)
+
+ class _overlap(object):
+ __slots__ = ("forbid",)
+
+ def __init__(self, forbid=False):
+ self.forbid = forbid
+
+ def __init__(self, forbid_overlap=False):
+ self.overlap = self._overlap(forbid=forbid_overlap)
+
+ def __new__(cls, s, unevaluated_atom=None, allow_wildcard=False, allow_repo=False,
+ _use=None, eapi=None, is_valid_flag=None):
+ return _atom_base.__new__(cls, s)
+
+ def __init__(self, s, unevaluated_atom=None, allow_wildcard=False, allow_repo=False,
+ _use=None, eapi=None, is_valid_flag=None):
+ if isinstance(s, Atom):
+ # This is an efficiency assertion, to ensure that the Atom
+ # constructor is not called redundantly.
+ raise TypeError(_("Expected %s, got %s") % \
+ (_atom_base, type(s)))
+
+ if not isinstance(s, _atom_base):
+ # Avoid TypeError from _atom_base.__init__ with PyPy.
+ s = _unicode_decode(s)
+
+ _atom_base.__init__(s)
+
+ if "!" == s[:1]:
+ blocker = self._blocker(forbid_overlap=("!" == s[1:2]))
+ if blocker.overlap.forbid:
+ s = s[2:]
+ else:
+ s = s[1:]
+ else:
+ blocker = False
+ self.__dict__['blocker'] = blocker
+ m = _atom_re.match(s)
+ extended_syntax = False
+ if m is None:
+ if allow_wildcard:
+ m = _atom_wildcard_re.match(s)
+ if m is None:
+ raise InvalidAtom(self)
+ op = None
+ gdict = m.groupdict()
+ cpv = cp = gdict['simple']
+ if cpv.find("**") != -1:
+ raise InvalidAtom(self)
+ slot = gdict['slot']
+ repo = gdict['repo']
+ use_str = None
+ extended_syntax = True
+ else:
+ raise InvalidAtom(self)
+ elif m.group('op') is not None:
+ base = _atom_re.groupindex['op']
+ op = m.group(base + 1)
+ cpv = m.group(base + 2)
+ cp = m.group(base + 3)
+ slot = m.group(_atom_re.groups - 2)
+ repo = m.group(_atom_re.groups - 1)
+ use_str = m.group(_atom_re.groups)
+ if m.group(base + 4) is not None:
+ raise InvalidAtom(self)
+ elif m.group('star') is not None:
+ base = _atom_re.groupindex['star']
+ op = '=*'
+ cpv = m.group(base + 1)
+ cp = m.group(base + 2)
+ slot = m.group(_atom_re.groups - 2)
+ repo = m.group(_atom_re.groups - 1)
+ use_str = m.group(_atom_re.groups)
+ if m.group(base + 3) is not None:
+ raise InvalidAtom(self)
+ elif m.group('simple') is not None:
+ op = None
+ cpv = cp = m.group(_atom_re.groupindex['simple'] + 1)
+ slot = m.group(_atom_re.groups - 2)
+ repo = m.group(_atom_re.groups - 1)
+ use_str = m.group(_atom_re.groups)
+ if m.group(_atom_re.groupindex['simple'] + 2) is not None:
+ raise InvalidAtom(self)
+
+ else:
+ raise AssertionError(_("required group not found in atom: '%s'") % self)
+ self.__dict__['cp'] = cp
+ self.__dict__['cpv'] = cpv
+ self.__dict__['repo'] = repo
+ self.__dict__['slot'] = slot
+ self.__dict__['operator'] = op
+ self.__dict__['extended_syntax'] = extended_syntax
+
+ if not (repo is None or allow_repo):
+ raise InvalidAtom(self)
+
+ if use_str is not None:
+ if _use is not None:
+ use = _use
+ else:
+ use = _use_dep(use_str[1:-1].split(","), eapi)
+ without_use = Atom(m.group('without_use'), allow_repo=allow_repo)
+ else:
+ use = None
+ if unevaluated_atom is not None and \
+ unevaluated_atom.use is not None:
+ # unevaluated_atom.use is used for IUSE checks when matching
+ # packages, so it must not propagate to without_use
+ without_use = Atom(s, allow_wildcard=allow_wildcard,
+ allow_repo=allow_repo)
+ else:
+ without_use = self
+
+ self.__dict__['use'] = use
+ self.__dict__['without_use'] = without_use
+
+ if unevaluated_atom:
+ self.__dict__['unevaluated_atom'] = unevaluated_atom
+ else:
+ self.__dict__['unevaluated_atom'] = self
+
+ if eapi is not None:
+ if not isinstance(eapi, basestring):
+ raise TypeError('expected eapi argument of ' + \
+ '%s, got %s: %s' % (basestring, type(eapi), eapi,))
+ if self.slot and not eapi_has_slot_deps(eapi):
+ raise InvalidAtom(
+ _("Slot deps are not allowed in EAPI %s: '%s'") \
+ % (eapi, self), category='EAPI.incompatible')
+ if self.use:
+ if not eapi_has_use_deps(eapi):
+ raise InvalidAtom(
+ _("Use deps are not allowed in EAPI %s: '%s'") \
+ % (eapi, self), category='EAPI.incompatible')
+ elif not eapi_has_use_dep_defaults(eapi) and \
+ (self.use.missing_enabled or self.use.missing_disabled):
+ raise InvalidAtom(
+ _("Use dep defaults are not allowed in EAPI %s: '%s'") \
+ % (eapi, self), category='EAPI.incompatible')
+ if is_valid_flag is not None and self.use.conditional:
+ invalid_flag = None
+ try:
+ for conditional_type, flags in \
+ self.use.conditional.items():
+ for flag in flags:
+ if not is_valid_flag(flag):
+ invalid_flag = (conditional_type, flag)
+ raise StopIteration()
+ except StopIteration:
+ pass
+ if invalid_flag is not None:
+ conditional_type, flag = invalid_flag
+ conditional_str = _use_dep._conditional_strings[conditional_type]
+ msg = _("USE flag '%s' referenced in " + \
+ "conditional '%s' in atom '%s' is not in IUSE") \
+ % (flag, conditional_str % flag, self)
+ raise InvalidAtom(msg, category='IUSE.missing')
+ if self.blocker and self.blocker.overlap.forbid and not eapi_has_strong_blocks(eapi):
+ raise InvalidAtom(
+ _("Strong blocks are not allowed in EAPI %s: '%s'") \
+ % (eapi, self), category='EAPI.incompatible')
+
+ @property
+ def without_repo(self):
+ if self.repo is None:
+ return self
+ return Atom(self.replace(_repo_separator + self.repo, '', 1),
+ allow_wildcard=True)
+
+ @property
+ def without_slot(self):
+ if self.slot is None:
+ return self
+ return Atom(self.replace(_slot_separator + self.slot, '', 1),
+ allow_repo=True, allow_wildcard=True)
+
+ def __setattr__(self, name, value):
+ raise AttributeError("Atom instances are immutable",
+ self.__class__, name, value)
+
+ def intersects(self, other):
+ """
+ Atoms with different cpv, operator or use attributes cause this method
+ to return False even though there may actually be some intersection.
+ TODO: Detect more forms of intersection.
+ @param other: The package atom to match
+ @type other: Atom
+ @rtype: Boolean
+ @return: True if this atom and the other atom intersect,
+ False otherwise.
+ """
+ if not isinstance(other, Atom):
+ raise TypeError("expected %s, got %s" % \
+ (Atom, type(other)))
+
+ if self == other:
+ return True
+
+ if self.cp != other.cp or \
+ self.use != other.use or \
+ self.operator != other.operator or \
+ self.cpv != other.cpv:
+ return False
+
+ if self.slot is None or \
+ other.slot is None or \
+ self.slot == other.slot:
+ return True
+
+ return False
+
+ def evaluate_conditionals(self, use):
+ """
+ Create an atom instance with any USE conditionals evaluated.
+ @param use: The set of enabled USE flags
+ @type use: set
+ @rtype: Atom
+ @return: an atom instance with any USE conditionals evaluated
+ """
+ if not (self.use and self.use.conditional):
+ return self
+ atom = remove_slot(self)
+ if self.slot:
+ atom += ":%s" % self.slot
+ use_dep = self.use.evaluate_conditionals(use)
+ atom += str(use_dep)
+ return Atom(atom, unevaluated_atom=self, allow_repo=(self.repo is not None), _use=use_dep)
+
+ def violated_conditionals(self, other_use, is_valid_flag, parent_use=None):
+ """
+ Create an atom instance with any USE conditional removed, that is
+ satisfied by other_use.
+ @param other_use: The set of enabled USE flags
+ @type other_use: set
+ @param is_valid_flag: Function that decides if a use flag is referenceable in use deps
+ @type is_valid_flag: function
+ @param parent_use: Set of enabled use flags of the package requiring this atom
+ @type parent_use: set
+ @rtype: Atom
+ @return: an atom instance with any satisfied USE conditionals removed
+ """
+ if not self.use:
+ return self
+ atom = remove_slot(self)
+ if self.slot:
+ atom += ":%s" % self.slot
+ use_dep = self.use.violated_conditionals(other_use, is_valid_flag, parent_use)
+ atom += str(use_dep)
+ return Atom(atom, unevaluated_atom=self, allow_repo=(self.repo is not None), _use=use_dep)
+
+ def _eval_qa_conditionals(self, use_mask, use_force):
+ if not (self.use and self.use.conditional):
+ return self
+ atom = remove_slot(self)
+ if self.slot:
+ atom += ":%s" % self.slot
+ use_dep = self.use._eval_qa_conditionals(use_mask, use_force)
+ atom += str(use_dep)
+ return Atom(atom, unevaluated_atom=self, allow_repo=(self.repo is not None), _use=use_dep)
+
+ def __copy__(self):
+ """Immutable, so returns self."""
+ return self
+
+ def __deepcopy__(self, memo=None):
+ """Immutable, so returns self."""
+ memo[id(self)] = self
+ return self
+
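+# Typical attribute access on a hypothetical atom (illustrative doctest):
+#
+# >>> a = Atom(">=dev-lang/python-2.7:2.7[ssl]")
+# >>> (a.cp, a.operator, a.slot, str(a.use))
+# ('dev-lang/python', '>=', '2.7', '[ssl]')
+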
+_extended_cp_re_cache = {}
+
+def extended_cp_match(extended_cp, other_cp):
+ """
+ Checks if an extended syntax cp matches a non extended cp
+ """
+ # Escape special '+' and '.' characters which are allowed in atoms,
+ # and convert '*' to regex equivalent.
+ global _extended_cp_re_cache
+ extended_cp_re = _extended_cp_re_cache.get(extended_cp)
+ if extended_cp_re is None:
+ extended_cp_re = re.compile("^" + re.escape(extended_cp).replace(
+ r'\*', '[^/]*') + "$")
+ _extended_cp_re_cache[extended_cp] = extended_cp_re
+ return extended_cp_re.match(other_cp) is not None
+
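+# For example (illustrative):
+#
+# >>> extended_cp_match("x11-libs/*", "x11-libs/gtk+")
+# True
+# >>> extended_cp_match("x11-libs/*", "media-libs/gtk+")
+# False
+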
+class ExtendedAtomDict(portage.cache.mappings.MutableMapping):
+ """
+ dict() wrapper that supports extended atoms as keys and allows lookup
+ of a normal cp against other normal cp and extended cp.
+ The value type has to be given to __init__ and is assumed to be the same
+ for all values.
+ """
+
+ __slots__ = ('_extended', '_normal', '_value_class')
+
+ def __init__(self, value_class):
+ self._extended = {}
+ self._normal = {}
+ self._value_class = value_class
+
+ def copy(self):
+ result = self.__class__(self._value_class)
+ result._extended.update(self._extended)
+ result._normal.update(self._normal)
+ return result
+
+ def __iter__(self):
+ for k in self._normal:
+ yield k
+ for k in self._extended:
+ yield k
+
+ def iteritems(self):
+ for item in self._normal.items():
+ yield item
+ for item in self._extended.items():
+ yield item
+
+ def __delitem__(self, cp):
+ if "*" in cp:
+ return self._extended.__delitem__(cp)
+ else:
+ return self._normal.__delitem__(cp)
+
+ if sys.hexversion >= 0x3000000:
+ keys = __iter__
+ items = iteritems
+
+ def __len__(self):
+ return len(self._normal) + len(self._extended)
+
+ def setdefault(self, cp, default=None):
+ if "*" in cp:
+ return self._extended.setdefault(cp, default)
+ else:
+ return self._normal.setdefault(cp, default)
+
+ def __getitem__(self, cp):
+
+ if not isinstance(cp, basestring):
+ raise KeyError(cp)
+
+ if '*' in cp:
+ return self._extended[cp]
+
+ ret = self._value_class()
+ normal_match = self._normal.get(cp)
+ match = False
+
+ if normal_match is not None:
+ match = True
+ if hasattr(ret, "update"):
+ ret.update(normal_match)
+ elif hasattr(ret, "extend"):
+ ret.extend(normal_match)
+ else:
+ raise NotImplementedError()
+
+ for extended_cp in self._extended:
+ if extended_cp_match(extended_cp, cp):
+ match = True
+ if hasattr(ret, "update"):
+ ret.update(self._extended[extended_cp])
+ elif hasattr(ret, "extend"):
+ ret.extend(self._extended[extended_cp])
+ else:
+ raise NotImplementedError()
+
+ if not match:
+ raise KeyError(cp)
+
+ return ret
+
+ def __setitem__(self, cp, val):
+ if "*" in cp:
+ self._extended[cp] = val
+ else:
+ self._normal[cp] = val
+
+ def __eq__(self, other):
+ return self._value_class == other._value_class and \
+ self._extended == other._extended and \
+ self._normal == other._normal
+
+ def clear(self):
+ self._extended.clear()
+ self._normal.clear()
+
+
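+# Usage sketch: a normal cp lookup also collects values stored under a
+# matching extended cp (illustrative doctest):
+#
+# >>> d = ExtendedAtomDict(list)
+# >>> d["sys-apps/*"] = ["a"]
+# >>> d["sys-apps/portage"] = ["b"]
+# >>> d["sys-apps/portage"]
+# ['b', 'a']
+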
+def get_operator(mydep):
+ """
+ Return the operator used in a depstring.
+
+ Example usage:
+ >>> from portage.dep import *
+ >>> get_operator(">=test-1.0")
+ '>='
+
+ @param mydep: The dep string to check
+ @type mydep: String
+ @rtype: String
+ @return: The operator. One of:
+ '~', '=', '>', '<', '=*', '>=', or '<='
+ """
+ if not isinstance(mydep, Atom):
+ mydep = Atom(mydep)
+
+ return mydep.operator
+
+def dep_getcpv(mydep):
+ """
+ Return the category-package-version with any operators/slot specifications stripped off
+
+ Example usage:
+ >>> dep_getcpv('>=media-libs/test-3.0')
+ 'media-libs/test-3.0'
+
+ @param mydep: The depstring
+ @type mydep: String
+ @rtype: String
+ @return: The depstring with the operator removed
+ """
+ if not isinstance(mydep, Atom):
+ mydep = Atom(mydep)
+
+ return mydep.cpv
+
+def dep_getslot(mydep):
+ """
+ Retrieve the slot on a depend.
+
+ Example usage:
+ >>> dep_getslot('app-misc/test:3')
+ '3'
+
+ @param mydep: The depstring to retrieve the slot of
+ @type mydep: String
+ @rtype: String
+ @return: The slot
+ """
+ slot = getattr(mydep, "slot", False)
+ if slot is not False:
+ return slot
+
+ #remove repo_name if present
+ mydep = mydep.split(_repo_separator)[0]
+
+ colon = mydep.find(_slot_separator)
+ if colon != -1:
+ bracket = mydep.find("[", colon)
+ if bracket == -1:
+ return mydep[colon+1:]
+ else:
+ return mydep[colon+1:bracket]
+ return None
+
+def dep_getrepo(mydep):
+ """
+ Retrieve the repo on a depend.
+
+ Example usage:
+ >>> dep_getrepo('app-misc/test::repository')
+ 'repository'
+
+ @param mydep: The depstring to retrieve the repository of
+ @type mydep: String
+ @rtype: String
+ @return: The repository name
+ """
+ repo = getattr(mydep, "repo", False)
+ if repo is not False:
+ return repo
+
+ metadata = getattr(mydep, "metadata", False)
+ if metadata:
+ repo = metadata.get('repository', False)
+ if repo is not False:
+ return repo
+
+ colon = mydep.find(_repo_separator)
+ if colon != -1:
+ bracket = mydep.find("[", colon)
+ if bracket == -1:
+ return mydep[colon+2:]
+ else:
+ return mydep[colon+2:bracket]
+ return None
+
+def remove_slot(mydep):
+ """
+ Removes dep components from the right side of an atom:
+ * slot
+ * use
+ * repo
+ (everything from the first ':' or '[' onward is stripped)
+ """
+ colon = mydep.find(_slot_separator)
+ if colon != -1:
+ mydep = mydep[:colon]
+ else:
+ bracket = mydep.find("[")
+ if bracket != -1:
+ mydep = mydep[:bracket]
+ return mydep
+
+def dep_getusedeps( depend ):
+ """
+ Pull a listing of USE Dependencies out of a dep atom.
+
+ Example usage:
+ >>> dep_getusedeps('app-misc/test:3[foo,-bar]')
+ ('foo', '-bar')
+
+ @param depend: The depstring to process
+ @type depend: String
+ @rtype: List
+ @return: List of use flags ( or [] if no flags exist )
+ """
+ use_list = []
+ open_bracket = depend.find('[')
+ # -1 = failure (think c++ string::npos)
+ comma_separated = False
+ bracket_count = 0
+ while( open_bracket != -1 ):
+ bracket_count += 1
+ if bracket_count > 1:
+ raise InvalidAtom(_("USE Dependency with more "
+ "than one set of brackets: %s") % (depend,))
+ close_bracket = depend.find(']', open_bracket )
+ if close_bracket == -1:
+ raise InvalidAtom(_("USE Dependency with no closing bracket: %s") % depend )
+ use = depend[open_bracket + 1: close_bracket]
+ # foo[1:1] may return '' instead of None; we don't want '' in the result
+ if not use:
+ raise InvalidAtom(_("USE Dependency with "
+ "no use flag ([]): %s") % depend )
+ if not comma_separated:
+ comma_separated = "," in use
+
+ if comma_separated and bracket_count > 1:
+ raise InvalidAtom(_("USE Dependency contains a mixture of "
+ "comma and bracket separators: %s") % depend )
+
+ if comma_separated:
+ for x in use.split(","):
+ if x:
+ use_list.append(x)
+ else:
+ raise InvalidAtom(_("USE Dependency with no use "
+ "flag next to comma: %s") % depend )
+ else:
+ use_list.append(use)
+
+ # Find next use flag
+ open_bracket = depend.find( '[', open_bracket+1 )
+ return tuple(use_list)
+
+# \w is [a-zA-Z0-9_]
+
+# 2.1.3 A slot name may contain any of the characters [A-Za-z0-9+_.-].
+# It must not begin with a hyphen or a dot.
+_slot_separator = ":"
+_slot = r'([\w+][\w+.-]*)'
+_slot_re = re.compile('^' + _slot + '$', re.VERBOSE)
+
+_use = r'\[.*\]'
+_op = r'([=~]|[><]=?)'
+_repo_separator = "::"
+_repo_name = r'[\w][\w-]*'
+_repo = r'(?:' + _repo_separator + '(' + _repo_name + ')' + ')?'
+
+_atom_re = re.compile('^(?P<without_use>(?:' +
+ '(?P<op>' + _op + _cpv + ')|' +
+ '(?P<star>=' + _cpv + r'\*)|' +
+ '(?P<simple>' + _cp + '))' +
+ '(' + _slot_separator + _slot + ')?' + _repo + ')(' + _use + ')?$', re.VERBOSE)
+
+_extended_cat = r'[\w+*][\w+.*-]*'
+_extended_pkg = r'[\w+*][\w+*-]*?'
+
+_atom_wildcard_re = re.compile('(?P<simple>(' + _extended_cat + ')/(' + _extended_pkg + '))(:(?P<slot>' + _slot + '))?(' + _repo_separator + '(?P<repo>' + _repo_name + '))?$')
+
+_useflag_re = {
+ "0": re.compile(r'^[A-Za-z0-9][A-Za-z0-9+_@-]*$'),
+ "4-python": re.compile(r'^[A-Za-z0-9][A-Za-z0-9+_@.-]*$'),
+}
+
+def _get_useflag_re(eapi):
+ """
+ When eapi is None then validation is not as strict, since we want the
+ same regex to work for multiple EAPIs that may have slightly different rules.
+ @param eapi: The EAPI
+ @type eapi: String or None
+ @rtype: regular expression object
+ @return: A regular expression object that matches valid USE flags for the
+ given eapi.
+ """
+ if eapi in (None, "4-python",):
+ return _useflag_re["4-python"]
+ else:
+ return _useflag_re["0"]
+
+def isvalidatom(atom, allow_blockers=False, allow_wildcard=False, allow_repo=False):
+ """
+ Check to see if a depend atom is valid
+
+ Example usage:
+ >>> isvalidatom('media-libs/test-3.0')
+ False
+ >>> isvalidatom('>=media-libs/test-3.0')
+ True
+
+ @param atom: The depend atom to check against
+ @type atom: String or Atom
+ @rtype: Boolean
+ @return: One of the following:
+ 1) False if the atom is invalid
+ 2) True if the atom is valid
+ """
+ try:
+ if not isinstance(atom, Atom):
+ atom = Atom(atom, allow_wildcard=allow_wildcard, allow_repo=allow_repo)
+ if not allow_blockers and atom.blocker:
+ return False
+ return True
+ except InvalidAtom:
+ return False
+
+def isjustname(mypkg):
+ """
+ Checks to see if the atom is only the package name (no version parts).
+
+ Example usage:
+ >>> isjustname('=media-libs/test-3.0')
+ False
+ >>> isjustname('media-libs/test')
+ True
+
+ @param mypkg: The package atom to check
+ @type mypkg: String or Atom
+ @rtype: Boolean
+ @return: One of the following:
+ 1) False if the package string is not just the package name
+ 2) True if it is
+ """
+ try:
+ if not isinstance(mypkg, Atom):
+ mypkg = Atom(mypkg)
+ return mypkg == mypkg.cp
+ except InvalidAtom:
+ pass
+
+ for x in mypkg.split('-')[-2:]:
+ if ververify(x):
+ return False
+ return True
+
+def isspecific(mypkg):
+ """
+ Checks to see if a package is in =category/package-version or
+ package-version format.
+
+ Example usage:
+ >>> isspecific('media-libs/test')
+ False
+ >>> isspecific('=media-libs/test-3.0')
+ True
+
+ @param mypkg: The package depstring to check against
+ @type mypkg: String
+ @rtype: Boolean
+ @return: One of the following:
+ 1) False if the package string is not specific
+ 2) True if it is
+ """
+ try:
+ if not isinstance(mypkg, Atom):
+ mypkg = Atom(mypkg)
+ return mypkg != mypkg.cp
+ except InvalidAtom:
+ pass
+
+ # Fall back to legacy code for backward compatibility.
+ return not isjustname(mypkg)
+
+def dep_getkey(mydep):
+ """
+ Return the category/package-name of a depstring.
+
+ Example usage:
+ >>> dep_getkey('=media-libs/test-3.0')
+ 'media-libs/test'
+
+ @param mydep: The depstring to retrieve the category/package-name of
+ @type mydep: String
+ @rtype: String
+ @return: The package category/package-name
+ """
+ if not isinstance(mydep, Atom):
+ mydep = Atom(mydep, allow_wildcard=True, allow_repo=True)
+
+ return mydep.cp
+
+def match_to_list(mypkg, mylist):
+ """
+ Searches the list for entries that match the package.
+
+ @param mypkg: The package atom to match
+ @type mypkg: String
+ @param mylist: The list of package atoms to compare against
+ @type mylist: List
+ @rtype: List
+ @return: A unique list of package atoms that match the given package atom
+ """
+ return [ x for x in set(mylist) if match_from_list(x, [mypkg]) ]
+
+def best_match_to_list(mypkg, mylist):
+ """
+ Returns the most specific entry that matches the package given.
+
+ @param mypkg: The package atom to check
+ @type mypkg: String
+ @param mylist: The list of package atoms to check against
+ @type mylist: List
+ @rtype: String
+ @return: The package atom which best matches given the following ordering:
+ - =cpv 6
+ - ~cpv 5
+ - =cpv* 4
+ - cp:slot 3
+ - >cpv 2
+ - <cpv 2
+ - >=cpv 2
+ - <=cpv 2
+ - cp 1
+ - cp:slot with extended syntax 0
+ - cp with extended syntax -1
+ """
+ operator_values = {'=':6, '~':5, '=*':4,
+ '>':2, '<':2, '>=':2, '<=':2, None:1}
+ maxvalue = -2
+ bestm = None
+ for x in match_to_list(mypkg, mylist):
+ if x.extended_syntax:
+ if dep_getslot(x) is not None:
+ if maxvalue < 0:
+ maxvalue = 0
+ bestm = x
+ else:
+ if maxvalue < -1:
+ maxvalue = -1
+ bestm = x
+ continue
+ if dep_getslot(x) is not None:
+ if maxvalue < 3:
+ maxvalue = 3
+ bestm = x
+ op_val = operator_values[x.operator]
+ if op_val > maxvalue:
+ maxvalue = op_val
+ bestm = x
+ return bestm
+
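+# For instance (illustrative; entries should be Atom instances):
+#
+# >>> best_match_to_list("dev-libs/foo-1.2", [Atom("dev-libs/foo"), Atom("=dev-libs/foo-1.2")])
+# '=dev-libs/foo-1.2'
+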
+def match_from_list(mydep, candidate_list):
+ """
+ Searches the list for entries that match the package.
+
+ @param mydep: The package atom to match
+ @type mydep: String
+ @param candidate_list: The list of package atoms to compare against
+ @type candidate_list: List
+ @rtype: List
+ @return: A list of package atoms that match the given package atom
+ """
+
+ if not candidate_list:
+ return []
+
+ from portage.util import writemsg
+ if "!" == mydep[:1]:
+ if "!" == mydep[1:2]:
+ mydep = mydep[2:]
+ else:
+ mydep = mydep[1:]
+ if not isinstance(mydep, Atom):
+ mydep = Atom(mydep, allow_wildcard=True, allow_repo=True)
+
+ mycpv = mydep.cpv
+ mycpv_cps = catpkgsplit(mycpv) # Can be None if not specific
+ slot = mydep.slot
+
+ if not mycpv_cps:
+ cat, pkg = catsplit(mycpv)
+ ver = None
+ rev = None
+ else:
+ cat, pkg, ver, rev = mycpv_cps
+ if mydep == mycpv:
+ raise KeyError(_("Specific key requires an operator"
+ " (%s) (try adding an '=')") % (mydep))
+
+ if ver and rev:
+ operator = mydep.operator
+ if not operator:
+ writemsg(_("!!! Invalid atom: %s\n") % mydep, noiselevel=-1)
+ return []
+ else:
+ operator = None
+
+ mylist = []
+
+ if operator is None:
+ for x in candidate_list:
+ cp = getattr(x, "cp", None)
+ if cp is None:
+ mysplit = catpkgsplit(remove_slot(x))
+ if mysplit is not None:
+ cp = mysplit[0] + '/' + mysplit[1]
+
+ if cp is None:
+ continue
+
+ if cp == mycpv or (mydep.extended_syntax and \
+ extended_cp_match(mydep.cp, cp)):
+ mylist.append(x)
+
+ elif operator == "=": # Exact match
+ for x in candidate_list:
+ xcpv = getattr(x, "cpv", None)
+ if xcpv is None:
+ xcpv = remove_slot(x)
+ if not cpvequal(xcpv, mycpv):
+ continue
+ mylist.append(x)
+
+ elif operator == "=*": # glob match
+ # XXX: Nasty special casing for leading zeros
+ # Required as =* is a literal prefix match, so can't
+ # use vercmp
+ mysplit = catpkgsplit(mycpv)
+ myver = mysplit[2].lstrip("0")
+ if not myver or not myver[0].isdigit():
+ myver = "0"+myver
+ mycpv = mysplit[0]+"/"+mysplit[1]+"-"+myver
+ for x in candidate_list:
+ xs = getattr(x, "cpv_split", None)
+ if xs is None:
+ xs = catpkgsplit(remove_slot(x))
+ myver = xs[2].lstrip("0")
+ if not myver or not myver[0].isdigit():
+ myver = "0"+myver
+ xcpv = xs[0]+"/"+xs[1]+"-"+myver
+ if xcpv.startswith(mycpv):
+ mylist.append(x)
+
+ elif operator == "~": # version, any revision, match
+ for x in candidate_list:
+ xs = getattr(x, "cpv_split", None)
+ if xs is None:
+ xs = catpkgsplit(remove_slot(x))
+ if xs is None:
+ raise InvalidData(x)
+ if not cpvequal(xs[0]+"/"+xs[1]+"-"+xs[2], mycpv_cps[0]+"/"+mycpv_cps[1]+"-"+mycpv_cps[2]):
+ continue
+ if xs[2] != ver:
+ continue
+ mylist.append(x)
+
+ elif operator in [">", ">=", "<", "<="]:
+ mysplit = ["%s/%s" % (cat, pkg), ver, rev]
+ for x in candidate_list:
+ xs = getattr(x, "cpv_split", None)
+ if xs is None:
+ xs = catpkgsplit(remove_slot(x))
+ xcat, xpkg, xver, xrev = xs
+ xs = ["%s/%s" % (xcat, xpkg), xver, xrev]
+ try:
+ result = pkgcmp(xs, mysplit)
+ except ValueError: # pkgcmp may return ValueError during int() conversion
+ writemsg(_("\nInvalid package name: %s\n") % x, noiselevel=-1)
+ raise
+ if result is None:
+ continue
+ elif operator == ">":
+ if result > 0:
+ mylist.append(x)
+ elif operator == ">=":
+ if result >= 0:
+ mylist.append(x)
+ elif operator == "<":
+ if result < 0:
+ mylist.append(x)
+ elif operator == "<=":
+ if result <= 0:
+ mylist.append(x)
+ else:
+ raise KeyError(_("Unknown operator: %s") % mydep)
+ else:
+ raise KeyError(_("Unknown operator: %s") % mydep)
+
+ if slot is not None and not mydep.extended_syntax:
+ candidate_list = mylist
+ mylist = []
+ for x in candidate_list:
+ xslot = getattr(x, "slot", False)
+ if xslot is False:
+ xslot = dep_getslot(x)
+ if xslot is not None and xslot != slot:
+ continue
+ mylist.append(x)
+
+ if mydep.unevaluated_atom.use:
+ candidate_list = mylist
+ mylist = []
+ for x in candidate_list:
+ use = getattr(x, "use", None)
+ if use is not None:
+ if mydep.unevaluated_atom.use and \
+ not x.iuse.is_valid_flag(
+ mydep.unevaluated_atom.use.required):
+ continue
+
+ if mydep.use:
+
+ missing_enabled = mydep.use.missing_enabled.difference(x.iuse.all)
+ missing_disabled = mydep.use.missing_disabled.difference(x.iuse.all)
+
+ if mydep.use.enabled:
+ if mydep.use.enabled.intersection(missing_disabled):
+ continue
+ need_enabled = mydep.use.enabled.difference(use.enabled)
+ if need_enabled:
+ need_enabled = need_enabled.difference(missing_enabled)
+ if need_enabled:
+ continue
+
+ if mydep.use.disabled:
+ if mydep.use.disabled.intersection(missing_enabled):
+ continue
+ need_disabled = mydep.use.disabled.intersection(use.enabled)
+ if need_disabled:
+ need_disabled = need_disabled.difference(missing_disabled)
+ if need_disabled:
+ continue
+
+ mylist.append(x)
+
+ if mydep.repo:
+ candidate_list = mylist
+ mylist = []
+ for x in candidate_list:
+ repo = getattr(x, "repo", False)
+ if repo is False:
+ repo = dep_getrepo(x)
+ if repo is not None and repo != mydep.repo:
+ continue
+ mylist.append(x)
+
+ return mylist
+
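+# A short sketch with hypothetical package names (illustrative doctest):
+#
+# >>> match_from_list(">=dev-libs/foo-1.1", ["dev-libs/foo-1.0", "dev-libs/foo-1.2"])
+# ['dev-libs/foo-1.2']
+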
+def human_readable_required_use(required_use):
+ return required_use.replace("^^", "exactly-one-of").replace("||", "any-of")
+
+def get_required_use_flags(required_use):
+ """
+ Returns a set of use flags that are used in the given REQUIRED_USE string
+
+ @param required_use: REQUIRED_USE string
+ @type required_use: String
+ @rtype: Set
+ @return: Set of use flags that are used in the given REQUIRED_USE string
+ """
+
+ mysplit = required_use.split()
+ level = 0
+ stack = [[]]
+ need_bracket = False
+
+ used_flags = set()
+
+ def register_token(token):
+ if token.endswith("?"):
+ token = token[:-1]
+ if token.startswith("!"):
+ token = token[1:]
+ used_flags.add(token)
+
+ for token in mysplit:
+ if token == "(":
+ need_bracket = False
+ stack.append([])
+ level += 1
+ elif token == ")":
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+ if level > 0:
+ level -= 1
+ l = stack.pop()
+ ignore = False
+ if stack[level]:
+ if stack[level][-1] in ("||", "^^") or \
+ (not isinstance(stack[level][-1], bool) and \
+ stack[level][-1][-1] == "?"):
+ ignore = True
+ stack[level].pop()
+ stack[level].append(True)
+
+ if l and not ignore:
+ stack[level].append(all(x for x in l))
+ else:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+ elif token in ("||", "^^"):
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+ need_bracket = True
+ stack[level].append(token)
+ else:
+ if need_bracket or "(" in token or ")" in token or \
+ "|" in token or "^" in token:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+
+ if token[-1] == "?":
+ need_bracket = True
+ stack[level].append(token)
+ else:
+ stack[level].append(True)
+
+ register_token(token)
+
+ if level != 0 or need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+
+ return frozenset(used_flags)
+
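+# For example (illustrative doctest):
+#
+# >>> sorted(get_required_use_flags("foo? ( !bar )"))
+# ['bar', 'foo']
+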
+class _RequiredUseLeaf(object):
+
+ __slots__ = ('_satisfied', '_token')
+
+ def __init__(self, token, satisfied):
+ self._token = token
+ self._satisfied = satisfied
+
+ def tounicode(self):
+ return self._token
+
+class _RequiredUseBranch(object):
+
+ __slots__ = ('_children', '_operator', '_parent', '_satisfied')
+
+ def __init__(self, operator=None, parent=None):
+ self._children = []
+ self._operator = operator
+ self._parent = parent
+ self._satisfied = False
+
+ def __bool__(self):
+ return self._satisfied
+
+ def tounicode(self):
+
+ include_parens = self._parent is not None
+ tokens = []
+ if self._operator is not None:
+ tokens.append(self._operator)
+
+ if include_parens:
+ tokens.append("(")
+
+ complex_nesting = False
+ node = self
+		while node is not None and not complex_nesting:
+ if node._operator in ("||", "^^"):
+ complex_nesting = True
+ else:
+ node = node._parent
+
+ if complex_nesting:
+ for child in self._children:
+ tokens.append(child.tounicode())
+ else:
+ for child in self._children:
+ if not child._satisfied:
+ tokens.append(child.tounicode())
+
+ if include_parens:
+ tokens.append(")")
+
+ return " ".join(tokens)
+
+ if sys.hexversion < 0x3000000:
+ __nonzero__ = __bool__
+
+def check_required_use(required_use, use, iuse_match):
+	"""
+	Checks if the use flags listed in 'use' satisfy all
+	constraints specified in 'required_use'.
+
+	@param required_use: REQUIRED_USE string
+	@type required_use: String
+	@param use: Enabled use flags
+	@type use: List
+	@param iuse_match: Callable that takes a single flag argument and returns
+		True if the flag is matched, False otherwise.
+	@type iuse_match: Callable
+	@rtype: _RequiredUseBranch
+	@return: Tree of parsed requirements whose boolean value indicates
+		whether the REQUIRED_USE constraints are satisfied
+	"""
+
+ def is_active(token):
+ if token.startswith("!"):
+ flag = token[1:]
+ is_negated = True
+ else:
+ flag = token
+ is_negated = False
+
+ if not flag or not iuse_match(flag):
+ msg = _("USE flag '%s' is not in IUSE") \
+ % (flag,)
+ e = InvalidData(msg, category='IUSE.missing')
+ raise InvalidDependString(msg, errors=(e,))
+
+ return (flag in use and not is_negated) or \
+ (flag not in use and is_negated)
+
+ def is_satisfied(operator, argument):
+ if not argument:
+ #|| ( ) -> True
+ return True
+
+ if operator == "||":
+ return (True in argument)
+ elif operator == "^^":
+ return (argument.count(True) == 1)
+ elif operator[-1] == "?":
+ return (False not in argument)
+
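+	# For example (illustrative): is_satisfied("||", [False, True]) is
+	# True, is_satisfied("^^", [True, True]) is False, and a trailing-"?"
+	# conditional is satisfied only when every argument is True.
+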
+ mysplit = required_use.split()
+ level = 0
+ stack = [[]]
+ tree = _RequiredUseBranch()
+ node = tree
+ need_bracket = False
+
+ for token in mysplit:
+ if token == "(":
+ if not need_bracket:
+ child = _RequiredUseBranch(parent=node)
+ node._children.append(child)
+ node = child
+
+ need_bracket = False
+ stack.append([])
+ level += 1
+ elif token == ")":
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+ if level > 0:
+ level -= 1
+ l = stack.pop()
+ op = None
+ if stack[level]:
+ if stack[level][-1] in ("||", "^^"):
+ op = stack[level].pop()
+ satisfied = is_satisfied(op, l)
+ stack[level].append(satisfied)
+ node._satisfied = satisfied
+
+ elif not isinstance(stack[level][-1], bool) and \
+ stack[level][-1][-1] == "?":
+ op = stack[level].pop()
+ if is_active(op[:-1]):
+ satisfied = is_satisfied(op, l)
+ stack[level].append(satisfied)
+ node._satisfied = satisfied
+ else:
+ node._satisfied = True
+ last_node = node._parent._children.pop()
+ if last_node is not node:
+ raise AssertionError(
+ "node is not last child of parent")
+ node = node._parent
+ continue
+
+ if op is None:
+ satisfied = False not in l
+ node._satisfied = satisfied
+ if l:
+ stack[level].append(satisfied)
+
+ if len(node._children) <= 1 or \
+ node._parent._operator not in ("||", "^^"):
+ last_node = node._parent._children.pop()
+ if last_node is not node:
+ raise AssertionError(
+ "node is not last child of parent")
+ for child in node._children:
+ node._parent._children.append(child)
+ if isinstance(child, _RequiredUseBranch):
+ child._parent = node._parent
+
+ elif not node._children:
+ last_node = node._parent._children.pop()
+ if last_node is not node:
+ raise AssertionError(
+ "node is not last child of parent")
+
+ elif len(node._children) == 1 and op in ("||", "^^"):
+ last_node = node._parent._children.pop()
+ if last_node is not node:
+ raise AssertionError(
+ "node is not last child of parent")
+ node._parent._children.append(node._children[0])
+ if isinstance(node._children[0], _RequiredUseBranch):
+ node._children[0]._parent = node._parent
+ node = node._children[0]
+ if node._operator is None and \
+ node._parent._operator not in ("||", "^^"):
+ last_node = node._parent._children.pop()
+ if last_node is not node:
+ raise AssertionError(
+ "node is not last child of parent")
+ for child in node._children:
+ node._parent._children.append(child)
+ if isinstance(child, _RequiredUseBranch):
+ child._parent = node._parent
+
+ node = node._parent
+ else:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+ elif token in ("||", "^^"):
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+ need_bracket = True
+ stack[level].append(token)
+ child = _RequiredUseBranch(operator=token, parent=node)
+ node._children.append(child)
+ node = child
+ else:
+ if need_bracket or "(" in token or ")" in token or \
+ "|" in token or "^" in token:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+
+ if token[-1] == "?":
+ need_bracket = True
+ stack[level].append(token)
+ child = _RequiredUseBranch(operator=token, parent=node)
+ node._children.append(child)
+ node = child
+ else:
+ satisfied = is_active(token)
+ stack[level].append(satisfied)
+ node._children.append(_RequiredUseLeaf(token, satisfied))
+
+ if level != 0 or need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+
+ tree._satisfied = False not in stack[0]
+ return tree
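+# Illustrative usage (a sketch; the lambda stands in for a real IUSE
+# matcher and the flag names are hypothetical):
+#   result = check_required_use("^^ ( gtk qt4 )", [],
+#       lambda flag: flag in ("gtk", "qt4"))
+#   bool(result)        # False: neither gtk nor qt4 is enabled
+#   result.tounicode()  # "^^ ( gtk qt4 )", the unsatisfied constraint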
+
+def extract_affecting_use(mystr, atom, eapi=None):
+	"""
+	Take a dep string and an atom and return the use flags
+	that decide if the given atom is in effect.
+
+	Example usage:
+		>>> extract_affecting_use('sasl? ( dev-libs/cyrus-sasl ) \
+			!minimal? ( cxx? ( dev-libs/cyrus-sasl ) )', 'dev-libs/cyrus-sasl')
+		set(['sasl', 'minimal', 'cxx'])
+
+	@param mystr: The dependency string
+	@type mystr: String
+	@param atom: The atom to get into effect
+	@type atom: String
+	@rtype: Set of strings
+	@return: Set of use flags that decide if the given atom is in effect
+	"""
+ useflag_re = _get_useflag_re(eapi)
+ mysplit = mystr.split()
+ level = 0
+ stack = [[]]
+ need_bracket = False
+ affecting_use = set()
+
+ def flag(conditional):
+ if conditional[0] == "!":
+ flag = conditional[1:-1]
+ else:
+ flag = conditional[:-1]
+
+ if useflag_re.match(flag) is None:
+ raise InvalidDependString(
+ _("invalid use flag '%s' in conditional '%s'") % \
+ (flag, conditional))
+
+ return flag
+
+ for token in mysplit:
+ if token == "(":
+ need_bracket = False
+ stack.append([])
+ level += 1
+ elif token == ")":
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+ if level > 0:
+ level -= 1
+ l = stack.pop()
+				is_single = (len(l) == 1 or (len(l) == 2 and (l[0] == "||" or l[0][-1] == "?")))
+
+				def ends_in_any_of_dep(k):
+					return k >= 0 and stack[k] and stack[k][-1] == "||"
+
+				def ends_in_operator(k):
+					return k >= 0 and stack[k] and (stack[k][-1] == "||" or stack[k][-1][-1] == "?")
+
+ def special_append():
+ """
+ Use extend instead of append if possible. This kills all redundant brackets.
+ """
+ if is_single and (not stack[level] or not stack[level][-1][-1] == "?"):
+ if len(l) == 1 and isinstance(l[0], list):
+ # l = [[...]]
+ stack[level].extend(l[0])
+ else:
+ stack[level].extend(l)
+ else:
+ stack[level].append(l)
+
+ if l:
+ if not ends_in_any_of_dep(level-1) and not ends_in_operator(level):
+ #Optimize: ( ( ... ) ) -> ( ... ). Make sure there is no '||' hanging around.
+ stack[level].extend(l)
+ elif not stack[level]:
+						#An '||' in the level above forces us to keep the brackets.
+ special_append()
+ elif len(l) == 1 and ends_in_any_of_dep(level):
+ #Optimize: || ( A ) -> A
+ stack[level].pop()
+ special_append()
+ elif len(l) == 2 and (l[0] == "||" or l[0][-1] == "?") and stack[level][-1] in (l[0], "||"):
+ #Optimize: || ( || ( ... ) ) -> || ( ... )
+ # foo? ( foo? ( ... ) ) -> foo? ( ... )
+ # || ( foo? ( ... ) ) -> foo? ( ... )
+ stack[level].pop()
+ special_append()
+ if l[0][-1] == "?":
+ affecting_use.add(flag(l[0]))
+ else:
+ if stack[level] and stack[level][-1][-1] == "?":
+ affecting_use.add(flag(stack[level][-1]))
+ special_append()
+ else:
+ if stack[level] and (stack[level][-1] == "||" or stack[level][-1][-1] == "?"):
+ stack[level].pop()
+ else:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+ elif token == "||":
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+ need_bracket = True
+ stack[level].append(token)
+ else:
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+
+ if token[-1] == "?":
+ need_bracket = True
+ stack[level].append(token)
+ elif token == atom:
+ stack[level].append(token)
+
+ if level != 0 or need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+
+ return affecting_use
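+# A further illustrative case (hypothetical flags and atom): nested
+# conditionals all count, e.g.
+#   extract_affecting_use("a? ( b? ( dev-libs/foo ) )", "dev-libs/foo")
+# returns the set {'a', 'b'}, since both flags gate the atom.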
diff --git a/portage_with_autodep/pym/portage/dep/dep_check.py b/portage_with_autodep/pym/portage/dep/dep_check.py
new file mode 100644
index 0000000..01d5021
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dep/dep_check.py
@@ -0,0 +1,679 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['dep_check', 'dep_eval', 'dep_wordreduce', 'dep_zapdeps']
+
+import logging
+
+import portage
+from portage import _unicode_decode
+from portage.dep import Atom, match_from_list, use_reduce
+from portage.exception import InvalidDependString, ParseError
+from portage.localization import _
+from portage.util import writemsg, writemsg_level
+from portage.versions import catpkgsplit, cpv_getkey, pkgcmp
+
+def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
+ trees=None, use_mask=None, use_force=None, **kwargs):
+ """
+ In order to solve bug #141118, recursively expand new-style virtuals so
+ as to collapse one or more levels of indirection, generating an expanded
+ search space. In dep_zapdeps, new-style virtuals will be assigned
+ zero cost regardless of whether or not they are currently installed. Virtual
+ blockers are supported but only when the virtual expands to a single
+ atom because it wouldn't necessarily make sense to block all the components
+ of a compound virtual. When more than one new-style virtual is matched,
+ the matches are sorted from highest to lowest versions and the atom is
+ expanded to || ( highest match ... lowest match )."""
+ newsplit = []
+ mytrees = trees[myroot]
+ portdb = mytrees["porttree"].dbapi
+ pkg_use_enabled = mytrees.get("pkg_use_enabled")
+ # Atoms are stored in the graph as (atom, id(atom)) tuples
+ # since each atom is considered to be a unique entity. For
+ # example, atoms that appear identical may behave differently
+ # in USE matching, depending on their unevaluated form. Also,
+ # specially generated virtual atoms may appear identical while
+ # having different _orig_atom attributes.
+ atom_graph = mytrees.get("atom_graph")
+ parent = mytrees.get("parent")
+ virt_parent = mytrees.get("virt_parent")
+ graph_parent = None
+ eapi = None
+ if parent is not None:
+ if virt_parent is not None:
+ graph_parent = virt_parent
+ parent = virt_parent
+ else:
+ graph_parent = parent
+ eapi = parent.metadata["EAPI"]
+ repoman = not mysettings.local_config
+ if kwargs["use_binaries"]:
+ portdb = trees[myroot]["bintree"].dbapi
+ pprovideddict = mysettings.pprovideddict
+ myuse = kwargs["myuse"]
+ for x in mysplit:
+ if x == "||":
+ newsplit.append(x)
+ continue
+ elif isinstance(x, list):
+ newsplit.append(_expand_new_virtuals(x, edebug, mydbapi,
+ mysettings, myroot=myroot, trees=trees, use_mask=use_mask,
+ use_force=use_force, **kwargs))
+ continue
+
+ if not isinstance(x, Atom):
+ raise ParseError(
+ _("invalid token: '%s'") % x)
+
+ if repoman:
+ x = x._eval_qa_conditionals(use_mask, use_force)
+
+ mykey = x.cp
+ if not mykey.startswith("virtual/"):
+ newsplit.append(x)
+ if atom_graph is not None:
+ atom_graph.add((x, id(x)), graph_parent)
+ continue
+
+ if x.blocker:
+ # Virtual blockers are no longer expanded here since
+ # the un-expanded virtual atom is more useful for
+ # maintaining a cache of blocker atoms.
+ newsplit.append(x)
+ if atom_graph is not None:
+ atom_graph.add((x, id(x)), graph_parent)
+ continue
+
+ if repoman or not hasattr(portdb, 'match_pkgs') or \
+ pkg_use_enabled is None:
+ if portdb.cp_list(x.cp):
+ newsplit.append(x)
+ else:
+ # TODO: Add PROVIDE check for repoman.
+ a = []
+ myvartree = mytrees.get("vartree")
+ if myvartree is not None:
+ mysettings._populate_treeVirtuals_if_needed(myvartree)
+ mychoices = mysettings.getvirtuals().get(mykey, [])
+ for y in mychoices:
+ a.append(Atom(x.replace(x.cp, y.cp, 1)))
+ if not a:
+ newsplit.append(x)
+ elif len(a) == 1:
+ newsplit.append(a[0])
+ else:
+ newsplit.append(['||'] + a)
+ continue
+
+ pkgs = []
+ # Ignore USE deps here, since otherwise we might not
+ # get any matches. Choices with correct USE settings
+ # will be preferred in dep_zapdeps().
+ matches = portdb.match_pkgs(x.without_use)
+ # Use descending order to prefer higher versions.
+ matches.reverse()
+ for pkg in matches:
+ # only use new-style matches
+ if pkg.cp.startswith("virtual/"):
+ pkgs.append(pkg)
+
+ mychoices = []
+ if not pkgs and not portdb.cp_list(x.cp):
+ myvartree = mytrees.get("vartree")
+ if myvartree is not None:
+ mysettings._populate_treeVirtuals_if_needed(myvartree)
+ mychoices = mysettings.getvirtuals().get(mykey, [])
+
+ if not (pkgs or mychoices):
+ # This one couldn't be expanded as a new-style virtual. Old-style
+ # virtuals have already been expanded by dep_virtual, so this one
+ # is unavailable and dep_zapdeps will identify it as such. The
+ # atom is not eliminated here since it may still represent a
+ # dependency that needs to be satisfied.
+ newsplit.append(x)
+ if atom_graph is not None:
+ atom_graph.add((x, id(x)), graph_parent)
+ continue
+
+ a = []
+ for pkg in pkgs:
+ virt_atom = '=' + pkg.cpv
+ if x.unevaluated_atom.use:
+ virt_atom += str(x.unevaluated_atom.use)
+ virt_atom = Atom(virt_atom)
+ if parent is None:
+ if myuse is None:
+ virt_atom = virt_atom.evaluate_conditionals(
+ mysettings.get("PORTAGE_USE", "").split())
+ else:
+ virt_atom = virt_atom.evaluate_conditionals(myuse)
+ else:
+ virt_atom = virt_atom.evaluate_conditionals(
+ pkg_use_enabled(parent))
+ else:
+ virt_atom = Atom(virt_atom)
+
+ # Allow the depgraph to map this atom back to the
+ # original, in order to avoid distortion in places
+ # like display or conflict resolution code.
+ virt_atom.__dict__['_orig_atom'] = x
+
+ # According to GLEP 37, RDEPEND is the only dependency
+ # type that is valid for new-style virtuals. Repoman
+ # should enforce this.
+ depstring = pkg.metadata['RDEPEND']
+ pkg_kwargs = kwargs.copy()
+ pkg_kwargs["myuse"] = pkg_use_enabled(pkg)
+ if edebug:
+ writemsg_level(_("Virtual Parent: %s\n") \
+ % (pkg,), noiselevel=-1, level=logging.DEBUG)
+ writemsg_level(_("Virtual Depstring: %s\n") \
+ % (depstring,), noiselevel=-1, level=logging.DEBUG)
+
+ # Set EAPI used for validation in dep_check() recursion.
+ mytrees["virt_parent"] = pkg
+
+ try:
+ mycheck = dep_check(depstring, mydbapi, mysettings,
+ myroot=myroot, trees=trees, **pkg_kwargs)
+ finally:
+ # Restore previous EAPI after recursion.
+ if virt_parent is not None:
+ mytrees["virt_parent"] = virt_parent
+ else:
+ del mytrees["virt_parent"]
+
+ if not mycheck[0]:
+ raise ParseError(_unicode_decode("%s: %s '%s'") % \
+ (pkg, mycheck[1], depstring))
+
+ # pull in the new-style virtual
+ mycheck[1].append(virt_atom)
+ a.append(mycheck[1])
+ if atom_graph is not None:
+ virt_atom_node = (virt_atom, id(virt_atom))
+ atom_graph.add(virt_atom_node, graph_parent)
+ atom_graph.add(pkg, virt_atom_node)
+ # Plain old-style virtuals. New-style virtuals are preferred.
+ if not pkgs:
+ for y in mychoices:
+ new_atom = Atom(x.replace(x.cp, y.cp, 1))
+ matches = portdb.match(new_atom)
+ # portdb is an instance of depgraph._dep_check_composite_db, so
+ # USE conditionals are already evaluated.
+ if matches and mykey in \
+ portdb.aux_get(matches[-1], ['PROVIDE'])[0].split():
+ a.append(new_atom)
+ if atom_graph is not None:
+ atom_graph.add((new_atom, id(new_atom)),
+ graph_parent)
+
+ if not a and mychoices:
+ # Check for a virtual package.provided match.
+ for y in mychoices:
+ new_atom = Atom(x.replace(x.cp, y.cp, 1))
+ if match_from_list(new_atom,
+ pprovideddict.get(new_atom.cp, [])):
+ a.append(new_atom)
+ if atom_graph is not None:
+ atom_graph.add((new_atom, id(new_atom)), graph_parent)
+
+ if not a:
+ newsplit.append(x)
+ if atom_graph is not None:
+ atom_graph.add((x, id(x)), graph_parent)
+ elif len(a) == 1:
+ newsplit.append(a[0])
+ else:
+ newsplit.append(['||'] + a)
+
+ return newsplit
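+# Conceptual sketch (hypothetical names and shapes, not from this source):
+# an atom virtual/foo with two new-style matches virtual/foo-2 and
+# virtual/foo-1 expands to
+#   ['||', [<deps of foo-2>..., '=virtual/foo-2'],
+#          [<deps of foo-1>..., '=virtual/foo-1']]
+# with higher versions preferred (listed first).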
+
+def dep_eval(deplist):
+	if not deplist:
+		return 1
+	if deplist[0] == "||":
+		#or list; we just need one "1"
+		for x in deplist[1:]:
+			if isinstance(x, list):
+				if dep_eval(x) == 1:
+					return 1
+			elif x == 1:
+				return 1
+		#XXX: unless there are no available atoms in the list,
+		#in which case we need to assume that everything is
+		#okay, as some ebuilds rely on an old bug.
+		if len(deplist) == 1:
+			return 1
+		return 0
+	else:
+		for x in deplist:
+			if isinstance(x, list):
+				if dep_eval(x) == 0:
+					return 0
+			elif x == 0 or x == 2:
+				return 0
+		return 1
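+# Illustrative examples: dep_eval([1, ['||', 0, 1]]) == 1 since the
+# any-of sublist contains a satisfied entry; dep_eval([1, 0]) == 0; and
+# dep_eval([]) == 1 (an empty list is trivially satisfied).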
+
+def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
+ """
+ Takes an unreduced and reduced deplist and removes satisfied dependencies.
+ Returned deplist contains steps that must be taken to satisfy dependencies.
+ """
+ if trees is None:
+ trees = portage.db
+ writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
+ if not reduced or unreduced == ["||"] or dep_eval(reduced):
+ return []
+
+ if unreduced[0] != "||":
+ unresolved = []
+ for x, satisfied in zip(unreduced, reduced):
+ if isinstance(x, list):
+ unresolved += dep_zapdeps(x, satisfied, myroot,
+ use_binaries=use_binaries, trees=trees)
+ elif not satisfied: