author     Alexander Bersenev <bay@hackerdom.ru>  2011-08-21 17:35:50 +0000
committer  Alexander Bersenev <bay@hackerdom.ru>  2011-08-21 17:35:50 +0000
commit     91ffc6c50001d41fe1d16981baa32fb557463375 (patch)
tree       393551fe844a9c7ee030ad71efe03a92b76ac569  /portage_with_autodep/pym/portage
parent     portage integration patch is added (diff)
add a patched version of portage
Diffstat (limited to 'portage_with_autodep/pym/portage')
-rw-r--r--  portage_with_autodep/pym/portage/__init__.py | 610
-rw-r--r--  portage_with_autodep/pym/portage/_global_updates.py | 250
-rw-r--r--  portage_with_autodep/pym/portage/_legacy_globals.py | 81
-rw-r--r--  portage_with_autodep/pym/portage/_selinux.py | 129
-rw-r--r--  portage_with_autodep/pym/portage/_sets/__init__.py | 245
-rw-r--r--  portage_with_autodep/pym/portage/_sets/base.py | 264
-rw-r--r--  portage_with_autodep/pym/portage/_sets/dbapi.py | 383
-rw-r--r--  portage_with_autodep/pym/portage/_sets/files.py | 341
-rw-r--r--  portage_with_autodep/pym/portage/_sets/libs.py | 98
-rw-r--r--  portage_with_autodep/pym/portage/_sets/profiles.py | 53
-rw-r--r--  portage_with_autodep/pym/portage/_sets/security.py | 86
-rw-r--r--  portage_with_autodep/pym/portage/_sets/shell.py | 44
-rw-r--r--  portage_with_autodep/pym/portage/cache/__init__.py | 4
-rw-r--r--  portage_with_autodep/pym/portage/cache/anydbm.py | 113
-rw-r--r--  portage_with_autodep/pym/portage/cache/cache_errors.py | 62
-rw-r--r--  portage_with_autodep/pym/portage/cache/ebuild_xattr.py | 171
-rw-r--r--  portage_with_autodep/pym/portage/cache/flat_hash.py | 155
-rw-r--r--  portage_with_autodep/pym/portage/cache/flat_list.py | 134
-rw-r--r--  portage_with_autodep/pym/portage/cache/fs_template.py | 90
-rw-r--r--  portage_with_autodep/pym/portage/cache/mappings.py | 485
-rw-r--r--  portage_with_autodep/pym/portage/cache/metadata.py | 154
-rw-r--r--  portage_with_autodep/pym/portage/cache/metadata_overlay.py | 105
-rw-r--r--  portage_with_autodep/pym/portage/cache/sql_template.py | 301
-rw-r--r--  portage_with_autodep/pym/portage/cache/sqlite.py | 245
-rw-r--r--  portage_with_autodep/pym/portage/cache/template.py | 236
-rw-r--r--  portage_with_autodep/pym/portage/cache/util.py | 170
-rw-r--r--  portage_with_autodep/pym/portage/cache/volatile.py | 25
-rw-r--r--  portage_with_autodep/pym/portage/checksum.py | 291
-rw-r--r--  portage_with_autodep/pym/portage/const.py | 143
-rw-r--r--  portage_with_autodep/pym/portage/cvstree.py | 293
-rw-r--r--  portage_with_autodep/pym/portage/data.py | 122
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/_MergeProcess.py | 282
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/__init__.py | 302
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/_expand_new_virt.py | 72
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/bintree.py | 1366
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/cpv_expand.py | 106
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/dep_expand.py | 56
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/porttree.py | 1168
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/vartree.py | 4527
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/virtual.py | 131
-rw-r--r--  portage_with_autodep/pym/portage/debug.py | 120
-rw-r--r--  portage_with_autodep/pym/portage/dep/__init__.py | 2432
-rw-r--r--  portage_with_autodep/pym/portage/dep/dep_check.py | 679
-rw-r--r--  portage_with_autodep/pym/portage/dispatch_conf.py | 188
-rw-r--r--  portage_with_autodep/pym/portage/eapi.py | 50
-rw-r--r--  portage_with_autodep/pym/portage/eclass_cache.py | 123
-rw-r--r--  portage_with_autodep/pym/portage/elog/__init__.py | 182
-rw-r--r--  portage_with_autodep/pym/portage/elog/filtering.py | 15
-rw-r--r--  portage_with_autodep/pym/portage/elog/messages.py | 172
-rw-r--r--  portage_with_autodep/pym/portage/elog/mod_custom.py | 19
-rw-r--r--  portage_with_autodep/pym/portage/elog/mod_echo.py | 46
-rw-r--r--  portage_with_autodep/pym/portage/elog/mod_mail.py | 43
-rw-r--r--  portage_with_autodep/pym/portage/elog/mod_mail_summary.py | 89
-rw-r--r--  portage_with_autodep/pym/portage/elog/mod_save.py | 51
-rw-r--r--  portage_with_autodep/pym/portage/elog/mod_save_summary.py | 59
-rw-r--r--  portage_with_autodep/pym/portage/elog/mod_syslog.py | 32
-rw-r--r--  portage_with_autodep/pym/portage/env/__init__.py | 3
-rw-r--r--  portage_with_autodep/pym/portage/env/config.py | 105
-rw-r--r--  portage_with_autodep/pym/portage/env/loaders.py | 319
-rw-r--r--  portage_with_autodep/pym/portage/env/validators.py | 20
-rw-r--r--  portage_with_autodep/pym/portage/exception.py | 186
-rw-r--r--  portage_with_autodep/pym/portage/getbinpkg.py | 861
-rw-r--r--  portage_with_autodep/pym/portage/glsa.py | 699
-rw-r--r--  portage_with_autodep/pym/portage/localization.py | 20
-rw-r--r--  portage_with_autodep/pym/portage/locks.py | 395
-rw-r--r--  portage_with_autodep/pym/portage/mail.py | 177
-rw-r--r--  portage_with_autodep/pym/portage/manifest.py | 538
-rw-r--r--  portage_with_autodep/pym/portage/news.py | 351
-rw-r--r--  portage_with_autodep/pym/portage/output.py | 794
-rw-r--r--  portage_with_autodep/pym/portage/package/__init__.py | 2
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/__init__.py | 2
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_config/KeywordsManager.py | 284
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_config/LicenseManager.py | 236
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_config/LocationsManager.py | 182
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_config/MaskManager.py | 189
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_config/UseManager.py | 235
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_config/VirtualsManager.py | 233
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_config/__init__.py | 2
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_config/env_var_validation.py | 23
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_config/features_set.py | 128
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_config/helper.py | 64
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_config/special_env_vars.py | 185
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_ipc/ExitCommand.py | 27
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_ipc/IpcCommand.py | 9
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_ipc/QueryCommand.py | 98
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_ipc/__init__.py | 2
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/_spawn_nofetch.py | 82
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/config.py | 2224
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/deprecated_profile_check.py | 42
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/digestcheck.py | 167
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/digestgen.py | 202
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/doebuild.py | 1791
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/fetch.py | 1129
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/getmaskingreason.py | 124
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/getmaskingstatus.py | 174
-rw-r--r--  portage_with_autodep/pym/portage/package/ebuild/prepare_build_dirs.py | 370
-rw-r--r--  portage_with_autodep/pym/portage/process.py | 427
-rw-r--r--  portage_with_autodep/pym/portage/proxy/__init__.py | 2
-rw-r--r--  portage_with_autodep/pym/portage/proxy/lazyimport.py | 212
-rw-r--r--  portage_with_autodep/pym/portage/proxy/objectproxy.py | 91
-rw-r--r--  portage_with_autodep/pym/portage/repository/__init__.py | 2
-rw-r--r--  portage_with_autodep/pym/portage/repository/config.py | 504
-rw-r--r--  portage_with_autodep/pym/portage/tests/__init__.py | 244
-rw-r--r--  portage_with_autodep/pym/portage/tests/bin/__init__.py | 0
-rw-r--r--  portage_with_autodep/pym/portage/tests/bin/__test__ | 0
-rw-r--r--  portage_with_autodep/pym/portage/tests/bin/setup_env.py | 85
-rw-r--r--  portage_with_autodep/pym/portage/tests/bin/test_dobin.py | 16
-rw-r--r--  portage_with_autodep/pym/portage/tests/bin/test_dodir.py | 16
-rw-r--r--  portage_with_autodep/pym/portage/tests/dbapi/__init__.py | 2
-rw-r--r--  portage_with_autodep/pym/portage/tests/dbapi/__test__ | 0
-rw-r--r--  portage_with_autodep/pym/portage/tests/dbapi/test_fakedbapi.py | 58
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/__init__.py | 3
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/__test__ | 0
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/testAtom.py | 315
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/testCheckRequiredUse.py | 219
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/testExtendedAtomDict.py | 18
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/testExtractAffectingUSE.py | 75
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/testStandalone.py | 36
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_best_match_to_list.py | 43
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_dep_getcpv.py | 35
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_dep_getrepo.py | 29
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_dep_getslot.py | 28
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_dep_getusedeps.py | 35
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_get_operator.py | 33
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_get_required_use_flags.py | 42
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_isjustname.py | 24
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_isvalidatom.py | 146
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_match_from_list.py | 108
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_paren_reduce.py | 66
-rw-r--r--  portage_with_autodep/pym/portage/tests/dep/test_use_reduce.py | 627
-rw-r--r--  portage_with_autodep/pym/portage/tests/ebuild/__init__.py | 2
-rw-r--r--  portage_with_autodep/pym/portage/tests/ebuild/__test__ | 0
-rw-r--r--  portage_with_autodep/pym/portage/tests/ebuild/test_array_fromfile_eof.py | 43
-rw-r--r--  portage_with_autodep/pym/portage/tests/ebuild/test_config.py | 198
-rw-r--r--  portage_with_autodep/pym/portage/tests/ebuild/test_doebuild_spawn.py | 82
-rw-r--r--  portage_with_autodep/pym/portage/tests/ebuild/test_ipc_daemon.py | 124
-rw-r--r--  portage_with_autodep/pym/portage/tests/ebuild/test_pty_eof.py | 32
-rw-r--r--  portage_with_autodep/pym/portage/tests/ebuild/test_spawn.py | 52
-rw-r--r--  portage_with_autodep/pym/portage/tests/env/__init__.py | 4
-rw-r--r--  portage_with_autodep/pym/portage/tests/env/__test__ | 0
-rw-r--r--  portage_with_autodep/pym/portage/tests/env/config/__init__.py | 4
-rw-r--r--  portage_with_autodep/pym/portage/tests/env/config/__test__ | 0
-rw-r--r--  portage_with_autodep/pym/portage/tests/env/config/test_PackageKeywordsFile.py | 40
-rw-r--r--  portage_with_autodep/pym/portage/tests/env/config/test_PackageMaskFile.py | 29
-rw-r--r--  portage_with_autodep/pym/portage/tests/env/config/test_PackageUseFile.py | 37
-rw-r--r--  portage_with_autodep/pym/portage/tests/env/config/test_PortageModulesFile.py | 39
-rw-r--r--  portage_with_autodep/pym/portage/tests/lafilefixer/__init__.py | 0
-rw-r--r--  portage_with_autodep/pym/portage/tests/lafilefixer/__test__ | 0
-rw-r--r--  portage_with_autodep/pym/portage/tests/lafilefixer/test_lafilefixer.py | 145
-rw-r--r--  portage_with_autodep/pym/portage/tests/lazyimport/__init__.py | 0
-rw-r--r--  portage_with_autodep/pym/portage/tests/lazyimport/__test__ | 0
-rw-r--r--  portage_with_autodep/pym/portage/tests/lazyimport/test_lazy_import_portage_baseline.py | 81
-rw-r--r--  portage_with_autodep/pym/portage/tests/lazyimport/test_preload_portage_submodules.py | 16
-rw-r--r--  portage_with_autodep/pym/portage/tests/lint/__init__.py | 0
-rw-r--r--  portage_with_autodep/pym/portage/tests/lint/__test__ | 0
-rw-r--r--  portage_with_autodep/pym/portage/tests/lint/test_bash_syntax.py | 42
-rw-r--r--  portage_with_autodep/pym/portage/tests/lint/test_compile_modules.py | 46
-rw-r--r--  portage_with_autodep/pym/portage/tests/lint/test_import_modules.py | 40
-rw-r--r--  portage_with_autodep/pym/portage/tests/locks/__init__.py | 2
-rw-r--r--  portage_with_autodep/pym/portage/tests/locks/__test__ | 0
-rw-r--r--  portage_with_autodep/pym/portage/tests/locks/test_asynchronous_lock.py | 124
-rw-r--r--  portage_with_autodep/pym/portage/tests/locks/test_lock_nonblock.py | 46
-rw-r--r--  portage_with_autodep/pym/portage/tests/news/__init__.py | 3
-rw-r--r--  portage_with_autodep/pym/portage/tests/news/__test__ | 0
-rw-r--r--  portage_with_autodep/pym/portage/tests/news/test_NewsItem.py | 95
-rw-r--r--  portage_with_autodep/pym/portage/tests/process/__init__.py | 2
-rw-r--r--  portage_with_autodep/pym/portage/tests/process/__test__ | 0
-rw-r--r--  portage_with_autodep/pym/portage/tests/process/test_poll.py | 39
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/ResolverPlayground.py | 690
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/__init__.py | 2
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/__test__ | 0
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_autounmask.py | 326
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_backtracking.py | 169
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_circular_dependencies.py | 84
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_depclean.py | 285
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_depth.py | 252
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_eapi.py | 115
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_merge_order.py | 453
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_missing_iuse_and_evaluated_atoms.py | 31
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_multirepo.py | 318
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_multislot.py | 40
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_old_dep_chain_display.py | 35
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_output.py | 88
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_rebuild.py | 138
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_required_use.py | 114
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_simple.py | 57
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_slot_collisions.py | 143
-rw-r--r--  portage_with_autodep/pym/portage/tests/resolver/test_use_dep_defaults.py | 40
-rwxr-xr-x  portage_with_autodep/pym/portage/tests/runTests | 46
-rw-r--r--  portage_with_autodep/pym/portage/tests/sets/__init__.py | 0
-rw-r--r--  portage_with_autodep/pym/portage/tests/sets/base/__init__.py | 0
-rw-r--r--  portage_with_autodep/pym/portage/tests/sets/base/__test__ | 0
-rw-r--r--  portage_with_autodep/pym/portage/tests/sets/base/testInternalPackageSet.py | 61
-rw-r--r--  portage_with_autodep/pym/portage/tests/sets/files/__init__.py | 0
-rw-r--r--  portage_with_autodep/pym/portage/tests/sets/files/__test__ | 0
-rw-r--r--  portage_with_autodep/pym/portage/tests/sets/files/testConfigFileSet.py | 32
-rw-r--r--  portage_with_autodep/pym/portage/tests/sets/files/testStaticFileSet.py | 27
-rw-r--r--  portage_with_autodep/pym/portage/tests/sets/shell/__init__.py | 0
-rw-r--r--  portage_with_autodep/pym/portage/tests/sets/shell/__test__ | 0
-rw-r--r--  portage_with_autodep/pym/portage/tests/sets/shell/testShell.py | 28
-rw-r--r--  portage_with_autodep/pym/portage/tests/unicode/__init__.py | 2
-rw-r--r--  portage_with_autodep/pym/portage/tests/unicode/__test__ | 0
-rw-r--r--  portage_with_autodep/pym/portage/tests/unicode/test_string_format.py | 108
-rw-r--r--  portage_with_autodep/pym/portage/tests/util/__init__.py | 4
-rw-r--r--  portage_with_autodep/pym/portage/tests/util/__test__ | 0
-rw-r--r--  portage_with_autodep/pym/portage/tests/util/test_digraph.py | 201
-rw-r--r--  portage_with_autodep/pym/portage/tests/util/test_getconfig.py | 29
-rw-r--r--  portage_with_autodep/pym/portage/tests/util/test_grabdict.py | 11
-rw-r--r--  portage_with_autodep/pym/portage/tests/util/test_normalizedPath.py | 14
-rw-r--r--  portage_with_autodep/pym/portage/tests/util/test_stackDictList.py | 17
-rw-r--r--  portage_with_autodep/pym/portage/tests/util/test_stackDicts.py | 36
-rw-r--r--  portage_with_autodep/pym/portage/tests/util/test_stackLists.py | 19
-rw-r--r--  portage_with_autodep/pym/portage/tests/util/test_uniqueArray.py | 24
-rw-r--r--  portage_with_autodep/pym/portage/tests/util/test_varExpand.py | 92
-rw-r--r--  portage_with_autodep/pym/portage/tests/versions/__init__.py | 3
-rw-r--r--  portage_with_autodep/pym/portage/tests/versions/__test__ | 0
-rw-r--r--  portage_with_autodep/pym/portage/tests/versions/test_cpv_sort_key.py | 16
-rw-r--r--  portage_with_autodep/pym/portage/tests/versions/test_vercmp.py | 80
-rw-r--r--  portage_with_autodep/pym/portage/tests/xpak/__init__.py | 3
-rw-r--r--  portage_with_autodep/pym/portage/tests/xpak/__test__ | 0
-rw-r--r--  portage_with_autodep/pym/portage/tests/xpak/test_decodeint.py | 16
-rw-r--r--  portage_with_autodep/pym/portage/update.py | 320
-rw-r--r--  portage_with_autodep/pym/portage/util/ExtractKernelVersion.py | 76
-rw-r--r--  portage_with_autodep/pym/portage/util/__init__.py | 1602
-rw-r--r--  portage_with_autodep/pym/portage/util/_dyn_libs/LinkageMapELF.py | 805
-rw-r--r--  portage_with_autodep/pym/portage/util/_dyn_libs/PreservedLibsRegistry.py | 172
-rw-r--r--  portage_with_autodep/pym/portage/util/_dyn_libs/__init__.py | 2
-rw-r--r--  portage_with_autodep/pym/portage/util/_pty.py | 212
-rw-r--r--  portage_with_autodep/pym/portage/util/digraph.py | 342
-rw-r--r--  portage_with_autodep/pym/portage/util/env_update.py | 293
-rw-r--r--  portage_with_autodep/pym/portage/util/lafilefixer.py | 185
-rw-r--r--  portage_with_autodep/pym/portage/util/listdir.py | 151
-rw-r--r--  portage_with_autodep/pym/portage/util/movefile.py | 242
-rw-r--r--  portage_with_autodep/pym/portage/util/mtimedb.py | 81
-rw-r--r--  portage_with_autodep/pym/portage/versions.py | 403
-rw-r--r--  portage_with_autodep/pym/portage/xml/__init__.py | 2
-rw-r--r--  portage_with_autodep/pym/portage/xml/metadata.py | 376
-rw-r--r--  portage_with_autodep/pym/portage/xpak.py | 497
238 files changed, 45785 insertions, 0 deletions
diff --git a/portage_with_autodep/pym/portage/__init__.py b/portage_with_autodep/pym/portage/__init__.py
new file mode 100644
index 0000000..2a2eb99
--- /dev/null
+++ b/portage_with_autodep/pym/portage/__init__.py
@@ -0,0 +1,610 @@
+# portage.py -- core Portage functionality
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+VERSION="HEAD"
+
+# ===========================================================================
+# START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
+# ===========================================================================
+
+try:
+ import sys
+ import errno
+ if not hasattr(errno, 'ESTALE'):
+ # ESTALE may not be defined on some systems, such as interix.
+ errno.ESTALE = -1
+ import re
+ import types
+
+ # Try the commands module first, since this allows us to eliminate
+ # the subprocess module from the baseline imports under python2.
+ try:
+ from commands import getstatusoutput as subprocess_getstatusoutput
+ except ImportError:
+ from subprocess import getstatusoutput as subprocess_getstatusoutput
+
+ import platform
+
+ # Temporarily delete these imports, to ensure that only the
+ # wrapped versions are imported by portage internals.
+ import os
+ del os
+ import shutil
+ del shutil
+
+except ImportError as e:
+ sys.stderr.write("\n\n")
+ sys.stderr.write("!!! Failed to complete python imports. These are internal modules for\n")
+ sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
+ sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
+
+ sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
+ sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
+ sys.stderr.write(" "+str(e)+"\n\n");
+ raise
+
+try:
+
+ import portage.proxy.lazyimport
+ import portage.proxy as proxy
+ proxy.lazyimport.lazyimport(globals(),
+ 'portage.cache.cache_errors:CacheError',
+ 'portage.checksum',
+ 'portage.checksum:perform_checksum,perform_md5,prelink_capable',
+ 'portage.cvstree',
+ 'portage.data',
+ 'portage.data:lchown,ostype,portage_gid,portage_uid,secpass,' + \
+ 'uid,userland,userpriv_groups,wheelgid',
+ 'portage.dbapi',
+ 'portage.dbapi.bintree:bindbapi,binarytree',
+ 'portage.dbapi.cpv_expand:cpv_expand',
+ 'portage.dbapi.dep_expand:dep_expand',
+ 'portage.dbapi.porttree:close_portdbapi_caches,FetchlistDict,' + \
+ 'portagetree,portdbapi',
+ 'portage.dbapi.vartree:dblink,merge,unmerge,vardbapi,vartree',
+ 'portage.dbapi.virtual:fakedbapi',
+ 'portage.dep',
+ 'portage.dep:best_match_to_list,dep_getcpv,dep_getkey,' + \
+ 'flatten,get_operator,isjustname,isspecific,isvalidatom,' + \
+ 'match_from_list,match_to_list',
+ 'portage.dep.dep_check:dep_check,dep_eval,dep_wordreduce,dep_zapdeps',
+ 'portage.eclass_cache',
+ 'portage.exception',
+ 'portage.getbinpkg',
+ 'portage.locks',
+ 'portage.locks:lockdir,lockfile,unlockdir,unlockfile',
+ 'portage.mail',
+ 'portage.manifest:Manifest',
+ 'portage.output',
+ 'portage.output:bold,colorize',
+ 'portage.package.ebuild.doebuild:doebuild,' + \
+ 'doebuild_environment,spawn,spawnebuild',
+ 'portage.package.ebuild.config:autouse,best_from_dict,' + \
+ 'check_config_instance,config',
+ 'portage.package.ebuild.deprecated_profile_check:' + \
+ 'deprecated_profile_check',
+ 'portage.package.ebuild.digestcheck:digestcheck',
+ 'portage.package.ebuild.digestgen:digestgen',
+ 'portage.package.ebuild.fetch:fetch',
+ 'portage.package.ebuild.getmaskingreason:getmaskingreason',
+ 'portage.package.ebuild.getmaskingstatus:getmaskingstatus',
+ 'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
+ 'portage.process',
+ 'portage.process:atexit_register,run_exitfuncs',
+ 'portage.update:dep_transform,fixdbentries,grab_updates,' + \
+ 'parse_updates,update_config_files,update_dbentries,' + \
+ 'update_dbentry',
+ 'portage.util',
+ 'portage.util:atomic_ofstream,apply_secpass_permissions,' + \
+ 'apply_recursive_permissions,dump_traceback,getconfig,' + \
+ 'grabdict,grabdict_package,grabfile,grabfile_package,' + \
+ 'map_dictlist_vals,new_protect_filename,normalize_path,' + \
+ 'pickle_read,pickle_write,stack_dictlist,stack_dicts,' + \
+ 'stack_lists,unique_array,varexpand,writedict,writemsg,' + \
+ 'writemsg_stdout,write_atomic',
+ 'portage.util.digraph:digraph',
+ 'portage.util.env_update:env_update',
+ 'portage.util.ExtractKernelVersion:ExtractKernelVersion',
+ 'portage.util.listdir:cacheddir,listdir',
+ 'portage.util.movefile:movefile',
+ 'portage.util.mtimedb:MtimeDB',
+ 'portage.versions',
+ 'portage.versions:best,catpkgsplit,catsplit,cpv_getkey,' + \
+ 'cpv_getkey@getCPFromCPV,endversion_keys,' + \
+ 'suffix_value@endversion,pkgcmp,pkgsplit,vercmp,ververify',
+ 'portage.xpak',
+ 'time',
+ )
+
+ try:
+ from collections import OrderedDict
+ except ImportError:
+ proxy.lazyimport.lazyimport(globals(),
+ 'portage.cache.mappings:OrderedDict')
+
+ import portage.const
+ from portage.const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
+ USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
+ PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
+ EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \
+ MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
+ DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
+ INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE,\
+ INCREMENTALS, EAPI, MISC_SH_BINARY, REPO_NAME_LOC, REPO_NAME_FILE
+
+except ImportError as e:
+ sys.stderr.write("\n\n")
+ sys.stderr.write("!!! Failed to complete portage imports. There are internal modules for\n")
+ sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n")
+ sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the\n")
+ sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n")
+ sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n")
+ sys.stderr.write("!!! a recovery of portage.\n")
+ sys.stderr.write(" "+str(e)+"\n\n")
+ raise
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+ long = int
+
+# Assume utf_8 fs encoding everywhere except in merge code, where the
+# user's locale is respected.
+_encodings = {
+ 'content' : 'utf_8',
+ 'fs' : 'utf_8',
+ 'merge' : sys.getfilesystemencoding(),
+ 'repo.content' : 'utf_8',
+ 'stdio' : 'utf_8',
+}
+
+# This can happen if python is built with USE=build (stage 1).
+if _encodings['merge'] is None:
+ _encodings['merge'] = 'ascii'
+
+if sys.hexversion >= 0x3000000:
+ def _unicode_encode(s, encoding=_encodings['content'], errors='backslashreplace'):
+ if isinstance(s, str):
+ s = s.encode(encoding, errors)
+ return s
+
+ def _unicode_decode(s, encoding=_encodings['content'], errors='replace'):
+ if isinstance(s, bytes):
+ s = str(s, encoding=encoding, errors=errors)
+ return s
+else:
+ def _unicode_encode(s, encoding=_encodings['content'], errors='backslashreplace'):
+ if isinstance(s, unicode):
+ s = s.encode(encoding, errors)
+ return s
+
+ def _unicode_decode(s, encoding=_encodings['content'], errors='replace'):
+ if isinstance(s, bytes):
+ s = unicode(s, encoding=encoding, errors=errors)
+ return s
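+
+ # Illustrative examples (added comment, not in the original source),
+ # assuming the default 'utf_8' content encoding:
+ # _unicode_encode('f\xf6o') -> b'f\xc3\xb6o'
+ # _unicode_decode(b'f\xc3\xb6o') -> 'f\xf6o'
+ # Non-string inputs are returned unchanged by both helpers.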
+
+class _unicode_func_wrapper(object):
+ """
+ Wraps a function, converting arguments from unicode to bytes
+ and return values from bytes back to unicode. Function calls
+ will raise UnicodeEncodeError if an argument fails to be
+ encoded with the required encoding. Return values that
+ are single strings are decoded with errors='replace'. Return
+ values that are lists of strings are decoded with errors='strict'
+ and elements that fail to be decoded are omitted from the returned
+ list.
+ """
+ __slots__ = ('_func', '_encoding')
+
+ def __init__(self, func, encoding=_encodings['fs']):
+ self._func = func
+ self._encoding = encoding
+
+ def __call__(self, *args, **kwargs):
+
+ encoding = self._encoding
+ wrapped_args = [_unicode_encode(x, encoding=encoding, errors='strict')
+ for x in args]
+ if kwargs:
+ wrapped_kwargs = dict(
+ (k, _unicode_encode(v, encoding=encoding, errors='strict'))
+ for k, v in kwargs.items())
+ else:
+ wrapped_kwargs = {}
+
+ rval = self._func(*wrapped_args, **wrapped_kwargs)
+
+ # Don't use isinstance() since we don't want to convert subclasses
+ # of tuple such as posix.stat_result in python-3.2.
+ if rval.__class__ in (list, tuple):
+ decoded_rval = []
+ for x in rval:
+ try:
+ x = _unicode_decode(x, encoding=encoding, errors='strict')
+ except UnicodeDecodeError:
+ pass
+ else:
+ decoded_rval.append(x)
+
+ if isinstance(rval, tuple):
+ rval = tuple(decoded_rval)
+ else:
+ rval = decoded_rval
+ else:
+ rval = _unicode_decode(rval, encoding=encoding, errors='replace')
+
+ return rval
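+
+ # Illustrative usage (added comment; _os here is the raw os module
+ # imported further below):
+ # wrapped_listdir = _unicode_func_wrapper(_os.listdir)
+ # wrapped_listdir('/tmp') encodes the path argument to bytes, calls
+ # the real function, and decodes each returned entry; entries that
+ # fail strict decoding are silently omitted from the result list.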
+
+class _unicode_module_wrapper(object):
+ """
+ Wraps a module and wraps all functions with _unicode_func_wrapper.
+ """
+ __slots__ = ('_mod', '_encoding', '_overrides', '_cache')
+
+ def __init__(self, mod, encoding=_encodings['fs'], overrides=None, cache=True):
+ object.__setattr__(self, '_mod', mod)
+ object.__setattr__(self, '_encoding', encoding)
+ object.__setattr__(self, '_overrides', overrides)
+ if cache:
+ cache = {}
+ else:
+ cache = None
+ object.__setattr__(self, '_cache', cache)
+
+ def __getattribute__(self, attr):
+ cache = object.__getattribute__(self, '_cache')
+ if cache is not None:
+ result = cache.get(attr)
+ if result is not None:
+ return result
+ result = getattr(object.__getattribute__(self, '_mod'), attr)
+ encoding = object.__getattribute__(self, '_encoding')
+ overrides = object.__getattribute__(self, '_overrides')
+ override = None
+ if overrides is not None:
+ override = overrides.get(id(result))
+ if override is not None:
+ result = override
+ elif isinstance(result, type):
+ pass
+ elif type(result) is types.ModuleType:
+ result = _unicode_module_wrapper(result,
+ encoding=encoding, overrides=overrides)
+ elif hasattr(result, '__call__'):
+ result = _unicode_func_wrapper(result, encoding=encoding)
+ if cache is not None:
+ cache[attr] = result
+ return result
+
+import os as _os
+_os_overrides = {
+ id(_os.fdopen) : _os.fdopen,
+ id(_os.mkfifo) : _os.mkfifo,
+ id(_os.popen) : _os.popen,
+ id(_os.read) : _os.read,
+ id(_os.system) : _os.system,
+}
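+
+ # Added note: attribute lookups whose id() appears in this map resolve
+ # to the original, unwrapped functions, so the unicode argument/return
+ # conversion performed by _unicode_module_wrapper is bypassed for them.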
+
+if hasattr(_os, 'statvfs'):
+ _os_overrides[id(_os.statvfs)] = _os.statvfs
+
+os = _unicode_module_wrapper(_os, overrides=_os_overrides,
+ encoding=_encodings['fs'])
+_os_merge = _unicode_module_wrapper(_os,
+ encoding=_encodings['merge'], overrides=_os_overrides)
+
+import shutil as _shutil
+shutil = _unicode_module_wrapper(_shutil, encoding=_encodings['fs'])
+
+# Imports below this point rely on the above unicode wrapper definitions.
+try:
+ __import__('selinux')
+ import portage._selinux
+ selinux = _unicode_module_wrapper(_selinux,
+ encoding=_encodings['fs'])
+ _selinux_merge = _unicode_module_wrapper(_selinux,
+ encoding=_encodings['merge'])
+except (ImportError, OSError) as e:
+ if isinstance(e, OSError):
+ sys.stderr.write("!!! SELinux not loaded: %s\n" % str(e))
+ del e
+ _selinux = None
+ selinux = None
+ _selinux_merge = None
+
+# ===========================================================================
+# END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END
+# ===========================================================================
+
+_python_interpreter = os.path.realpath(sys.executable)
+_bin_path = PORTAGE_BIN_PATH
+_pym_path = PORTAGE_PYM_PATH
+
+def _shell_quote(s):
+ """
+ Quote a string in double-quotes and use backslashes to
+ escape any backslashes, double-quotes, dollar signs, or
+ backquotes in the string.
+ """
+ for letter in "\\\"$`":
+ if letter in s:
+ s = s.replace(letter, "\\" + letter)
+ return "\"%s\"" % s
+
+bsd_chflags = None
+
+if platform.system() in ('FreeBSD',):
+
+ class bsd_chflags(object):
+
+ @classmethod
+ def chflags(cls, path, flags, opts=""):
+ cmd = 'chflags %s %o %s' % (opts, flags, _shell_quote(path))
+ status, output = subprocess_getstatusoutput(cmd)
+ if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
+ return
+ # Try to generate an ENOENT error if appropriate.
+ if 'h' in opts:
+ _os_merge.lstat(path)
+ else:
+ _os_merge.stat(path)
+ # Make sure the binary exists.
+ if not portage.process.find_binary('chflags'):
+ raise portage.exception.CommandNotFound('chflags')
+ # Now we're not sure exactly why it failed or what
+ # the real errno was, so just report EPERM.
+ e = OSError(errno.EPERM, output)
+ e.errno = errno.EPERM
+ e.filename = path
+ e.message = output
+ raise e
+
+ @classmethod
+ def lchflags(cls, path, flags):
+ return cls.chflags(path, flags, opts='-h')
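+
+ # Illustrative usage (added comment; FreeBSD only):
+ # bsd_chflags.lchflags('/some/file', 0) runs
+ # chflags -h 0 "/some/file"
+ # and raises OSError (or CommandNotFound) if the command fails.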
+
+def load_mod(name):
+ modname = ".".join(name.split(".")[:-1])
+ mod = __import__(modname)
+ components = name.split('.')
+ for comp in components[1:]:
+ mod = getattr(mod, comp)
+ return mod
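+
+ # Example (added comment):
+ # load_mod("portage._sets.files.StaticFileSet") imports
+ # portage._sets.files and returns its StaticFileSet class; SetConfig
+ # uses this to resolve "class" entries from set configuration sections.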
+
+def getcwd():
+ "this fixes situations where the current directory doesn't exist"
+ try:
+ return os.getcwd()
+ except OSError: #dir doesn't exist
+ os.chdir("/")
+ return "/"
+getcwd()
+
+def abssymlink(symlink):
+ "This reads symlinks, resolving the relative symlinks, and returning the absolute."
+ mylink=os.readlink(symlink)
+ if mylink[0] != '/':
+ mydir=os.path.dirname(symlink)
+ mylink=mydir+"/"+mylink
+ return os.path.normpath(mylink)
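+
+ # Example (added comment): if /usr/portage is a symlink with target
+ # "../var/portage", abssymlink("/usr/portage") returns "/var/portage"
+ # (the normpath of "/usr/../var/portage").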
+
+_doebuild_manifest_exempt_depend = 0
+
+_testing_eapis = frozenset(["4-python"])
+_deprecated_eapis = frozenset(["4_pre1", "3_pre2", "3_pre1"])
+
+def _eapi_is_deprecated(eapi):
+ return eapi in _deprecated_eapis
+
+def eapi_is_supported(eapi):
+ if not isinstance(eapi, basestring):
+ # Only call str() when necessary since with python2 it
+ # can trigger UnicodeEncodeError if EAPI is corrupt.
+ eapi = str(eapi)
+ eapi = eapi.strip()
+
+ if _eapi_is_deprecated(eapi):
+ return True
+
+ if eapi in _testing_eapis:
+ return True
+
+ try:
+ eapi = int(eapi)
+ except ValueError:
+ eapi = -1
+ if eapi < 0:
+ return False
+ return eapi <= portage.const.EAPI
+
+# Generally, it's best not to assume that cache entries for unsupported EAPIs
+# can be validated. However, the current package manager specification does not
+# guarantee that the EAPI can be parsed without sourcing the ebuild, so
+# it's too costly to discard existing cache entries for unsupported EAPIs.
+# Therefore, by default, assume that cache entries for unsupported EAPIs can be
+# validated. If FEATURES=parse-eapi-* is enabled, this assumption is discarded
+# since the EAPI can be determined without incurring the cost of sourcing
+# the ebuild.
+_validate_cache_for_unsupported_eapis = True
+
+_parse_eapi_ebuild_head_re = re.compile(r'^EAPI=[\'"]?([^\'"#]*)')
+_parse_eapi_ebuild_head_max_lines = 30
+
+def _parse_eapi_ebuild_head(f):
+ count = 0
+ for line in f:
+ m = _parse_eapi_ebuild_head_re.match(line)
+ if m is not None:
+ return m.group(1).strip()
+ count += 1
+ if count >= _parse_eapi_ebuild_head_max_lines:
+ break
+ return '0'
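+
+ # Example (added comment): for an ebuild whose early lines contain
+ # EAPI="4"
+ # this returns '4'; if no EAPI assignment is found within the first
+ # 30 lines, it falls back to '0'.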
+
+def _movefile(src, dest, **kwargs):
+ """Calls movefile and raises a PortageException if an error occurs."""
+ if movefile(src, dest, **kwargs) is None:
+ raise portage.exception.PortageException(
+ "mv '%s' '%s'" % (src, dest))
+
+auxdbkeys = (
+ 'DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
+ 'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
+ 'KEYWORDS', 'INHERITED', 'IUSE', 'REQUIRED_USE',
+ 'PDEPEND', 'PROVIDE', 'EAPI',
+ 'PROPERTIES', 'DEFINED_PHASES', 'UNUSED_05', 'UNUSED_04',
+ 'UNUSED_03', 'UNUSED_02', 'UNUSED_01',
+)
+auxdbkeylen=len(auxdbkeys)
+
+def portageexit():
+ if data.secpass > 1 and os.environ.get("SANDBOX_ON") != "1":
+ close_portdbapi_caches()
+
+def create_trees(config_root=None, target_root=None, trees=None):
+ if trees is None:
+ trees = {}
+ else:
+ # clean up any existing portdbapi instances
+ for myroot in trees:
+ portdb = trees[myroot]["porttree"].dbapi
+ portdb.close_caches()
+ portdbapi.portdbapi_instances.remove(portdb)
+ del trees[myroot]["porttree"], myroot, portdb
+
+ settings = config(config_root=config_root, target_root=target_root,
+ config_incrementals=portage.const.INCREMENTALS)
+ settings.lock()
+
+ myroots = [(settings["ROOT"], settings)]
+ if settings["ROOT"] != "/":
+
+ # When ROOT != "/" we only want overrides from the calling
+ # environment to apply to the config that's associated
+ # with ROOT != "/", so pass a nearly empty dict for the env parameter.
+ clean_env = {}
+ for k in ('PATH', 'PORTAGE_GRPNAME', 'PORTAGE_USERNAME',
+ 'SSH_AGENT_PID', 'SSH_AUTH_SOCK', 'TERM',
+ 'ftp_proxy', 'http_proxy', 'no_proxy'):
+ v = settings.get(k)
+ if v is not None:
+ clean_env[k] = v
+ settings = config(config_root=None, target_root="/", env=clean_env)
+ settings.lock()
+ myroots.append((settings["ROOT"], settings))
+
+ for myroot, mysettings in myroots:
+ trees[myroot] = portage.util.LazyItemsDict(trees.get(myroot, {}))
+ trees[myroot].addLazySingleton("virtuals", mysettings.getvirtuals)
+ trees[myroot].addLazySingleton(
+ "vartree", vartree, myroot, categories=mysettings.categories,
+ settings=mysettings)
+ trees[myroot].addLazySingleton("porttree",
+ portagetree, myroot, settings=mysettings)
+ trees[myroot].addLazySingleton("bintree",
+ binarytree, myroot, mysettings["PKGDIR"], settings=mysettings)
+ return trees
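+
+ # Illustrative usage (added comment):
+ # trees = create_trees()
+ # vardb = trees["/"]["vartree"].dbapi # keyed by ROOT, "/" by default
+ # The "porttree", "bintree" and "virtuals" entries are constructed
+ # lazily, on first access, via LazyItemsDict.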
+
+if VERSION == 'HEAD':
+ class _LazyVersion(proxy.objectproxy.ObjectProxy):
+ def _get_target(self):
+ global VERSION
+ if VERSION is not self:
+ return VERSION
+ if os.path.isdir(os.path.join(PORTAGE_BASE_PATH, '.git')):
+ status, output = subprocess_getstatusoutput((
+ "cd %s ; git describe --tags || exit $? ; " + \
+ "if [ -n \"`git diff-index --name-only --diff-filter=M HEAD`\" ] ; " + \
+ "then echo modified ; git rev-list --format=%%ct -n 1 HEAD ; fi ; " + \
+ "exit 0") % _shell_quote(PORTAGE_BASE_PATH))
+ if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
+ output_lines = output.splitlines()
+ if output_lines:
+ version_split = output_lines[0].split('-')
+ if version_split:
+ VERSION = version_split[0].lstrip('v')
+ patchlevel = False
+ if len(version_split) > 1:
+ patchlevel = True
+ VERSION = "%s_p%s" %(VERSION, version_split[1])
+ if len(output_lines) > 1 and output_lines[1] == 'modified':
+ head_timestamp = None
+ if len(output_lines) > 3:
+ try:
+ head_timestamp = long(output_lines[3])
+ except ValueError:
+ pass
+ timestamp = long(time.time())
+ if head_timestamp is not None and timestamp > head_timestamp:
+ timestamp = timestamp - head_timestamp
+ if not patchlevel:
+ VERSION = "%s_p0" % (VERSION,)
+ VERSION = "%s_p%d" % (VERSION, timestamp)
+ return VERSION
+ VERSION = 'HEAD'
+ return VERSION
+ VERSION = _LazyVersion()
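+
+ # Example (added comment): a `git describe --tags` output of
+ # "v2.1.10-3-gdeadbee" yields VERSION == "2.1.10_p3"; if the working
+ # tree is modified, the seconds elapsed since the HEAD commit are
+ # appended as an additional _p suffix.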
+
+if "_legacy_globals_constructed" in globals():
+ # The module has been reloaded, so perform any relevant cleanup
+ # and prevent memory leaks.
+ if "db" in _legacy_globals_constructed:
+ try:
+ db
+ except NameError:
+ pass
+ else:
+ if isinstance(db, dict) and db:
+ for _x in db.values():
+ try:
+ if "porttree" in _x.lazy_items:
+ continue
+ except (AttributeError, TypeError):
+ continue
+ try:
+ _x = _x["porttree"].dbapi
+ except (AttributeError, KeyError):
+ continue
+ if not isinstance(_x, portdbapi):
+ continue
+ _x.close_caches()
+ try:
+ portdbapi.portdbapi_instances.remove(_x)
+ except ValueError:
+ pass
+ del _x
+
+class _LegacyGlobalProxy(proxy.objectproxy.ObjectProxy):
+
+ __slots__ = ('_name',)
+
+ def __init__(self, name):
+ proxy.objectproxy.ObjectProxy.__init__(self)
+ object.__setattr__(self, '_name', name)
+
+ def _get_target(self):
+ name = object.__getattribute__(self, '_name')
+ from portage._legacy_globals import _get_legacy_global
+ return _get_legacy_global(name)
+
+_legacy_global_var_names = ("archlist", "db", "features",
+ "groups", "mtimedb", "mtimedbfile", "pkglines",
+ "portdb", "profiledir", "root", "selinux_enabled",
+ "settings", "thirdpartymirrors")
+
+for k in _legacy_global_var_names:
+ globals()[k] = _LegacyGlobalProxy(k)
+del k
+
+_legacy_globals_constructed = set()
+
+def _disable_legacy_globals():
+ """
+ This deletes the ObjectProxy instances that are used
+ for lazy initialization of legacy global variables.
+ The purpose of deleting them is to prevent new code
+ from referencing these deprecated variables.
+ """
+ global _legacy_global_var_names
+ for k in _legacy_global_var_names:
+ globals().pop(k, None)
diff --git a/portage_with_autodep/pym/portage/_global_updates.py b/portage_with_autodep/pym/portage/_global_updates.py
new file mode 100644
index 0000000..868d1ee
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_global_updates.py
@@ -0,0 +1,250 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import stat
+
+from portage import best, os
+from portage.const import WORLD_FILE
+from portage.data import secpass
+from portage.exception import DirectoryNotFound
+from portage.localization import _
+from portage.output import bold, colorize
+from portage.update import grab_updates, parse_updates, update_config_files, update_dbentry
+from portage.util import grabfile, shlex_split, \
+ writemsg, writemsg_stdout, write_atomic
+
+def _global_updates(trees, prev_mtimes, quiet=False):
+ """
+ Perform new global updates if they exist in 'profiles/updates/'
+ subdirectories of all active repositories (PORTDIR + PORTDIR_OVERLAY).
+ This simply returns if ROOT != "/" (when len(trees) != 1). If ROOT != "/"
+ then the user should instead use emaint --fix movebin and/or moveinst.
+
+ @param trees: A dictionary containing portage trees.
+ @type trees: dict
+ @param prev_mtimes: A dictionary containing mtimes of files located in
+ $PORTDIR/profiles/updates/.
+ @type prev_mtimes: dict
+ @rtype: bool
+ @return: True if update commands have been performed, otherwise False
+ """
+ # only do this if we're root and not running repoman/ebuild digest
+
+ retupd = False
+ if secpass < 2 or \
+ "SANDBOX_ACTIVE" in os.environ or \
+ len(trees) != 1:
+ return retupd
+ root = "/"
+ mysettings = trees[root]["vartree"].settings
+ portdb = trees[root]["porttree"].dbapi
+ vardb = trees[root]["vartree"].dbapi
+ bindb = trees[root]["bintree"].dbapi
+ if not os.access(bindb.bintree.pkgdir, os.W_OK):
+ bindb = None
+ else:
+ # Call binarytree.populate(), since we want to make sure it's
+ # only populated with local packages here (getbinpkgs=0).
+ bindb.bintree.populate()
+
+ world_file = os.path.join(mysettings['EROOT'], WORLD_FILE)
+ world_list = grabfile(world_file)
+ world_modified = False
+ world_warnings = set()
+ updpath_map = {}
+ # Maps repo_name to list of updates. If a given repo has no updates
+ # directory, it will be omitted. If a repo has an updates directory
+ # but none need to be applied (according to timestamp logic), the
+ # value in the dict will be an empty list.
+ repo_map = {}
+ timestamps = {}
+
+ update_notice_printed = False
+ for repo_name in portdb.getRepositories():
+ repo = portdb.getRepositoryPath(repo_name)
+ updpath = os.path.join(repo, "profiles", "updates")
+ if not os.path.isdir(updpath):
+ continue
+
+ if updpath in updpath_map:
+ repo_map[repo_name] = updpath_map[updpath]
+ continue
+
+ try:
+ if mysettings.get("PORTAGE_CALLER") == "fixpackages":
+ update_data = grab_updates(updpath)
+ else:
+ update_data = grab_updates(updpath, prev_mtimes)
+ except DirectoryNotFound:
+ continue
+ myupd = []
+ updpath_map[updpath] = myupd
+ repo_map[repo_name] = myupd
+ if len(update_data) > 0:
+ for mykey, mystat, mycontent in update_data:
+ if not update_notice_printed:
+ update_notice_printed = True
+ writemsg_stdout("\n")
+ if quiet:
+ writemsg_stdout(colorize("GOOD",
+ _("Performing Global Updates\n")))
+ writemsg_stdout(_("(Could take a couple of minutes if you have a lot of binary packages.)\n"))
+ else:
+ writemsg_stdout(colorize("GOOD",
+ _("Performing Global Updates:\n")))
+ writemsg_stdout(_("(Could take a couple of minutes if you have a lot of binary packages.)\n"))
+ writemsg_stdout(_(" %s='update pass' %s='binary update' "
+ "%s='/var/db update' %s='/var/db move'\n"
+ " %s='/var/db SLOT move' %s='binary move' "
+ "%s='binary SLOT move'\n %s='update /etc/portage/package.*'\n") % \
+ (bold("."), bold("*"), bold("#"), bold("@"), bold("s"), bold("%"), bold("S"), bold("p")))
+ valid_updates, errors = parse_updates(mycontent)
+ myupd.extend(valid_updates)
+ if not quiet:
+ writemsg_stdout(bold(mykey))
+ writemsg_stdout(len(valid_updates) * "." + "\n")
+ if len(errors) == 0:
+ # Update our internal mtime since we
+ # processed all of our directives.
+ timestamps[mykey] = mystat[stat.ST_MTIME]
+ else:
+ for msg in errors:
+ writemsg("%s\n" % msg, noiselevel=-1)
+ if myupd:
+ retupd = True
+
+ master_repo = portdb.getRepositoryName(portdb.porttree_root)
+ if master_repo in repo_map:
+ repo_map['DEFAULT'] = repo_map[master_repo]
+
+ for repo_name, myupd in repo_map.items():
+ if repo_name == 'DEFAULT':
+ continue
+ if not myupd:
+ continue
+
+ def repo_match(repository):
+ return repository == repo_name or \
+ (repo_name == master_repo and repository not in repo_map)
+
+ def _world_repo_match(atoma, atomb):
+ """
+ Check whether to perform a world change from atoma to atomb.
+ If the best vardb match for atoma comes from the same repository
+ as the update file, allow the change. Additionally, if portdb can
+ still find a match for the old atom name, warn about that.
+ """
+ matches = vardb.match(atoma)
+ if not matches:
+ matches = vardb.match(atomb)
+ if matches and \
+ repo_match(vardb.aux_get(best(matches), ['repository'])[0]):
+ if portdb.match(atoma):
+ world_warnings.add((atoma, atomb))
+ return True
+ else:
+ return False
+
+ for update_cmd in myupd:
+ for pos, atom in enumerate(world_list):
+ new_atom = update_dbentry(update_cmd, atom)
+ if atom != new_atom:
+ if _world_repo_match(atom, new_atom):
+ world_list[pos] = new_atom
+ world_modified = True
+
+ for update_cmd in myupd:
+ if update_cmd[0] == "move":
+ moves = vardb.move_ent(update_cmd, repo_match=repo_match)
+ if moves:
+ writemsg_stdout(moves * "@")
+ if bindb:
+ moves = bindb.move_ent(update_cmd, repo_match=repo_match)
+ if moves:
+ writemsg_stdout(moves * "%")
+ elif update_cmd[0] == "slotmove":
+ moves = vardb.move_slot_ent(update_cmd, repo_match=repo_match)
+ if moves:
+ writemsg_stdout(moves * "s")
+ if bindb:
+ moves = bindb.move_slot_ent(update_cmd, repo_match=repo_match)
+ if moves:
+ writemsg_stdout(moves * "S")
+
+ if world_modified:
+ world_list.sort()
+ write_atomic(world_file,
+ "".join("%s\n" % (x,) for x in world_list))
+ if world_warnings:
+ # XXX: print warning that we've updated world entries
+ # and the old name still matches something (from an overlay)?
+ pass
+
+ if retupd:
+
+ def _config_repo_match(repo_name, atoma, atomb):
+ """
+ Check whether to perform a configuration file change from atoma
+ to atomb: allow the change if the best vardb match for atoma
+ comes from the same repository as the update file.
+ """
+ matches = vardb.match(atoma)
+ if not matches:
+ matches = vardb.match(atomb)
+ if not matches:
+ return False
+ repository = vardb.aux_get(best(matches), ['repository'])[0]
+ return repository == repo_name or \
+ (repo_name == master_repo and repository not in repo_map)
+
+ update_config_files(root,
+ shlex_split(mysettings.get("CONFIG_PROTECT", "")),
+ shlex_split(mysettings.get("CONFIG_PROTECT_MASK", "")),
+ repo_map, match_callback=_config_repo_match)
+
+ # The above global updates proceed quickly, so they
+ # are considered a single mtimedb transaction.
+ if timestamps:
+ # We do not update the mtime in the mtimedb
+ # until after _all_ of the above updates have
+ # been processed because the mtimedb will
+ # automatically commit when killed by ctrl C.
+ for mykey, mtime in timestamps.items():
+ prev_mtimes[mykey] = mtime
+
+ do_upgrade_packagesmessage = False
+ # We gotta do the brute force updates for these now.
+ if mysettings.get("PORTAGE_CALLER") == "fixpackages" or \
+ "fixpackages" in mysettings.features:
+ def onUpdate(maxval, curval):
+ if curval > 0:
+ writemsg_stdout("#")
+ if quiet:
+ onUpdate = None
+ vardb.update_ents(repo_map, onUpdate=onUpdate)
+ if bindb:
+ def onUpdate(maxval, curval):
+ if curval > 0:
+ writemsg_stdout("*")
+ if quiet:
+ onUpdate = None
+ bindb.update_ents(repo_map, onUpdate=onUpdate)
+ else:
+ do_upgrade_packagesmessage = 1
+
+ # Update progress above is indicated by characters written to stdout so
+ # we print a couple new lines here to separate the progress output from
+ # what follows.
+ print()
+ print()
+
+ if do_upgrade_packagesmessage and bindb and \
+ bindb.cpv_all():
+ writemsg_stdout(_(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the tbz2's in the packages directory.\n"))
+ writemsg_stdout(bold(_("Note: This can take a very long time.")))
+ writemsg_stdout("\n")
+
+ return retupd
diff --git a/portage_with_autodep/pym/portage/_legacy_globals.py b/portage_with_autodep/pym/portage/_legacy_globals.py
new file mode 100644
index 0000000..615591a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_legacy_globals.py
@@ -0,0 +1,81 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from portage.const import CACHE_PATH, PROFILE_PATH
+
+def _get_legacy_global(name):
+ constructed = portage._legacy_globals_constructed
+ if name in constructed:
+ return getattr(portage, name)
+
+ if name == 'portdb':
+ portage.portdb = portage.db[portage.root]["porttree"].dbapi
+ constructed.add(name)
+ return getattr(portage, name)
+
+ elif name in ('mtimedb', 'mtimedbfile'):
+ portage.mtimedbfile = os.path.join(portage.settings['EROOT'],
+ CACHE_PATH, "mtimedb")
+ constructed.add('mtimedbfile')
+ portage.mtimedb = portage.MtimeDB(portage.mtimedbfile)
+ constructed.add('mtimedb')
+ return getattr(portage, name)
+
+ # Portage needs to ensure a sane umask for the files it creates.
+ os.umask(0o22)
+
+ kwargs = {}
+ for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
+ kwargs[k] = os.environ.get(envvar)
+
+ portage._initializing_globals = True
+ portage.db = portage.create_trees(**kwargs)
+ constructed.add('db')
+ del portage._initializing_globals
+
+ settings = portage.db["/"]["vartree"].settings
+
+ for root in portage.db:
+ if root != "/":
+ settings = portage.db[root]["vartree"].settings
+ break
+
+ portage.output._init(config_root=settings['PORTAGE_CONFIGROOT'])
+
+ portage.settings = settings
+ constructed.add('settings')
+
+ portage.root = root
+ constructed.add('root')
+
+ # COMPATIBILITY
+ # These attributes should not be used within
+ # Portage under any circumstances.
+
+ portage.archlist = settings.archlist()
+ constructed.add('archlist')
+
+ portage.features = settings.features
+ constructed.add('features')
+
+ portage.groups = settings["ACCEPT_KEYWORDS"].split()
+ constructed.add('groups')
+
+ portage.pkglines = settings.packages
+ constructed.add('pkglines')
+
+ portage.selinux_enabled = settings.selinux_enabled()
+ constructed.add('selinux_enabled')
+
+ portage.thirdpartymirrors = settings.thirdpartymirrors()
+ constructed.add('thirdpartymirrors')
+
+ profiledir = os.path.join(settings["PORTAGE_CONFIGROOT"], PROFILE_PATH)
+ if not os.path.isdir(profiledir):
+ profiledir = None
+ portage.profiledir = profiledir
+ constructed.add('profiledir')
+
+ return getattr(portage, name)
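+
+ # Example (added comment): the _LegacyGlobalProxy instances installed in
+ # portage/__init__.py call _get_legacy_global() on first use, so merely
+ # referencing portage.settings triggers create_trees() and the
+ # construction of the other legacy globals here.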
diff --git a/portage_with_autodep/pym/portage/_selinux.py b/portage_with_autodep/pym/portage/_selinux.py
new file mode 100644
index 0000000..9470978
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_selinux.py
@@ -0,0 +1,129 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+# Don't use the unicode-wrapped os and shutil modules here since
+# the whole _selinux module itself will be wrapped.
+import os
+import shutil
+
+import portage
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.localization import _
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'selinux')
+
+def copyfile(src, dest):
+ src = _unicode_encode(src, encoding=_encodings['fs'], errors='strict')
+ dest = _unicode_encode(dest, encoding=_encodings['fs'], errors='strict')
+ (rc, ctx) = selinux.lgetfilecon(src)
+ if rc < 0:
+ src = _unicode_decode(src, encoding=_encodings['fs'], errors='replace')
+ raise OSError(_("copyfile: Failed getting context of \"%s\".") % src)
+
+ setfscreate(ctx)
+ try:
+ shutil.copyfile(src, dest)
+ finally:
+ setfscreate()
+
+def getcontext():
+ (rc, ctx) = selinux.getcon()
+ if rc < 0:
+ raise OSError(_("getcontext: Failed getting current process context."))
+
+ return ctx
+
+def is_selinux_enabled():
+ return selinux.is_selinux_enabled()
+
+def mkdir(target, refdir):
+ target = _unicode_encode(target, encoding=_encodings['fs'], errors='strict')
+ refdir = _unicode_encode(refdir, encoding=_encodings['fs'], errors='strict')
+ (rc, ctx) = selinux.getfilecon(refdir)
+ if rc < 0:
+ refdir = _unicode_decode(refdir, encoding=_encodings['fs'],
+ errors='replace')
+ raise OSError(
+ _("mkdir: Failed getting context of reference directory \"%s\".") \
+ % refdir)
+
+ setfscreate(ctx)
+ try:
+ os.mkdir(target)
+ finally:
+ setfscreate()
+
+def rename(src, dest):
+ src = _unicode_encode(src, encoding=_encodings['fs'], errors='strict')
+ dest = _unicode_encode(dest, encoding=_encodings['fs'], errors='strict')
+ (rc, ctx) = selinux.lgetfilecon(src)
+ if rc < 0:
+ src = _unicode_decode(src, encoding=_encodings['fs'], errors='replace')
+ raise OSError(_("rename: Failed getting context of \"%s\".") % src)
+
+ setfscreate(ctx)
+ try:
+ os.rename(src,dest)
+ finally:
+ setfscreate()
+
+def settype(newtype):
+ ret = getcontext().split(":")
+ ret[2] = newtype
+ return ":".join(ret)
+
+def setexec(ctx="\n"):
+ ctx = _unicode_encode(ctx, encoding=_encodings['content'], errors='strict')
+ if selinux.setexeccon(ctx) < 0:
+ ctx = _unicode_decode(ctx, encoding=_encodings['content'],
+ errors='replace')
+ if selinux.security_getenforce() == 1:
+ raise OSError(_("Failed setting exec() context \"%s\".") % ctx)
+ else:
+ portage.writemsg("!!! " + \
+ _("Failed setting exec() context \"%s\".") % ctx, \
+ noiselevel=-1)
+
+def setfscreate(ctx="\n"):
+ ctx = _unicode_encode(ctx,
+ encoding=_encodings['content'], errors='strict')
+ if selinux.setfscreatecon(ctx) < 0:
+ ctx = _unicode_decode(ctx,
+ encoding=_encodings['content'], errors='replace')
+ raise OSError(
+ _("setfscreate: Failed setting fs create context \"%s\".") % ctx)
+
+def spawn_wrapper(spawn_func, selinux_type):
+
+ selinux_type = _unicode_encode(selinux_type,
+ encoding=_encodings['content'], errors='strict')
+
+ def wrapper_func(*args, **kwargs):
+ con = settype(selinux_type)
+ setexec(con)
+ try:
+ return spawn_func(*args, **kwargs)
+ finally:
+ setexec()
+
+ return wrapper_func
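+
+ # Illustrative usage (added comment; the spawn function is whatever the
+ # caller passes in, e.g. portage.process.spawn):
+ # spawn = spawn_wrapper(spawn_func, "portage_t")
+ # Each call switches the exec() context to the given type beforehand
+ # and resets it (setexec() with the default "\n") afterwards.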
+
+def symlink(target, link, reflnk):
+ target = _unicode_encode(target, encoding=_encodings['fs'], errors='strict')
+ link = _unicode_encode(link, encoding=_encodings['fs'], errors='strict')
+ reflnk = _unicode_encode(reflnk, encoding=_encodings['fs'], errors='strict')
+ (rc, ctx) = selinux.lgetfilecon(reflnk)
+ if rc < 0:
+ reflnk = _unicode_decode(reflnk, encoding=_encodings['fs'],
+ errors='replace')
+ raise OSError(
+ _("symlink: Failed getting context of reference symlink \"%s\".") \
+ % reflnk)
+
+ setfscreate(ctx)
+ try:
+ os.symlink(target, link)
+ finally:
+ setfscreate()
diff --git a/portage_with_autodep/pym/portage/_sets/__init__.py b/portage_with_autodep/pym/portage/_sets/__init__.py
new file mode 100644
index 0000000..1b3484e
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_sets/__init__.py
@@ -0,0 +1,245 @@
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+__all__ = ["SETPREFIX", "get_boolean", "SetConfigError",
+ "SetConfig", "load_default_config"]
+
+try:
+ from configparser import SafeConfigParser, NoOptionError
+except ImportError:
+ from ConfigParser import SafeConfigParser, NoOptionError
+from portage import os
+from portage import load_mod
+from portage.const import USER_CONFIG_PATH, GLOBAL_CONFIG_PATH
+from portage.const import _ENABLE_SET_CONFIG
+from portage.exception import PackageSetNotFound
+from portage.localization import _
+
+SETPREFIX = "@"
+
+def get_boolean(options, name, default):
+ if name not in options:
+ return default
+ elif options[name].lower() in ("1", "yes", "on", "true"):
+ return True
+ elif options[name].lower() in ("0", "no", "off", "false"):
+ return False
+ else:
+ raise SetConfigError(_("invalid value '%(value)s' for option '%(option)s'") % {"value": options[name], "option": name})
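+
+ # Examples (added comment):
+ # get_boolean({"multiset": "Yes"}, "multiset", False) -> True
+ # get_boolean({}, "multiset", False) -> False (missing key -> default)
+ # Any other value raises SetConfigError.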
+
+class SetConfigError(Exception):
+ pass
+
+class SetConfig(object):
+ def __init__(self, paths, settings, trees):
+ self._parser = SafeConfigParser(
+ defaults={
+ "EPREFIX" : settings["EPREFIX"],
+ "EROOT" : settings["EROOT"],
+ "PORTAGE_CONFIGROOT" : settings["PORTAGE_CONFIGROOT"],
+ "ROOT" : settings["ROOT"],
+ })
+
+ if _ENABLE_SET_CONFIG:
+ self._parser.read(paths)
+ else:
+ self._create_default_config()
+
+ self.errors = []
+ self.psets = {}
+ self.trees = trees
+ self.settings = settings
+ self._parsed = False
+ self.active = []
+
+ def _create_default_config(self):
+ """
+ Create a default hardcoded set configuration for a portage version
+ that does not support set configuration files. This is only used
+ in the current branch of portage if _ENABLE_SET_CONFIG is False.
+ Even if it's not used in this branch, keep it here in order to
+ minimize the diff between branches.
+
+ [world]
+ class = portage.sets.base.DummyPackageSet
+ packages = @selected @system
+
+ [selected]
+ class = portage.sets.files.WorldSelectedSet
+
+ [system]
+ class = portage.sets.profiles.PackagesSystemSet
+
+ """
+ parser = self._parser
+
+ parser.add_section("world")
+ parser.set("world", "class", "portage.sets.base.DummyPackageSet")
+ parser.set("world", "packages", "@selected @system")
+
+ parser.add_section("selected")
+ parser.set("selected", "class", "portage.sets.files.WorldSelectedSet")
+
+ parser.add_section("system")
+ parser.set("system", "class", "portage.sets.profiles.PackagesSystemSet")
+
+ def update(self, setname, options):
+ parser = self._parser
+ self.errors = []
+ if setname not in self.psets:
+ options["name"] = setname
+ options["world-candidate"] = "False"
+
+ # for the unlikely case that there is already a section with the requested setname
+ import random
+ while setname in parser.sections():
+ setname = "%08d" % random.randint(0, 10**10)
+
+ parser.add_section(setname)
+ for k, v in options.items():
+ parser.set(setname, k, v)
+ else:
+ section = self.psets[setname].creator
+ if parser.has_option(section, "multiset") and \
+ parser.getboolean(section, "multiset"):
+ self.errors.append(_("Invalid request to reconfigure set '%(set)s' generated "
+ "by multiset section '%(section)s'") % {"set": setname, "section": section})
+ return
+ for k, v in options.items():
+ parser.set(section, k, v)
+ self._parse(update=True)
+
+ def _parse(self, update=False):
+ if self._parsed and not update:
+ return
+ parser = self._parser
+ for sname in parser.sections():
+ # find classname for current section, default to file based sets
+ if not parser.has_option(sname, "class"):
+ classname = "portage._sets.files.StaticFileSet"
+ else:
+ classname = parser.get(sname, "class")
+
+ if classname.startswith('portage.sets.'):
+ # The module has been made private, but we still support
+ # the previous namespace for sets.conf entries.
+ classname = classname.replace('sets', '_sets', 1)
+
+ # try to import the specified class
+ try:
+ setclass = load_mod(classname)
+ except (ImportError, AttributeError):
+ try:
+ setclass = load_mod("portage._sets." + classname)
+ except (ImportError, AttributeError):
+ self.errors.append(_("Could not import '%(class)s' for section "
+ "'%(section)s'") % {"class": classname, "section": sname})
+ continue
+ # prepare option dict for the current section
+ optdict = {}
+ for oname in parser.options(sname):
+ optdict[oname] = parser.get(sname, oname)
+
+ # create single or multiple instances of the given class depending on configuration
+ if parser.has_option(sname, "multiset") and \
+ parser.getboolean(sname, "multiset"):
+ if hasattr(setclass, "multiBuilder"):
+ newsets = {}
+ try:
+ newsets = setclass.multiBuilder(optdict, self.settings, self.trees)
+ except SetConfigError as e:
+ self.errors.append(_("Configuration error in section '%s': %s") % (sname, str(e)))
+ continue
+ for x in newsets:
+ if x in self.psets and not update:
+ self.errors.append(_("Redefinition of set '%s' (sections: '%s', '%s')") % (x, self.psets[x].creator, sname))
+ newsets[x].creator = sname
+ if parser.has_option(sname, "world-candidate") and \
+ parser.getboolean(sname, "world-candidate"):
+ newsets[x].world_candidate = True
+ self.psets.update(newsets)
+ else:
+ self.errors.append(_("Section '%(section)s' is configured as multiset, but '%(class)s' "
+ "doesn't support that configuration") % {"section": sname, "class": classname})
+ continue
+ else:
+ try:
+ setname = parser.get(sname, "name")
+ except NoOptionError:
+ setname = sname
+ if setname in self.psets and not update:
+ self.errors.append(_("Redefinition of set '%s' (sections: '%s', '%s')") % (setname, self.psets[setname].creator, sname))
+ if hasattr(setclass, "singleBuilder"):
+ try:
+ self.psets[setname] = setclass.singleBuilder(optdict, self.settings, self.trees)
+ self.psets[setname].creator = sname
+ if parser.has_option(sname, "world-candidate") and \
+ parser.getboolean(sname, "world-candidate"):
+ self.psets[setname].world_candidate = True
+ except SetConfigError as e:
+ self.errors.append(_("Configuration error in section '%s': %s") % (sname, str(e)))
+ continue
+ else:
+ self.errors.append(_("'%(class)s' does not support individual set creation, section '%(section)s' "
+ "must be configured as multiset") % {"class": classname, "section": sname})
+ continue
+ self._parsed = True
+
+ def getSets(self):
+ self._parse()
+ return self.psets.copy()
+
+ def getSetAtoms(self, setname, ignorelist=None):
+ """
+		This raises PackageSetNotFound if the given setname does not exist.
+ """
+ self._parse()
+ try:
+ myset = self.psets[setname]
+ except KeyError:
+ raise PackageSetNotFound(setname)
+ myatoms = myset.getAtoms()
+ parser = self._parser
+
+ if ignorelist is None:
+ ignorelist = set()
+
+ ignorelist.add(setname)
+ for n in myset.getNonAtoms():
+ if n.startswith(SETPREFIX):
+ s = n[len(SETPREFIX):]
+ if s in self.psets:
+ if s not in ignorelist:
+ myatoms.update(self.getSetAtoms(s,
+ ignorelist=ignorelist))
+ else:
+ raise PackageSetNotFound(s)
+
+ return myatoms
+
+def load_default_config(settings, trees):
+
+ if not _ENABLE_SET_CONFIG:
+ return SetConfig(None, settings, trees)
+
+ global_config_path = GLOBAL_CONFIG_PATH
+ if settings['EPREFIX']:
+ global_config_path = os.path.join(settings['EPREFIX'],
+ GLOBAL_CONFIG_PATH.lstrip(os.sep))
+ def _getfiles():
+ for path, dirs, files in os.walk(os.path.join(global_config_path, "sets")):
+ for f in files:
+				if not f.startswith('.'):
+ yield os.path.join(path, f)
+
+ dbapi = trees["porttree"].dbapi
+ for repo in dbapi.getRepositories():
+ path = dbapi.getRepositoryPath(repo)
+ yield os.path.join(path, "sets.conf")
+
+ yield os.path.join(settings["PORTAGE_CONFIGROOT"],
+ USER_CONFIG_PATH, "sets.conf")
+
+ return SetConfig(_getfiles(), settings, trees)
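
The SetConfig class above maps INI-style sections from sets.conf files onto
PackageSet instances. A minimal sketch of how a custom section might be
declared and then resolved, assuming a configured portage instance provides
`settings` and `trees` (the section name and file path are illustrative):

    # Hypothetical sets.conf fragment handled by SetConfig._parse():
    #
    #   [mytools]
    #   class = portage._sets.files.StaticFileSet
    #   filename = /etc/portage/sets/mytools

    from portage._sets import load_default_config

    setconfig = load_default_config(settings, trees)
    sets = setconfig.getSets()                # {setname: PackageSet}
    atoms = setconfig.getSetAtoms("mytools")  # raises PackageSetNotFound
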
diff --git a/portage_with_autodep/pym/portage/_sets/base.py b/portage_with_autodep/pym/portage/_sets/base.py
new file mode 100644
index 0000000..c8d3ae4
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_sets/base.py
@@ -0,0 +1,264 @@
+# Copyright 2007-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+from portage.dep import Atom, ExtendedAtomDict, best_match_to_list, match_from_list
+from portage.exception import InvalidAtom
+from portage.versions import cpv_getkey
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+OPERATIONS = ["merge", "unmerge"]
+
+class PackageSet(object):
+ # Set this to operations that are supported by your subclass. While
+ # technically there is no difference between "merge" and "unmerge" regarding
+ # package sets, the latter doesn't make sense for some sets like "system"
+ # or "security" and therefore isn't supported by them.
+ _operations = ["merge"]
+ description = "generic package set"
+
+ def __init__(self, allow_wildcard=False, allow_repo=False):
+ self._atoms = set()
+ self._atommap = ExtendedAtomDict(set)
+ self._loaded = False
+ self._loading = False
+ self.errors = []
+ self._nonatoms = set()
+ self.world_candidate = False
+ self._allow_wildcard = allow_wildcard
+ self._allow_repo = allow_repo
+
+ def __contains__(self, atom):
+ self._load()
+ return atom in self._atoms or atom in self._nonatoms
+
+ def __iter__(self):
+ self._load()
+ for x in self._atoms:
+ yield x
+ for x in self._nonatoms:
+ yield x
+
+ def __bool__(self):
+ self._load()
+ return bool(self._atoms or self._nonatoms)
+
+ if sys.hexversion < 0x3000000:
+ __nonzero__ = __bool__
+
+ def supportsOperation(self, op):
+ if not op in OPERATIONS:
+ raise ValueError(op)
+ return op in self._operations
+
+ def _load(self):
+ if not (self._loaded or self._loading):
+ self._loading = True
+ self.load()
+ self._loaded = True
+ self._loading = False
+
+ def getAtoms(self):
+ self._load()
+ return self._atoms.copy()
+
+ def getNonAtoms(self):
+ self._load()
+ return self._nonatoms.copy()
+
+ def _setAtoms(self, atoms):
+ self._atoms.clear()
+ self._nonatoms.clear()
+ for a in atoms:
+ if not isinstance(a, Atom):
+ if isinstance(a, basestring):
+ a = a.strip()
+ if not a:
+ continue
+ try:
+ a = Atom(a, allow_wildcard=True, allow_repo=True)
+ except InvalidAtom:
+ self._nonatoms.add(a)
+ continue
+ if not self._allow_wildcard and a.extended_syntax:
+ raise InvalidAtom("extended atom syntax not allowed here")
+ if not self._allow_repo and a.repo:
+ raise InvalidAtom("repository specification not allowed here")
+ self._atoms.add(a)
+
+ self._updateAtomMap()
+
+ def load(self):
+ # This method must be overwritten by subclasses
+ # Editable sets should use the value of self._mtime to determine if they
+ # need to reload themselves
+ raise NotImplementedError()
+
+ def containsCPV(self, cpv):
+ self._load()
+ for a in self._atoms:
+ if match_from_list(a, [cpv]):
+ return True
+ return False
+
+ def getMetadata(self, key):
+ if hasattr(self, key.lower()):
+ return getattr(self, key.lower())
+ else:
+ return ""
+
+ def _updateAtomMap(self, atoms=None):
+ """Update self._atommap for specific atoms or all atoms."""
+ if not atoms:
+ self._atommap.clear()
+ atoms = self._atoms
+ for a in atoms:
+ self._atommap.setdefault(a.cp, set()).add(a)
+
+ # Not sure if this one should really be in PackageSet
+ def findAtomForPackage(self, pkg, modified_use=None):
+ """Return the best match for a given package from the arguments, or
+ None if there are no matches. This matches virtual arguments against
+ the PROVIDE metadata. This can raise an InvalidDependString exception
+ if an error occurs while parsing PROVIDE."""
+
+ if modified_use is not None and modified_use is not pkg.use.enabled:
+ pkg = pkg.copy()
+ pkg.metadata["USE"] = " ".join(modified_use)
+
+ # Atoms matched via PROVIDE must be temporarily transformed since
+ # match_from_list() only works correctly when atom.cp == pkg.cp.
+ rev_transform = {}
+ for atom in self.iterAtomsForPackage(pkg):
+ if atom.cp == pkg.cp:
+ rev_transform[atom] = atom
+ else:
+ rev_transform[Atom(atom.replace(atom.cp, pkg.cp, 1), allow_wildcard=True, allow_repo=True)] = atom
+ best_match = best_match_to_list(pkg, iter(rev_transform))
+ if best_match:
+ return rev_transform[best_match]
+ return None
+
+ def iterAtomsForPackage(self, pkg):
+ """
+ Find all matching atoms for a given package. This matches virtual
+ arguments against the PROVIDE metadata. This will raise an
+ InvalidDependString exception if PROVIDE is invalid.
+ """
+ cpv_slot_list = [pkg]
+ cp = cpv_getkey(pkg.cpv)
+ self._load() # make sure the atoms are loaded
+
+ atoms = self._atommap.get(cp)
+ if atoms:
+ for atom in atoms:
+ if match_from_list(atom, cpv_slot_list):
+ yield atom
+ provides = pkg.metadata['PROVIDE']
+ if not provides:
+ return
+ provides = provides.split()
+ for provide in provides:
+ try:
+ provided_cp = Atom(provide).cp
+ except InvalidAtom:
+ continue
+ atoms = self._atommap.get(provided_cp)
+ if atoms:
+ for atom in atoms:
+ if match_from_list(atom.replace(provided_cp, cp),
+ cpv_slot_list):
+ yield atom
+
+class EditablePackageSet(PackageSet):
+
+ def __init__(self, allow_wildcard=False, allow_repo=False):
+ super(EditablePackageSet, self).__init__(allow_wildcard=allow_wildcard, allow_repo=allow_repo)
+
+ def update(self, atoms):
+ self._load()
+ modified = False
+ normal_atoms = []
+ for a in atoms:
+ if not isinstance(a, Atom):
+ try:
+ a = Atom(a, allow_wildcard=True, allow_repo=True)
+ except InvalidAtom:
+ modified = True
+ self._nonatoms.add(a)
+ continue
+ if not self._allow_wildcard and a.extended_syntax:
+ raise InvalidAtom("extended atom syntax not allowed here")
+ if not self._allow_repo and a.repo:
+ raise InvalidAtom("repository specification not allowed here")
+ normal_atoms.append(a)
+
+ if normal_atoms:
+ modified = True
+ self._atoms.update(normal_atoms)
+ self._updateAtomMap(atoms=normal_atoms)
+ if modified:
+ self.write()
+
+ def add(self, atom):
+ self.update([atom])
+
+ def replace(self, atoms):
+ self._setAtoms(atoms)
+ self.write()
+
+ def remove(self, atom):
+ self._load()
+ self._atoms.discard(atom)
+ self._nonatoms.discard(atom)
+ self._updateAtomMap()
+ self.write()
+
+ def removePackageAtoms(self, cp):
+ self._load()
+ for a in list(self._atoms):
+ if a.cp == cp:
+ self.remove(a)
+ self.write()
+
+ def write(self):
+ # This method must be overwritten in subclasses that should be editable
+ raise NotImplementedError()
+
+class InternalPackageSet(EditablePackageSet):
+ def __init__(self, initial_atoms=None, allow_wildcard=False, allow_repo=True):
+ """
+ Repo atoms are allowed more often than not, so it makes sense for this
+ class to allow them by default. The Atom constructor and isvalidatom()
+ functions default to allow_repo=False, which is sufficient to ensure
+ that repo atoms are prohibited when necessary.
+ """
+ super(InternalPackageSet, self).__init__(allow_wildcard=allow_wildcard, allow_repo=allow_repo)
+ if initial_atoms != None:
+ self.update(initial_atoms)
+
+ def clear(self):
+ self._atoms.clear()
+ self._updateAtomMap()
+
+ def load(self):
+ pass
+
+ def write(self):
+ pass
+
+class DummyPackageSet(PackageSet):
+ def __init__(self, atoms=None):
+ super(DummyPackageSet, self).__init__()
+ if atoms:
+ self._setAtoms(atoms)
+
+ def load(self):
+ pass
+
+ def singleBuilder(cls, options, settings, trees):
+ atoms = options.get("packages", "").split()
+ return DummyPackageSet(atoms=atoms)
+ singleBuilder = classmethod(singleBuilder)
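
PackageSet above defines the subclass contract used by every set in this
package: implement load(), feed the results through _setAtoms(), and expose
a singleBuilder() (or multiBuilder()) classmethod so SetConfig can build
instances from sets.conf sections. A minimal illustrative subclass (the
class name and atoms are invented for the example):

    from portage._sets.base import PackageSet

    class PinnedSet(PackageSet):
        _operations = ["merge"]
        description = "Example set with a fixed atom list"

        def load(self):
            # Subclasses populate the set via _setAtoms().
            self._setAtoms(["app-editors/vim", "sys-devel/gcc"])

        @classmethod
        def singleBuilder(cls, options, settings, trees):
            return cls()
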
diff --git a/portage_with_autodep/pym/portage/_sets/dbapi.py b/portage_with_autodep/pym/portage/_sets/dbapi.py
new file mode 100644
index 0000000..0f238f0
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_sets/dbapi.py
@@ -0,0 +1,383 @@
+# Copyright 2007-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import time
+
+from portage import os
+from portage.versions import catpkgsplit, catsplit, pkgcmp, best
+from portage.dep import Atom
+from portage.localization import _
+from portage._sets.base import PackageSet
+from portage._sets import SetConfigError, get_boolean
+import portage
+
+__all__ = ["CategorySet", "DowngradeSet",
+ "EverythingSet", "OwnerSet", "VariableSet"]
+
+class EverythingSet(PackageSet):
+ _operations = ["merge"]
+ description = "Package set which contains SLOT " + \
+ "atoms to match all installed packages"
+ _filter = None
+
+ def __init__(self, vdbapi, **kwargs):
+ super(EverythingSet, self).__init__()
+ self._db = vdbapi
+
+ def load(self):
+ myatoms = []
+ db_keys = ["SLOT"]
+ aux_get = self._db.aux_get
+ cp_list = self._db.cp_list
+
+ for cp in self._db.cp_all():
+ for cpv in cp_list(cp):
+ # NOTE: Create SLOT atoms even when there is only one
+ # SLOT installed, in order to avoid the possibility
+ # of unwanted upgrades as reported in bug #338959.
+ slot, = aux_get(cpv, db_keys)
+ atom = Atom("%s:%s" % (cp, slot))
+ if self._filter:
+ if self._filter(atom):
+ myatoms.append(atom)
+ else:
+ myatoms.append(atom)
+
+ self._setAtoms(myatoms)
+
+ def singleBuilder(self, options, settings, trees):
+ return EverythingSet(trees["vartree"].dbapi)
+ singleBuilder = classmethod(singleBuilder)
+
+class OwnerSet(PackageSet):
+
+ _operations = ["merge", "unmerge"]
+
+ description = "Package set which contains all packages " + \
+ "that own one or more files."
+
+ def __init__(self, vardb=None, exclude_files=None, files=None):
+ super(OwnerSet, self).__init__()
+ self._db = vardb
+ self._exclude_files = exclude_files
+ self._files = files
+
+ def mapPathsToAtoms(self, paths, exclude_paths=None):
+ """
+ All paths must have $EROOT stripped from the left side.
+ """
+ rValue = set()
+ vardb = self._db
+ aux_get = vardb.aux_get
+ aux_keys = ["SLOT"]
+ if exclude_paths is None:
+ for link, p in vardb._owners.iter_owners(paths):
+ cat, pn = catpkgsplit(link.mycpv)[:2]
+ slot, = aux_get(link.mycpv, aux_keys)
+ rValue.add("%s/%s:%s" % (cat, pn, slot))
+ else:
+ all_paths = set()
+ all_paths.update(paths)
+ all_paths.update(exclude_paths)
+ exclude_atoms = set()
+ for link, p in vardb._owners.iter_owners(all_paths):
+ cat, pn = catpkgsplit(link.mycpv)[:2]
+ slot, = aux_get(link.mycpv, aux_keys)
+ atom = "%s/%s:%s" % (cat, pn, slot)
+ rValue.add(atom)
+ if p in exclude_paths:
+ exclude_atoms.add(atom)
+ rValue.difference_update(exclude_atoms)
+
+ return rValue
+
+ def load(self):
+ self._setAtoms(self.mapPathsToAtoms(self._files,
+ exclude_paths=self._exclude_files))
+
+ def singleBuilder(cls, options, settings, trees):
+ if not "files" in options:
+ raise SetConfigError(_("no files given"))
+
+ exclude_files = options.get("exclude-files")
+ if exclude_files is not None:
+ exclude_files = frozenset(portage.util.shlex_split(exclude_files))
+ return cls(vardb=trees["vartree"].dbapi, exclude_files=exclude_files,
+ files=frozenset(portage.util.shlex_split(options["files"])))
+
+ singleBuilder = classmethod(singleBuilder)
+
+class VariableSet(EverythingSet):
+
+ _operations = ["merge", "unmerge"]
+
+ description = "Package set which contains all packages " + \
+ "that match specified values of a specified variable."
+
+ def __init__(self, vardb, metadatadb=None, variable=None, includes=None, excludes=None):
+ super(VariableSet, self).__init__(vardb)
+ self._metadatadb = metadatadb
+ self._variable = variable
+ self._includes = includes
+ self._excludes = excludes
+
+ def _filter(self, atom):
+ ebuild = best(self._metadatadb.match(atom))
+ if not ebuild:
+ return False
+ values, = self._metadatadb.aux_get(ebuild, [self._variable])
+ values = values.split()
+ if self._includes and not self._includes.intersection(values):
+ return False
+ if self._excludes and self._excludes.intersection(values):
+ return False
+ return True
+
+ def singleBuilder(cls, options, settings, trees):
+
+ variable = options.get("variable")
+ if variable is None:
+ raise SetConfigError(_("missing required attribute: 'variable'"))
+
+ includes = options.get("includes", "")
+ excludes = options.get("excludes", "")
+
+ if not (includes or excludes):
+ raise SetConfigError(_("no includes or excludes given"))
+
+ metadatadb = options.get("metadata-source", "vartree")
+ if not metadatadb in trees:
+ raise SetConfigError(_("invalid value '%s' for option metadata-source") % metadatadb)
+
+ return cls(trees["vartree"].dbapi,
+ metadatadb=trees[metadatadb].dbapi,
+ excludes=frozenset(excludes.split()),
+ includes=frozenset(includes.split()),
+ variable=variable)
+
+ singleBuilder = classmethod(singleBuilder)
+
+class DowngradeSet(PackageSet):
+
+ _operations = ["merge", "unmerge"]
+
+ description = "Package set which contains all packages " + \
+ "for which the highest visible ebuild version is lower than " + \
+ "the currently installed version."
+
+ def __init__(self, portdb=None, vardb=None):
+ super(DowngradeSet, self).__init__()
+ self._portdb = portdb
+ self._vardb = vardb
+
+ def load(self):
+ atoms = []
+ xmatch = self._portdb.xmatch
+ xmatch_level = "bestmatch-visible"
+ cp_list = self._vardb.cp_list
+ aux_get = self._vardb.aux_get
+ aux_keys = ["SLOT"]
+ for cp in self._vardb.cp_all():
+ for cpv in cp_list(cp):
+ slot, = aux_get(cpv, aux_keys)
+ slot_atom = "%s:%s" % (cp, slot)
+ ebuild = xmatch(xmatch_level, slot_atom)
+ if not ebuild:
+ continue
+ ebuild_split = catpkgsplit(ebuild)[1:]
+ installed_split = catpkgsplit(cpv)[1:]
+ if pkgcmp(installed_split, ebuild_split) > 0:
+ atoms.append(slot_atom)
+
+ self._setAtoms(atoms)
+
+ def singleBuilder(cls, options, settings, trees):
+ return cls(portdb=trees["porttree"].dbapi,
+ vardb=trees["vartree"].dbapi)
+
+ singleBuilder = classmethod(singleBuilder)
+
+class UnavailableSet(EverythingSet):
+
+ _operations = ["unmerge"]
+
+ description = "Package set which contains all installed " + \
+ "packages for which there are no visible ebuilds " + \
+ "corresponding to the same $CATEGORY/$PN:$SLOT."
+
+ def __init__(self, vardb, metadatadb=None):
+ super(UnavailableSet, self).__init__(vardb)
+ self._metadatadb = metadatadb
+
+ def _filter(self, atom):
+ return not self._metadatadb.match(atom)
+
+ def singleBuilder(cls, options, settings, trees):
+
+ metadatadb = options.get("metadata-source", "porttree")
+ if not metadatadb in trees:
+ raise SetConfigError(_("invalid value '%s' for option "
+ "metadata-source") % (metadatadb,))
+
+ return cls(trees["vartree"].dbapi,
+ metadatadb=trees[metadatadb].dbapi)
+
+ singleBuilder = classmethod(singleBuilder)
+
+class UnavailableBinaries(EverythingSet):
+
+ _operations = ('merge', 'unmerge',)
+
+ description = "Package set which contains all installed " + \
+ "packages for which corresponding binary packages " + \
+ "are not available."
+
+ def __init__(self, vardb, metadatadb=None):
+ super(UnavailableBinaries, self).__init__(vardb)
+ self._metadatadb = metadatadb
+
+ def _filter(self, atom):
+ inst_pkg = self._db.match(atom)
+ if not inst_pkg:
+ return False
+ inst_cpv = inst_pkg[0]
+ return not self._metadatadb.cpv_exists(inst_cpv)
+
+ def singleBuilder(cls, options, settings, trees):
+
+ metadatadb = options.get("metadata-source", "bintree")
+ if not metadatadb in trees:
+ raise SetConfigError(_("invalid value '%s' for option "
+ "metadata-source") % (metadatadb,))
+
+ return cls(trees["vartree"].dbapi,
+ metadatadb=trees[metadatadb].dbapi)
+
+ singleBuilder = classmethod(singleBuilder)
+
+class CategorySet(PackageSet):
+ _operations = ["merge", "unmerge"]
+
+ def __init__(self, category, dbapi, only_visible=True):
+ super(CategorySet, self).__init__()
+ self._db = dbapi
+ self._category = category
+ self._check = only_visible
+		if only_visible:
+			s = "visible"
+		else:
+			s = "all"
+ self.description = "Package set containing %s packages of category %s" % (s, self._category)
+
+ def load(self):
+ myatoms = []
+ for cp in self._db.cp_all():
+ if catsplit(cp)[0] == self._category:
+ if (not self._check) or len(self._db.match(cp)) > 0:
+ myatoms.append(cp)
+ self._setAtoms(myatoms)
+
+ def _builderGetRepository(cls, options, repositories):
+ repository = options.get("repository", "porttree")
+ if not repository in repositories:
+ raise SetConfigError(_("invalid repository class '%s'") % repository)
+ return repository
+ _builderGetRepository = classmethod(_builderGetRepository)
+
+ def _builderGetVisible(cls, options):
+ return get_boolean(options, "only_visible", True)
+ _builderGetVisible = classmethod(_builderGetVisible)
+
+ def singleBuilder(cls, options, settings, trees):
+ if not "category" in options:
+ raise SetConfigError(_("no category given"))
+
+ category = options["category"]
+ if not category in settings.categories:
+ raise SetConfigError(_("invalid category name '%s'") % category)
+
+ repository = cls._builderGetRepository(options, trees.keys())
+ visible = cls._builderGetVisible(options)
+
+ return CategorySet(category, dbapi=trees[repository].dbapi, only_visible=visible)
+ singleBuilder = classmethod(singleBuilder)
+
+ def multiBuilder(cls, options, settings, trees):
+ rValue = {}
+
+ if "categories" in options:
+ categories = options["categories"].split()
+ invalid = set(categories).difference(settings.categories)
+ if invalid:
+ raise SetConfigError(_("invalid categories: %s") % ", ".join(list(invalid)))
+ else:
+ categories = settings.categories
+
+ repository = cls._builderGetRepository(options, trees.keys())
+ visible = cls._builderGetVisible(options)
+ name_pattern = options.get("name_pattern", "$category/*")
+
+ if not "$category" in name_pattern and not "${category}" in name_pattern:
+ raise SetConfigError(_("name_pattern doesn't include $category placeholder"))
+
+ for cat in categories:
+ myset = CategorySet(cat, trees[repository].dbapi, only_visible=visible)
+ myname = name_pattern.replace("$category", cat)
+ myname = myname.replace("${category}", cat)
+ rValue[myname] = myset
+ return rValue
+ multiBuilder = classmethod(multiBuilder)
+
+class AgeSet(EverythingSet):
+ _operations = ["merge", "unmerge"]
+
+ def __init__(self, vardb, mode="older", age=7):
+ super(AgeSet, self).__init__(vardb)
+ self._mode = mode
+ self._age = age
+
+ def _filter(self, atom):
+
+ cpv = self._db.match(atom)[0]
+ path = self._db.getpath(cpv, filename="COUNTER")
+ age = (time.time() - os.stat(path).st_mtime) / (3600 * 24)
+ if ((self._mode == "older" and age <= self._age) \
+ or (self._mode == "newer" and age >= self._age)):
+ return False
+ else:
+ return True
+
+ def singleBuilder(cls, options, settings, trees):
+ mode = options.get("mode", "older")
+ if str(mode).lower() not in ["newer", "older"]:
+ raise SetConfigError(_("invalid 'mode' value %s (use either 'newer' or 'older')") % mode)
+ try:
+ age = int(options.get("age", "7"))
+ except ValueError as e:
+ raise SetConfigError(_("value of option 'age' is not an integer"))
+ return AgeSet(vardb=trees["vartree"].dbapi, mode=mode, age=age)
+
+ singleBuilder = classmethod(singleBuilder)
+
+class RebuiltBinaries(EverythingSet):
+ _operations = ('merge',)
+ _aux_keys = ('BUILD_TIME',)
+
+ def __init__(self, vardb, bindb=None):
+ super(RebuiltBinaries, self).__init__(vardb, bindb=bindb)
+ self._bindb = bindb
+
+ def _filter(self, atom):
+ cpv = self._db.match(atom)[0]
+ inst_build_time, = self._db.aux_get(cpv, self._aux_keys)
+ try:
+ bin_build_time, = self._bindb.aux_get(cpv, self._aux_keys)
+ except KeyError:
+ return False
+ return bool(bin_build_time and (inst_build_time != bin_build_time))
+
+ def singleBuilder(cls, options, settings, trees):
+ return RebuiltBinaries(trees["vartree"].dbapi,
+ bindb=trees["bintree"].dbapi)
+
+ singleBuilder = classmethod(singleBuilder)
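
Each class above is meant to be instantiated through its singleBuilder()
from a sets.conf section. Two hypothetical entries using the option names
those builders accept (the section names are invented):

    [stale]
    class = portage._sets.dbapi.AgeSet
    mode = older
    age = 30

    [no-binpkg]
    class = portage._sets.dbapi.UnavailableBinaries
    metadata-source = bintree
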
diff --git a/portage_with_autodep/pym/portage/_sets/files.py b/portage_with_autodep/pym/portage/_sets/files.py
new file mode 100644
index 0000000..f19ecf6
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_sets/files.py
@@ -0,0 +1,341 @@
+# Copyright 2007-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import re
+from itertools import chain
+
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.util import grabfile, write_atomic, ensure_dirs, normalize_path
+from portage.const import USER_CONFIG_PATH, WORLD_FILE, WORLD_SETS_FILE
+from portage.const import _ENABLE_SET_CONFIG
+from portage.localization import _
+from portage.locks import lockfile, unlockfile
+from portage import portage_gid
+from portage._sets.base import PackageSet, EditablePackageSet
+from portage._sets import SetConfigError, SETPREFIX, get_boolean
+from portage.env.loaders import ItemFileLoader, KeyListFileLoader
+from portage.env.validators import ValidAtomValidator
+from portage import cpv_getkey
+
+__all__ = ["StaticFileSet", "ConfigFileSet", "WorldSelectedSet"]
+
+class StaticFileSet(EditablePackageSet):
+ _operations = ["merge", "unmerge"]
+ _repopath_match = re.compile(r'.*\$\{repository:(?P<reponame>.+)\}.*')
+ _repopath_sub = re.compile(r'\$\{repository:(?P<reponame>.+)\}')
+
+ def __init__(self, filename, greedy=False, dbapi=None):
+ super(StaticFileSet, self).__init__(allow_repo=True)
+ self._filename = filename
+ self._mtime = None
+ self.description = "Package set loaded from file %s" % self._filename
+ self.loader = ItemFileLoader(self._filename, self._validate)
+ if greedy and not dbapi:
+ self.errors.append(_("%s configured as greedy set, but no dbapi instance passed in constructor") % self._filename)
+ greedy = False
+ self.greedy = greedy
+ self.dbapi = dbapi
+
+ metadata = grabfile(self._filename + ".metadata")
+ key = None
+ value = []
+ for line in metadata:
+ line = line.strip()
+ if len(line) == 0 and key != None:
+ setattr(self, key, " ".join(value))
+ key = None
+ elif line[-1] == ":" and key == None:
+ key = line[:-1].lower()
+ value = []
+ elif key != None:
+ value.append(line)
+ else:
+ pass
+ else:
+ if key != None:
+ setattr(self, key, " ".join(value))
+
+ def _validate(self, atom):
+ return bool(atom[:1] == SETPREFIX or ValidAtomValidator(atom, allow_repo=True))
+
+ def write(self):
+ write_atomic(self._filename, "".join("%s\n" % (atom,) \
+ for atom in sorted(chain(self._atoms, self._nonatoms))))
+
+ def load(self):
+ try:
+ mtime = os.stat(self._filename).st_mtime
+ except (OSError, IOError):
+ mtime = None
+ if (not self._loaded or self._mtime != mtime):
+ try:
+ data, errors = self.loader.load()
+ for fname in errors:
+ for e in errors[fname]:
+ self.errors.append(fname+": "+e)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ data = {}
+ if self.greedy:
+ atoms = []
+ for a in data:
+ matches = self.dbapi.match(a)
+ for cpv in matches:
+ atoms.append("%s:%s" % (cpv_getkey(cpv),
+ self.dbapi.aux_get(cpv, ["SLOT"])[0]))
+ # In addition to any installed slots, also try to pull
+ # in the latest new slot that may be available.
+ atoms.append(a)
+ else:
+ atoms = iter(data)
+ self._setAtoms(atoms)
+ self._mtime = mtime
+
+ def singleBuilder(self, options, settings, trees):
+ if not "filename" in options:
+ raise SetConfigError(_("no filename specified"))
+ greedy = get_boolean(options, "greedy", False)
+ filename = options["filename"]
+ # look for repository path variables
+ match = self._repopath_match.match(filename)
+ if match:
+ try:
+ filename = self._repopath_sub.sub(trees["porttree"].dbapi.treemap[match.groupdict()["reponame"]], filename)
+ except KeyError:
+ raise SetConfigError(_("Could not find repository '%s'") % match.groupdict()["reponame"])
+ return StaticFileSet(filename, greedy=greedy, dbapi=trees["vartree"].dbapi)
+ singleBuilder = classmethod(singleBuilder)
+
+ def multiBuilder(self, options, settings, trees):
+ rValue = {}
+ directory = options.get("directory",
+ os.path.join(settings["PORTAGE_CONFIGROOT"],
+ USER_CONFIG_PATH, "sets"))
+ name_pattern = options.get("name_pattern", "${name}")
+ if not "$name" in name_pattern and not "${name}" in name_pattern:
+ raise SetConfigError(_("name_pattern doesn't include ${name} placeholder"))
+ greedy = get_boolean(options, "greedy", False)
+ # look for repository path variables
+ match = self._repopath_match.match(directory)
+ if match:
+ try:
+ directory = self._repopath_sub.sub(trees["porttree"].dbapi.treemap[match.groupdict()["reponame"]], directory)
+ except KeyError:
+ raise SetConfigError(_("Could not find repository '%s'") % match.groupdict()["reponame"])
+
+ try:
+ directory = _unicode_decode(directory,
+ encoding=_encodings['fs'], errors='strict')
+ # Now verify that we can also encode it.
+ _unicode_encode(directory,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeError:
+ directory = _unicode_decode(directory,
+ encoding=_encodings['fs'], errors='replace')
+ raise SetConfigError(
+ _("Directory path contains invalid character(s) for encoding '%s': '%s'") \
+ % (_encodings['fs'], directory))
+
+ if os.path.isdir(directory):
+ directory = normalize_path(directory)
+
+ for parent, dirs, files in os.walk(directory):
+ try:
+ parent = _unicode_decode(parent,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ for d in dirs[:]:
+ if d[:1] == '.':
+ dirs.remove(d)
+ for filename in files:
+ try:
+ filename = _unicode_decode(filename,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ if filename[:1] == '.':
+ continue
+ if filename.endswith(".metadata"):
+ continue
+ filename = os.path.join(parent,
+ filename)[1 + len(directory):]
+ myname = name_pattern.replace("$name", filename)
+ myname = myname.replace("${name}", filename)
+ rValue[myname] = StaticFileSet(
+ os.path.join(directory, filename),
+ greedy=greedy, dbapi=trees["vartree"].dbapi)
+ return rValue
+ multiBuilder = classmethod(multiBuilder)
+
+class ConfigFileSet(PackageSet):
+ def __init__(self, filename):
+ super(ConfigFileSet, self).__init__()
+ self._filename = filename
+ self.description = "Package set generated from %s" % self._filename
+ self.loader = KeyListFileLoader(self._filename, ValidAtomValidator)
+
+ def load(self):
+ data, errors = self.loader.load()
+ self._setAtoms(iter(data))
+
+ def singleBuilder(self, options, settings, trees):
+ if not "filename" in options:
+ raise SetConfigError(_("no filename specified"))
+ return ConfigFileSet(options["filename"])
+ singleBuilder = classmethod(singleBuilder)
+
+ def multiBuilder(self, options, settings, trees):
+ rValue = {}
+ directory = options.get("directory",
+ os.path.join(settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH))
+ name_pattern = options.get("name_pattern", "sets/package_$suffix")
+ if not "$suffix" in name_pattern and not "${suffix}" in name_pattern:
+ raise SetConfigError(_("name_pattern doesn't include $suffix placeholder"))
+ for suffix in ["keywords", "use", "mask", "unmask"]:
+ myname = name_pattern.replace("$suffix", suffix)
+ myname = myname.replace("${suffix}", suffix)
+ rValue[myname] = ConfigFileSet(os.path.join(directory, "package."+suffix))
+ return rValue
+ multiBuilder = classmethod(multiBuilder)
+
+class WorldSelectedSet(EditablePackageSet):
+ description = "Set of packages that were directly installed by the user"
+
+ def __init__(self, eroot):
+ super(WorldSelectedSet, self).__init__(allow_repo=True)
+		# most attributes exist twice, because atoms and non-atoms are
+		# stored in separate files
+ self._lock = None
+ self._filename = os.path.join(eroot, WORLD_FILE)
+ self.loader = ItemFileLoader(self._filename, self._validate)
+ self._mtime = None
+
+ self._filename2 = os.path.join(eroot, WORLD_SETS_FILE)
+ self.loader2 = ItemFileLoader(self._filename2, self._validate2)
+ self._mtime2 = None
+
+ def _validate(self, atom):
+ return ValidAtomValidator(atom, allow_repo=True)
+
+ def _validate2(self, setname):
+ return setname.startswith(SETPREFIX)
+
+ def write(self):
+ write_atomic(self._filename,
+ "".join(sorted("%s\n" % x for x in self._atoms)))
+
+ if _ENABLE_SET_CONFIG:
+ write_atomic(self._filename2,
+ "".join(sorted("%s\n" % x for x in self._nonatoms)))
+
+ def load(self):
+ atoms = []
+ nonatoms = []
+ atoms_changed = False
+ # load atoms and non-atoms from different files so the worldfile is
+ # backwards-compatible with older versions and other PMs, even though
+ # it's supposed to be private state data :/
+ try:
+ mtime = os.stat(self._filename).st_mtime
+ except (OSError, IOError):
+ mtime = None
+ if (not self._loaded or self._mtime != mtime):
+ try:
+ data, errors = self.loader.load()
+ for fname in errors:
+ for e in errors[fname]:
+ self.errors.append(fname+": "+e)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ data = {}
+ atoms = list(data)
+ self._mtime = mtime
+ atoms_changed = True
+ else:
+ atoms.extend(self._atoms)
+
+ if _ENABLE_SET_CONFIG:
+ changed2, nonatoms = self._load2()
+ atoms_changed |= changed2
+
+ if atoms_changed:
+ self._setAtoms(atoms+nonatoms)
+
+ def _load2(self):
+ changed = False
+ try:
+ mtime = os.stat(self._filename2).st_mtime
+ except (OSError, IOError):
+ mtime = None
+ if (not self._loaded or self._mtime2 != mtime):
+ try:
+ data, errors = self.loader2.load()
+ for fname in errors:
+ for e in errors[fname]:
+ self.errors.append(fname+": "+e)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ data = {}
+ nonatoms = list(data)
+ self._mtime2 = mtime
+ changed = True
+ else:
+ nonatoms = list(self._nonatoms)
+
+ return changed, nonatoms
+
+ def _ensure_dirs(self):
+ ensure_dirs(os.path.dirname(self._filename), gid=portage_gid, mode=0o2750, mask=0o2)
+
+ def lock(self):
+ self._ensure_dirs()
+ self._lock = lockfile(self._filename, wantnewlockfile=1)
+
+ def unlock(self):
+ unlockfile(self._lock)
+ self._lock = None
+
+ def cleanPackage(self, vardb, cpv):
+ '''
+ Before calling this function you should call lock and load.
+ After calling this function you should call unlock.
+ '''
+ if not self._lock:
+ raise AssertionError('cleanPackage needs the set to be locked')
+
+ worldlist = list(self._atoms)
+ mykey = cpv_getkey(cpv)
+ newworldlist = []
+ for x in worldlist:
+ if x.cp == mykey:
+ matches = vardb.match(x, use_cache=0)
+ if not matches:
+ #zap our world entry
+ pass
+ elif len(matches) == 1 and matches[0] == cpv:
+ #zap our world entry
+ pass
+ else:
+ #others are around; keep it.
+ newworldlist.append(x)
+ else:
+ #this doesn't match the package we're unmerging; keep it.
+ newworldlist.append(x)
+
+ newworldlist.extend(self._nonatoms)
+ self.replace(newworldlist)
+
+ def singleBuilder(self, options, settings, trees):
+ return WorldSelectedSet(settings["EROOT"])
+ singleBuilder = classmethod(singleBuilder)
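
StaticFileSet and WorldSelectedSet above both persist one entry per line via
write_atomic(). An illustrative round trip, assuming an existing world-style
file at the conventional path:

    from portage._sets.files import StaticFileSet

    s = StaticFileSet("/var/lib/portage/world")
    s.load()
    print(sorted(s.getAtoms()))
    s.add("app-misc/screen")  # EditablePackageSet.update() calls write()
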
diff --git a/portage_with_autodep/pym/portage/_sets/libs.py b/portage_with_autodep/pym/portage/_sets/libs.py
new file mode 100644
index 0000000..6c5babc
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_sets/libs.py
@@ -0,0 +1,98 @@
+# Copyright 2007-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+from portage.localization import _
+from portage._sets.base import PackageSet
+from portage._sets import get_boolean, SetConfigError
+from portage.versions import cpv_getkey
+import portage
+
+class LibraryConsumerSet(PackageSet):
+ _operations = ["merge", "unmerge"]
+
+ def __init__(self, vardbapi, debug=False):
+ super(LibraryConsumerSet, self).__init__()
+ self.dbapi = vardbapi
+ self.debug = debug
+
+ def mapPathsToAtoms(self, paths):
+ rValue = set()
+ for p in paths:
+ for cpv in self.dbapi._linkmap.getOwners(p):
+ try:
+ slot, = self.dbapi.aux_get(cpv, ["SLOT"])
+ except KeyError:
+ # This is expected for preserved libraries
+ # of packages that have been uninstalled
+ # without replacement.
+ pass
+ else:
+ rValue.add("%s:%s" % (cpv_getkey(cpv), slot))
+ return rValue
+
+class LibraryFileConsumerSet(LibraryConsumerSet):
+
+ """
+ Note: This does not detect libtool archive (*.la) files that consume the
+ specified files (revdep-rebuild is able to detect them).
+ """
+
+ description = "Package set which contains all packages " + \
+ "that consume the specified library file(s)."
+
+ def __init__(self, vardbapi, files, **kargs):
+ super(LibraryFileConsumerSet, self).__init__(vardbapi, **kargs)
+ self.files = files
+
+ def load(self):
+ consumers = set()
+ for lib in self.files:
+ consumers.update(self.dbapi._linkmap.findConsumers(lib))
+
+ if not consumers:
+ return
+ self._setAtoms(self.mapPathsToAtoms(consumers))
+
+ def singleBuilder(cls, options, settings, trees):
+ files = tuple(portage.util.shlex_split(options.get("files", "")))
+ if not files:
+ raise SetConfigError(_("no files given"))
+ debug = get_boolean(options, "debug", False)
+ return LibraryFileConsumerSet(trees["vartree"].dbapi,
+ files, debug=debug)
+ singleBuilder = classmethod(singleBuilder)
+
+class PreservedLibraryConsumerSet(LibraryConsumerSet):
+ def load(self):
+ reg = self.dbapi._plib_registry
+ if reg is None:
+ # preserve-libs is entirely disabled
+ return
+ consumers = set()
+ if reg:
+ plib_dict = reg.getPreservedLibs()
+ for libs in plib_dict.values():
+ for lib in libs:
+ if self.debug:
+ print(lib)
+ for x in sorted(self.dbapi._linkmap.findConsumers(lib)):
+ print(" ", x)
+ print("-"*40)
+ consumers.update(self.dbapi._linkmap.findConsumers(lib))
+ # Don't rebuild packages just because they contain preserved
+ # libs that happen to be consumers of other preserved libs.
+ for libs in plib_dict.values():
+ consumers.difference_update(libs)
+ else:
+ return
+ if not consumers:
+ return
+ self._setAtoms(self.mapPathsToAtoms(consumers))
+
+ def singleBuilder(cls, options, settings, trees):
+ debug = get_boolean(options, "debug", False)
+ return PreservedLibraryConsumerSet(trees["vartree"].dbapi,
+ debug=debug)
+ singleBuilder = classmethod(singleBuilder)
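
LibraryFileConsumerSet above resolves the installed consumers of the given
library files through the vardb link map. A hypothetical sets.conf entry
(the library path is invented):

    [libpng-consumers]
    class = portage._sets.libs.LibraryFileConsumerSet
    files = /usr/lib/libpng.so.3
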
diff --git a/portage_with_autodep/pym/portage/_sets/profiles.py b/portage_with_autodep/pym/portage/_sets/profiles.py
new file mode 100644
index 0000000..39a2968
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_sets/profiles.py
@@ -0,0 +1,53 @@
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+
+from portage import os
+from portage.util import grabfile_package, stack_lists
+from portage._sets.base import PackageSet
+from portage._sets import get_boolean
+from portage.util import writemsg_level
+
+__all__ = ["PackagesSystemSet"]
+
+class PackagesSystemSet(PackageSet):
+ _operations = ["merge"]
+
+ def __init__(self, profile_paths, debug=False):
+ super(PackagesSystemSet, self).__init__()
+ self._profile_paths = profile_paths
+ self._debug = debug
+ if profile_paths:
+ description = self._profile_paths[-1]
+ if description == "/etc/portage/profile" and \
+ len(self._profile_paths) > 1:
+ description = self._profile_paths[-2]
+ else:
+ description = None
+ self.description = "System packages for profile %s" % description
+
+ def load(self):
+ debug = self._debug
+ if debug:
+ writemsg_level("\nPackagesSystemSet: profile paths: %s\n" % \
+ (self._profile_paths,), level=logging.DEBUG, noiselevel=-1)
+
+ mylist = [grabfile_package(os.path.join(x, "packages"), verify_eapi=True) for x in self._profile_paths]
+
+ if debug:
+ writemsg_level("\nPackagesSystemSet: raw packages: %s\n" % \
+ (mylist,), level=logging.DEBUG, noiselevel=-1)
+
+ mylist = stack_lists(mylist, incremental=1)
+
+ if debug:
+ writemsg_level("\nPackagesSystemSet: stacked packages: %s\n" % \
+ (mylist,), level=logging.DEBUG, noiselevel=-1)
+
+ self._setAtoms([x[1:] for x in mylist if x[0] == "*"])
+
+ def singleBuilder(self, options, settings, trees):
+ debug = get_boolean(options, "debug", False)
+ return PackagesSystemSet(settings.profiles, debug=debug)
+ singleBuilder = classmethod(singleBuilder)
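
The profile "packages" files parsed by load() above mark system-set entries
with a leading "*"; the final filter drops everything else. An illustrative
file (package names invented):

    *sys-apps/baselayout
    *sys-apps/portage
    app-misc/not-part-of-the-system-set
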
diff --git a/portage_with_autodep/pym/portage/_sets/security.py b/portage_with_autodep/pym/portage/_sets/security.py
new file mode 100644
index 0000000..2d8fcf6
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_sets/security.py
@@ -0,0 +1,86 @@
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage.glsa as glsa
+from portage._sets.base import PackageSet
+from portage.versions import catpkgsplit, pkgcmp
+from portage._sets import get_boolean
+
+__all__ = ["SecuritySet", "NewGlsaSet", "NewAffectedSet", "AffectedSet"]
+
+class SecuritySet(PackageSet):
+ _operations = ["merge"]
+ _skip_applied = False
+
+ description = "package set that includes all packages possibly affected by a GLSA"
+
+ def __init__(self, settings, vardbapi, portdbapi, least_change=True):
+ super(SecuritySet, self).__init__()
+ self._settings = settings
+ self._vardbapi = vardbapi
+ self._portdbapi = portdbapi
+ self._least_change = least_change
+
+ def getGlsaList(self, skip_applied):
+ glsaindexlist = glsa.get_glsa_list(self._settings)
+ if skip_applied:
+ applied_list = glsa.get_applied_glsas(self._settings)
+ glsaindexlist = set(glsaindexlist).difference(applied_list)
+ glsaindexlist = list(glsaindexlist)
+ glsaindexlist.sort()
+ return glsaindexlist
+
+ def load(self):
+ glsaindexlist = self.getGlsaList(self._skip_applied)
+ atomlist = []
+ for glsaid in glsaindexlist:
+ myglsa = glsa.Glsa(glsaid, self._settings, self._vardbapi, self._portdbapi)
+ #print glsaid, myglsa.isVulnerable(), myglsa.isApplied(), myglsa.getMergeList()
+ if self.useGlsa(myglsa):
+ atomlist += ["="+x for x in myglsa.getMergeList(least_change=self._least_change)]
+ self._setAtoms(self._reduce(atomlist))
+
+ def _reduce(self, atomlist):
+ mydict = {}
+ for atom in atomlist[:]:
+ cpv = self._portdbapi.xmatch("match-all", atom)[0]
+ slot = self._portdbapi.aux_get(cpv, ["SLOT"])[0]
+ cps = "/".join(catpkgsplit(cpv)[0:2]) + ":" + slot
+ if not cps in mydict:
+ mydict[cps] = (atom, cpv)
+ else:
+ other_cpv = mydict[cps][1]
+ if pkgcmp(catpkgsplit(cpv)[1:], catpkgsplit(other_cpv)[1:]) > 0:
+ atomlist.remove(mydict[cps][0])
+ mydict[cps] = (atom, cpv)
+ return atomlist
+
+ def useGlsa(self, myglsa):
+ return True
+
+ def updateAppliedList(self):
+ glsaindexlist = self.getGlsaList(True)
+ applied_list = glsa.get_applied_glsas(self._settings)
+ for glsaid in glsaindexlist:
+ myglsa = glsa.Glsa(glsaid, self._settings, self._vardbapi, self._portdbapi)
+ if not myglsa.isVulnerable() and not myglsa.nr in applied_list:
+ myglsa.inject()
+
+ def singleBuilder(cls, options, settings, trees):
+ least_change = not get_boolean(options, "use_emerge_resolver", False)
+ return cls(settings, trees["vartree"].dbapi, trees["porttree"].dbapi, least_change=least_change)
+ singleBuilder = classmethod(singleBuilder)
+
+class NewGlsaSet(SecuritySet):
+ _skip_applied = True
+ description = "Package set that includes all packages possibly affected by an unapplied GLSA"
+
+class AffectedSet(SecuritySet):
+ description = "Package set that includes all packages affected by an unapplied GLSA"
+
+ def useGlsa(self, myglsa):
+ return myglsa.isVulnerable()
+
+class NewAffectedSet(AffectedSet):
+ _skip_applied = True
+ description = "Package set that includes all packages affected by an unapplied GLSA"
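
All four classes share SecuritySet.singleBuilder(), so a sets.conf section
only picks the class and the resolver behaviour. A hypothetical entry:

    [new-affected]
    class = portage._sets.security.NewAffectedSet
    use_emerge_resolver = false
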
diff --git a/portage_with_autodep/pym/portage/_sets/shell.py b/portage_with_autodep/pym/portage/_sets/shell.py
new file mode 100644
index 0000000..2c95845
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_sets/shell.py
@@ -0,0 +1,44 @@
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import subprocess
+
+from portage import os
+from portage import _unicode_decode
+from portage._sets.base import PackageSet
+from portage._sets import SetConfigError
+
+__all__ = ["CommandOutputSet"]
+
+class CommandOutputSet(PackageSet):
+ """This class creates a PackageSet from the output of a shell command.
+ The shell command should produce one atom per line, that is:
+
+ >>> atom1
+ atom2
+ ...
+ atomN
+
+	Args:
+	  command: A string or sequence identifying the command to run
+	  (see the subprocess.Popen documentation for the format)
+ """
+ _operations = ["merge", "unmerge"]
+
+ def __init__(self, command):
+ super(CommandOutputSet, self).__init__()
+ self._command = command
+ self.description = "Package set generated from output of '%s'" % self._command
+
+ def load(self):
+ pipe = subprocess.Popen(self._command, stdout=subprocess.PIPE, shell=True)
+ stdout, stderr = pipe.communicate()
+ if pipe.wait() == os.EX_OK:
+ self._setAtoms(_unicode_decode(stdout).splitlines())
+
+ def singleBuilder(self, options, settings, trees):
+ if not "command" in options:
+ raise SetConfigError("no command specified")
+ return CommandOutputSet(options["command"])
+ singleBuilder = classmethod(singleBuilder)
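
A sketch of CommandOutputSet in use; the command is illustrative and only
needs to print one atom per line on stdout:

    from portage._sets.shell import CommandOutputSet

    s = CommandOutputSet("cat /etc/portage/sets/mytools")
    s.load()
    print(s.getAtoms())
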
diff --git a/portage_with_autodep/pym/portage/cache/__init__.py b/portage_with_autodep/pym/portage/cache/__init__.py
new file mode 100644
index 0000000..e7fe599
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/__init__.py
@@ -0,0 +1,4 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+
diff --git a/portage_with_autodep/pym/portage/cache/anydbm.py b/portage_with_autodep/pym/portage/cache/anydbm.py
new file mode 100644
index 0000000..1d56b14
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/anydbm.py
@@ -0,0 +1,113 @@
+# Copyright 2005-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# Author(s): Brian Harring (ferringb@gentoo.org)
+
+from __future__ import absolute_import
+
+try:
+ import anydbm as anydbm_module
+except ImportError:
+ # python 3.x
+ import dbm as anydbm_module
+
+try:
+ import dbm.gnu as gdbm
+except ImportError:
+ try:
+ import gdbm
+ except ImportError:
+ gdbm = None
+
+try:
+ from dbm import whichdb
+except ImportError:
+ from whichdb import whichdb
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+from portage import _unicode_encode
+from portage import os
+import sys
+from portage.cache import fs_template
+from portage.cache import cache_errors
+
+
+class database(fs_template.FsBased):
+
+ autocommits = True
+ cleanse_keys = True
+ serialize_eclasses = False
+
+ def __init__(self, *args, **config):
+ super(database,self).__init__(*args, **config)
+
+ default_db = config.get("dbtype","anydbm")
+ if not default_db.startswith("."):
+ default_db = '.' + default_db
+
+ self._db_path = os.path.join(self.location, fs_template.gen_label(self.location, self.label)+default_db)
+ self.__db = None
+ mode = "w"
+ if whichdb(self._db_path) in ("dbm.gnu", "gdbm"):
+ # Allow multiple concurrent writers (see bug #53607).
+ mode += "u"
+ try:
+ # dbm.open() will not work with bytes in python-3.1:
+ # TypeError: can't concat bytes to str
+ self.__db = anydbm_module.open(self._db_path,
+ mode, self._perms)
+ except anydbm_module.error:
+ # XXX handle this at some point
+ try:
+ self._ensure_dirs()
+ self._ensure_dirs(self._db_path)
+ except (OSError, IOError) as e:
+ raise cache_errors.InitializationError(self.__class__, e)
+
+ # try again if failed
+ try:
+ if self.__db == None:
+ # dbm.open() will not work with bytes in python-3.1:
+ # TypeError: can't concat bytes to str
+ if gdbm is None:
+ self.__db = anydbm_module.open(self._db_path,
+ "c", self._perms)
+ else:
+ # Prefer gdbm type if available, since it allows
+ # multiple concurrent writers (see bug #53607).
+ self.__db = gdbm.open(self._db_path,
+ "cu", self._perms)
+ except anydbm_module.error as e:
+ raise cache_errors.InitializationError(self.__class__, e)
+ self._ensure_access(self._db_path)
+
+ def iteritems(self):
+ # dbm doesn't implement items()
+ for k in self.__db.keys():
+ yield (k, self[k])
+
+ def _getitem(self, cpv):
+ # we override getitem because it's just a cpickling of the data handed in.
+ return pickle.loads(self.__db[_unicode_encode(cpv)])
+
+ def _setitem(self, cpv, values):
+ self.__db[_unicode_encode(cpv)] = pickle.dumps(values,pickle.HIGHEST_PROTOCOL)
+
+ def _delitem(self, cpv):
+ del self.__db[cpv]
+
+ def __iter__(self):
+ return iter(list(self.__db.keys()))
+
+ def __contains__(self, cpv):
+ return cpv in self.__db
+
+	def __del__(self):
+		# Due to name mangling the attribute is stored under the key
+		# "_database__db", so test for that rather than plain "__db".
+		if self.__dict__.get("_database__db") is not None:
+			self.__db.sync()
+			self.__db.close()
+
+ if sys.hexversion >= 0x3000000:
+ items = iteritems
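
The database class above stores one pickled metadata dict per cpv, keyed by
the encoded cpv string. A standalone sketch of that storage scheme, with a
plain dict standing in for the dbm object:

    import pickle

    db = {}  # stands in for the anydbm/gdbm mapping
    values = {"SLOT": "0", "EAPI": "4"}
    db[b"app-misc/foo-1.0"] = pickle.dumps(values, pickle.HIGHEST_PROTOCOL)
    assert pickle.loads(db[b"app-misc/foo-1.0"]) == values
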
diff --git a/portage_with_autodep/pym/portage/cache/cache_errors.py b/portage_with_autodep/pym/portage/cache/cache_errors.py
new file mode 100644
index 0000000..3c1f239
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/cache_errors.py
@@ -0,0 +1,62 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+
+class CacheError(Exception): pass
+
+class InitializationError(CacheError):
+ def __init__(self, class_name, error):
+ self.error, self.class_name = error, class_name
+ def __str__(self):
+ return "Creation of instance %s failed due to %s" % \
+ (self.class_name, str(self.error))
+
+
+class CacheCorruption(CacheError):
+ def __init__(self, key, ex):
+ self.key, self.ex = key, ex
+ def __str__(self):
+ return "%s is corrupt: %s" % (self.key, str(self.ex))
+
+
+class GeneralCacheCorruption(CacheError):
+ def __init__(self,ex): self.ex = ex
+ def __str__(self): return "corruption detected: %s" % str(self.ex)
+
+
+class InvalidRestriction(CacheError):
+ def __init__(self, key, restriction, exception=None):
+ if exception == None: exception = ''
+		self.key, self.restriction, self.ex = key, restriction, exception
+ def __str__(self):
+ return "%s:%s is not valid: %s" % \
+ (self.key, self.restriction, str(self.ex))
+
+
+class ReadOnlyRestriction(CacheError):
+ def __init__(self, info=''):
+ self.info = info
+ def __str__(self):
+ return "cache is non-modifiable"+str(self.info)
+
+class StatCollision(CacheError):
+ """
+ If the content of a cache entry changes and neither the file mtime nor
+ size changes, it will prevent rsync from detecting changes. Cache backends
+ may raise this exception from _setitem() if they detect this type of stat
+ collision. See bug #139134.
+ """
+ def __init__(self, key, filename, mtime, size):
+ self.key = key
+ self.filename = filename
+ self.mtime = mtime
+ self.size = size
+
+ def __str__(self):
+ return "%s has stat collision with size %s and mtime %s" % \
+ (self.key, self.size, self.mtime)
+
+ def __repr__(self):
+ return "portage.cache.cache_errors.StatCollision(%s)" % \
+ (', '.join((repr(self.key), repr(self.filename),
+ repr(self.mtime), repr(self.size))),)
diff --git a/portage_with_autodep/pym/portage/cache/ebuild_xattr.py b/portage_with_autodep/pym/portage/cache/ebuild_xattr.py
new file mode 100644
index 0000000..6b388fa
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/ebuild_xattr.py
@@ -0,0 +1,171 @@
+# Copyright: 2009-2011 Gentoo Foundation
+# Author(s): Petteri Räty (betelgeuse@gentoo.org)
+# License: GPL2
+
+__all__ = ['database']
+
+import errno
+
+import portage
+from portage.cache import fs_template
+from portage.versions import catsplit
+from portage import cpv_getkey
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'xattr')
+
+class NoValueException(Exception):
+ pass
+
+class database(fs_template.FsBased):
+
+ autocommits = True
+
+ def __init__(self, *args, **config):
+ super(database,self).__init__(*args, **config)
+ self.portdir = self.label
+ self.ns = xattr.NS_USER + '.gentoo.cache'
+ self.keys = set(self._known_keys)
+ self.keys.add('_mtime_')
+ self.keys.add('_eclasses_')
+		# xattr values have an upper length limit
+ self.max_len = self.__get_max()
+
+ def __get_max(self):
+ path = os.path.join(self.portdir,'profiles/repo_name')
+ try:
+ return int(self.__get(path,'value_max_len'))
+ except NoValueException as e:
+ max = self.__calc_max(path)
+ self.__set(path,'value_max_len',str(max))
+ return max
+
+ def __calc_max(self,path):
+ """ Find out max attribute length supported by the file system """
+
+		# Grow a test attribute in 100-byte steps until the file system
+		# rejects the value.
+		hundred = 'a' * 100
+		s = hundred
+
+		# Could use try/finally, but that requires Python 2.5 or later
+		try:
+			while True:
+				self.__set(path, 'test_max', s)
+				s += hundred
+		except IOError as e:
+			# ext-based file systems report a wrong errno, see
+			# http://bugzilla.kernel.org/show_bug.cgi?id=12793
+ if e.errno in (errno.E2BIG, errno.ENOSPC):
+ result = len(s)-100
+ else:
+ raise
+
+ try:
+ self.__remove(path,'test_max')
+ except IOError as e:
+ if e.errno != errno.ENODATA:
+ raise
+
+ return result
+
+ def __get_path(self,cpv):
+ cat,pn = catsplit(cpv_getkey(cpv))
+ return os.path.join(self.portdir,cat,pn,os.path.basename(cpv) + ".ebuild")
+
+ def __has_cache(self,path):
+ try:
+ self.__get(path,'_mtime_')
+ except NoValueException as e:
+ return False
+
+ return True
+
+ def __get(self,path,key,default=None):
+ try:
+ return xattr.get(path,key,namespace=self.ns)
+ except IOError as e:
+ if not default is None and errno.ENODATA == e.errno:
+ return default
+ else:
+ raise NoValueException()
+
+ def __remove(self,path,key):
+ xattr.remove(path,key,namespace=self.ns)
+
+ def __set(self,path,key,value):
+ xattr.set(path,key,value,namespace=self.ns)
+
+ def _getitem(self, cpv):
+ values = {}
+ path = self.__get_path(cpv)
+ all = {}
+ for tuple in xattr.get_all(path,namespace=self.ns):
+ key,value = tuple
+ all[key] = value
+
+ if not '_mtime_' in all:
+ raise KeyError(cpv)
+
+ # We default to '' like other caches
+ for key in self.keys:
+ attr_value = all.get(key,'1:')
+ parts,sep,value = attr_value.partition(':')
+ parts = int(parts)
+ if parts > 1:
+ for i in range(1,parts):
+ value += all.get(key+str(i))
+ values[key] = value
+
+ return values
+
+ def _setitem(self, cpv, values):
+ path = self.__get_path(cpv)
+ max = self.max_len
+ for key,value in values.items():
+			# mtime comes in as a long, so convert everything to a string
+			s = str(value)
+			# Long values need to be split across multiple attributes
+			value_len = len(s)
+			parts = 0
+			if value_len > max:
+				# Find out how many parts we need; integer division, so
+				# this also behaves correctly under Python 3
+				parts = value_len // max
+				if value_len % max > 0:
+					parts += 1
+
+ # Only the first entry carries the number of parts
+ self.__set(path,key,'%s:%s'%(parts,s[0:max]))
+
+ # Write out the rest
+ for i in range(1,parts):
+ start = i * max
+ val = s[start:start+max]
+ self.__set(path,key+str(i),val)
+ else:
+ self.__set(path,key,"%s:%s"%(1,s))
+
+ def _delitem(self, cpv):
+ pass # Will be gone with the ebuild
+
+ def __contains__(self, cpv):
+ return os.path.exists(self.__get_path(cpv))
+
+ def __iter__(self):
+
+ for root, dirs, files in os.walk(self.portdir):
+ for file in files:
+ try:
+ file = _unicode_decode(file,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ if file[-7:] == '.ebuild':
+ cat = os.path.basename(os.path.dirname(root))
+ pn_pv = file[:-7]
+ path = os.path.join(root,file)
+ if self.__has_cache(path):
+ yield "%s/%s/%s" % (cat,os.path.basename(root),file[:-7])
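
The chunking scheme in _setitem() above stores "<parts>:<first chunk>" under
the base key and writes overflow chunks to numbered keys. A standalone
sketch of the same arithmetic (the helper name is invented):

    def split_value(key, value, max_len):
        s = str(value)
        if len(s) <= max_len:
            return {key: "1:%s" % s}
        parts = len(s) // max_len
        if len(s) % max_len:
            parts += 1
        chunks = {key: "%s:%s" % (parts, s[:max_len])}
        for i in range(1, parts):
            chunks[key + str(i)] = s[i * max_len:(i + 1) * max_len]
        return chunks

    assert split_value("DEPEND", "abcdef", 4) == \
        {"DEPEND": "2:abcd", "DEPEND1": "ef"}
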
diff --git a/portage_with_autodep/pym/portage/cache/flat_hash.py b/portage_with_autodep/pym/portage/cache/flat_hash.py
new file mode 100644
index 0000000..b6bc074
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/flat_hash.py
@@ -0,0 +1,155 @@
+# Copyright: 2005-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# Author(s): Brian Harring (ferringb@gentoo.org)
+
+from portage.cache import fs_template
+from portage.cache import cache_errors
+import errno
+import io
+import stat
+import sys
+import os as _os
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+
+if sys.hexversion >= 0x3000000:
+ long = int
+
+# Coerce to unicode, in order to prevent TypeError when writing
+# raw bytes to TextIOWrapper with python2.
+_setitem_fmt = _unicode_decode("%s=%s\n")
+
+class database(fs_template.FsBased):
+
+ autocommits = True
+
+ def __init__(self, *args, **config):
+ super(database,self).__init__(*args, **config)
+ self.location = os.path.join(self.location,
+ self.label.lstrip(os.path.sep).rstrip(os.path.sep))
+ write_keys = set(self._known_keys)
+ write_keys.add("_eclasses_")
+ write_keys.add("_mtime_")
+ self._write_keys = sorted(write_keys)
+ if not self.readonly and not os.path.exists(self.location):
+ self._ensure_dirs()
+
+ def _getitem(self, cpv):
+ # Don't use os.path.join, for better performance.
+ fp = self.location + _os.sep + cpv
+ try:
+ myf = io.open(_unicode_encode(fp,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace')
+ try:
+ lines = myf.read().split("\n")
+ if not lines[-1]:
+ lines.pop()
+ d = self._parse_data(lines, cpv)
+ if '_mtime_' not in d:
+ # Backward compatibility with old cache
+ # that uses mtime mangling.
+ d['_mtime_'] = _os.fstat(myf.fileno())[stat.ST_MTIME]
+ return d
+ finally:
+ myf.close()
+ except (IOError, OSError) as e:
+ if e.errno != errno.ENOENT:
+ raise cache_errors.CacheCorruption(cpv, e)
+ raise KeyError(cpv, e)
+
+ def _parse_data(self, data, cpv):
+ try:
+ return dict( x.split("=", 1) for x in data )
+ except ValueError as e:
+ # If a line is missing an "=", the split length is 1 instead of 2.
+ raise cache_errors.CacheCorruption(cpv, e)
+
+ def _setitem(self, cpv, values):
+ s = cpv.rfind("/")
+ fp = os.path.join(self.location,cpv[:s],".update.%i.%s" % (os.getpid(), cpv[s+1:]))
+ try:
+ myf = io.open(_unicode_encode(fp,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='backslashreplace')
+ except (IOError, OSError) as e:
+ if errno.ENOENT == e.errno:
+ try:
+ self._ensure_dirs(cpv)
+ myf = io.open(_unicode_encode(fp,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='backslashreplace')
+ except (OSError, IOError) as e:
+ raise cache_errors.CacheCorruption(cpv, e)
+ else:
+ raise cache_errors.CacheCorruption(cpv, e)
+
+ try:
+ for k in self._write_keys:
+ v = values.get(k)
+ if not v:
+ continue
+ myf.write(_setitem_fmt % (k, v))
+ finally:
+ myf.close()
+ self._ensure_access(fp)
+
+ #update written. now we move it.
+
+ new_fp = os.path.join(self.location,cpv)
+ try:
+ os.rename(fp, new_fp)
+ except (OSError, IOError) as e:
+ os.remove(fp)
+ raise cache_errors.CacheCorruption(cpv, e)
+
+ def _delitem(self, cpv):
+ try:
+ os.remove(os.path.join(self.location,cpv))
+ except OSError as e:
+ if errno.ENOENT == e.errno:
+ raise KeyError(cpv)
+ else:
+ raise cache_errors.CacheCorruption(cpv, e)
+
+ def __contains__(self, cpv):
+ return os.path.exists(os.path.join(self.location, cpv))
+
+ def __iter__(self):
+ """generator for walking the dir struct"""
+ dirs = [(0, self.location)]
+ len_base = len(self.location)
+ while dirs:
+ depth, dir_path = dirs.pop()
+ try:
+ dir_list = os.listdir(dir_path)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ continue
+ for l in dir_list:
+ if l.endswith(".cpickle"):
+ continue
+ p = os.path.join(dir_path, l)
+ try:
+ st = os.lstat(p)
+ except OSError:
+ # Cache entry disappeared.
+ continue
+ if stat.S_ISDIR(st.st_mode):
+ # Only recurse 1 deep, in order to avoid iteration over
+ # entries from another nested cache instance. This can
+ # happen if the user nests an overlay inside
+ # /usr/portage/local as in bug #302764.
+ if depth < 1:
+ dirs.append((depth+1, p))
+ continue
+ yield p[len_base+1:]
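
flat_hash's _setitem relies on the classic write-temp-then-rename pattern: the entry
is written to a pid-stamped file in the target directory and then renamed over the
final name, which on POSIX replaces the file atomically within one filesystem, so
readers never observe a half-written cache entry. The pattern in isolation, as a
generic sketch rather than the portage code itself:

	import os

	def atomic_write(path, text):
		# The temp file lives next to the target so the rename stays on
		# one filesystem, which is what makes it atomic on POSIX.
		tmp = os.path.join(os.path.dirname(path),
			".update.%i.%s" % (os.getpid(), os.path.basename(path)))
		with open(tmp, "w") as f:
			f.write(text)
		try:
			os.rename(tmp, path)
		except OSError:
			os.remove(tmp)
			raise
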
diff --git a/portage_with_autodep/pym/portage/cache/flat_list.py b/portage_with_autodep/pym/portage/cache/flat_list.py
new file mode 100644
index 0000000..7288307
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/flat_list.py
@@ -0,0 +1,134 @@
+# Copyright 2005-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.cache import fs_template
+from portage.cache import cache_errors
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+import errno
+import io
+import stat
+import sys
+
+if sys.hexversion >= 0x3000000:
+ long = int
+
+# Coerce to unicode, in order to prevent TypeError when writing
+# raw bytes to TextIOWrapper with python2.
+_setitem_fmt = _unicode_decode("%s\n")
+
+# store the current key order *here*.
+class database(fs_template.FsBased):
+
+ autocommits = True
+
+ # do not screw with this ordering. _eclasses_ needs to be last
+ auxdbkey_order=('DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
+ 'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
+ 'KEYWORDS', 'IUSE', 'REQUIRED_USE',
+ 'PDEPEND', 'PROVIDE', 'EAPI', 'PROPERTIES', 'DEFINED_PHASES')
+
+ def __init__(self, *args, **config):
+ super(database,self).__init__(*args, **config)
+ self.location = os.path.join(self.location,
+ self.label.lstrip(os.path.sep).rstrip(os.path.sep))
+
+ if len(self._known_keys) > len(self.auxdbkey_order) + 2:
+			raise Exception("fewer ordered keys than auxdbkeys")
+ if not os.path.exists(self.location):
+ self._ensure_dirs()
+
+
+ def _getitem(self, cpv):
+ d = {}
+ try:
+ myf = io.open(_unicode_encode(os.path.join(self.location, cpv),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace')
+ for k,v in zip(self.auxdbkey_order, myf):
+ d[k] = v.rstrip("\n")
+ except (OSError, IOError) as e:
+ if errno.ENOENT == e.errno:
+ raise KeyError(cpv)
+ raise cache_errors.CacheCorruption(cpv, e)
+
+ try:
+ d["_mtime_"] = os.fstat(myf.fileno())[stat.ST_MTIME]
+ except OSError as e:
+ myf.close()
+ raise cache_errors.CacheCorruption(cpv, e)
+ myf.close()
+ return d
+
+
+ def _setitem(self, cpv, values):
+ s = cpv.rfind("/")
+ fp=os.path.join(self.location,cpv[:s],".update.%i.%s" % (os.getpid(), cpv[s+1:]))
+ try:
+ myf = io.open(_unicode_encode(fp,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='backslashreplace')
+ except (OSError, IOError) as e:
+ if errno.ENOENT == e.errno:
+ try:
+ self._ensure_dirs(cpv)
+ myf = io.open(_unicode_encode(fp,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='backslashreplace')
+ except (OSError, IOError) as e:
+ raise cache_errors.CacheCorruption(cpv, e)
+ else:
+ raise cache_errors.CacheCorruption(cpv, e)
+
+
+ for x in self.auxdbkey_order:
+ myf.write(_setitem_fmt % (values.get(x, ""),))
+
+ myf.close()
+ self._ensure_access(fp, mtime=values["_mtime_"])
+ #update written. now we move it.
+ new_fp = os.path.join(self.location,cpv)
+ try:
+ os.rename(fp, new_fp)
+ except (OSError, IOError) as e:
+ os.remove(fp)
+ raise cache_errors.CacheCorruption(cpv, e)
+
+
+ def _delitem(self, cpv):
+ try:
+ os.remove(os.path.join(self.location,cpv))
+ except OSError as e:
+ if errno.ENOENT == e.errno:
+ raise KeyError(cpv)
+ else:
+ raise cache_errors.CacheCorruption(cpv, e)
+
+
+ def __contains__(self, cpv):
+ return os.path.exists(os.path.join(self.location, cpv))
+
+
+ def __iter__(self):
+ """generator for walking the dir struct"""
+ dirs = [self.location]
+ len_base = len(self.location)
+ while len(dirs):
+ for l in os.listdir(dirs[0]):
+ if l.endswith(".cpickle"):
+ continue
+ p = os.path.join(dirs[0],l)
+ st = os.lstat(p)
+ if stat.S_ISDIR(st.st_mode):
+ dirs.append(p)
+ continue
+ yield p[len_base+1:]
+ dirs.pop(0)
+
+
+ def commit(self): pass
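
flat_list stores one value per line in the fixed auxdbkey_order, so the on-disk file
carries no key names at all; reading simply pairs lines with that tuple via zip, which
is why the ordering comment above is so emphatic. A toy illustration of the pairing:

	order = ("DEPEND", "RDEPEND", "SLOT")
	lines = ["dev-libs/foo\n", "\n", "0\n"]
	d = dict((k, v.rstrip("\n")) for k, v in zip(order, lines))
	# {'DEPEND': 'dev-libs/foo', 'RDEPEND': '', 'SLOT': '0'}
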
diff --git a/portage_with_autodep/pym/portage/cache/fs_template.py b/portage_with_autodep/pym/portage/cache/fs_template.py
new file mode 100644
index 0000000..a82e862
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/fs_template.py
@@ -0,0 +1,90 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+
+import sys
+from portage.cache import template
+from portage import os
+
+from portage.proxy.lazyimport import lazyimport
+lazyimport(globals(),
+ 'portage.data:portage_gid',
+ 'portage.exception:PortageException',
+ 'portage.util:apply_permissions',
+)
+del lazyimport
+
+if sys.hexversion >= 0x3000000:
+ long = int
+
+class FsBased(template.database):
+	"""template wrapping filesystem-specific options, providing _ensure_access as a way
+	to attempt to ensure files have the specified owner/perms"""
+
+ def __init__(self, *args, **config):
+		"""throws InitializationError if needed args aren't specified
+		gid and perms aren't listed due to an oddity of python's currying mechanism
+ gid=portage_gid
+ perms=0665"""
+
+ for x, y in (("gid", -1), ("perms", -1)):
+ if x in config:
+ setattr(self, "_"+x, config[x])
+ del config[x]
+ else:
+ setattr(self, "_"+x, y)
+ super(FsBased, self).__init__(*args, **config)
+
+ if self.label.startswith(os.path.sep):
+ # normpath.
+ self.label = os.path.sep + os.path.normpath(self.label).lstrip(os.path.sep)
+
+
+ def _ensure_access(self, path, mtime=-1):
+		"""returns True if it was able to ensure that path is properly chmod'd and chowned,
+		False otherwise. If mtime is specified, attempts to ensure that is correct also."""
+ try:
+ apply_permissions(path, gid=self._gid, mode=self._perms)
+ if mtime != -1:
+ mtime=long(mtime)
+ os.utime(path, (mtime, mtime))
+ except (PortageException, EnvironmentError):
+ return False
+ return True
+
+ def _ensure_dirs(self, path=None):
+ """with path!=None, ensure beyond self.location. otherwise, ensure self.location"""
+ if path:
+ path = os.path.dirname(path)
+ base = self.location
+ else:
+ path = self.location
+ base='/'
+
+ for dir in path.lstrip(os.path.sep).rstrip(os.path.sep).split(os.path.sep):
+ base = os.path.join(base,dir)
+ if not os.path.exists(base):
+ if self._perms != -1:
+ um = os.umask(0)
+ try:
+ perms = self._perms
+ if perms == -1:
+ perms = 0
+ perms |= 0o755
+ os.mkdir(base, perms)
+ if self._gid != -1:
+ os.chown(base, -1, self._gid)
+ finally:
+ if self._perms != -1:
+ os.umask(um)
+
+
+def gen_label(base, label):
+ """if supplied label is a path, generate a unique label based upon label, and supplied base path"""
+ if label.find(os.path.sep) == -1:
+ return label
+ label = label.strip("\"").strip("'")
+ label = os.path.join(*(label.rstrip(os.path.sep).split(os.path.sep)))
+ tail = os.path.split(label)[1]
+ return "%s-%X" % (tail, abs(label.__hash__()))
+
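gen_label only rewrites labels that look like paths; anything without a separator
passes through untouched, while a path-like label collapses to its last component
plus a hash of the whole label. A usage sketch (the hex suffix varies with Python's
string hashing, so the value shown is illustrative):

	gen_label("/var/cache/edb", "metadata")      # -> 'metadata'
	gen_label("/var/cache/edb", "/usr/portage")  # -> e.g. 'portage-1A2B3C4D'
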
diff --git a/portage_with_autodep/pym/portage/cache/mappings.py b/portage_with_autodep/pym/portage/cache/mappings.py
new file mode 100644
index 0000000..60a918e
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/mappings.py
@@ -0,0 +1,485 @@
+# Copyright: 2005-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# Author(s): Brian Harring (ferringb@gentoo.org)
+
+__all__ = ["Mapping", "MutableMapping", "UserDict", "ProtectedDict",
+ "LazyLoad", "slot_dict_class"]
+
+import sys
+import weakref
+
+class Mapping(object):
+ """
+ In python-3.0, the UserDict.DictMixin class has been replaced by
+ Mapping and MutableMapping from the collections module, but 2to3
+ doesn't currently account for this change:
+
+ http://bugs.python.org/issue2876
+
+ As a workaround for the above issue, use this class as a substitute
+ for UserDict.DictMixin so that code converted via 2to3 will run.
+ """
+
+ __slots__ = ()
+
+ def __iter__(self):
+ return iter(self.keys())
+
+ def keys(self):
+ return list(self.__iter__())
+
+ def __contains__(self, key):
+ try:
+ value = self[key]
+ except KeyError:
+ return False
+ return True
+
+ def iteritems(self):
+ for k in self:
+ yield (k, self[k])
+
+ def iterkeys(self):
+ return self.__iter__()
+
+ def itervalues(self):
+ for _, v in self.items():
+ yield v
+
+ def values(self):
+ return [v for _, v in self.iteritems()]
+
+ def items(self):
+ return list(self.iteritems())
+
+ def get(self, key, default=None):
+ try:
+ return self[key]
+ except KeyError:
+ return default
+
+ def __repr__(self):
+ return repr(dict(self.items()))
+
+ def __len__(self):
+ return len(list(self))
+
+ if sys.hexversion >= 0x3000000:
+ items = iteritems
+ keys = __iter__
+ values = itervalues
+
+class MutableMapping(Mapping):
+ """
+	A mutable version of the Mapping class.
+ """
+
+ __slots__ = ()
+
+ def clear(self):
+ for key in list(self):
+ del self[key]
+
+ def setdefault(self, key, default=None):
+ try:
+ return self[key]
+ except KeyError:
+ self[key] = default
+ return default
+
+ def pop(self, key, *args):
+ if len(args) > 1:
+ raise TypeError("pop expected at most 2 arguments, got " + \
+ repr(1 + len(args)))
+ try:
+ value = self[key]
+ except KeyError:
+ if args:
+ return args[0]
+ raise
+ del self[key]
+ return value
+
+ def popitem(self):
+ try:
+ k, v = next(iter(self.items()))
+ except StopIteration:
+ raise KeyError('container is empty')
+ del self[k]
+ return (k, v)
+
+ def update(self, *args, **kwargs):
+ if len(args) > 1:
+ raise TypeError(
+ "expected at most 1 positional argument, got " + \
+ repr(len(args)))
+ other = None
+ if args:
+ other = args[0]
+ if other is None:
+ pass
+ elif hasattr(other, 'iteritems'):
+ # Use getattr to avoid interference from 2to3.
+ for k, v in getattr(other, 'iteritems')():
+ self[k] = v
+ elif hasattr(other, 'items'):
+ # Use getattr to avoid interference from 2to3.
+ for k, v in getattr(other, 'items')():
+ self[k] = v
+ elif hasattr(other, 'keys'):
+ for k in other.keys():
+ self[k] = other[k]
+ else:
+ for k, v in other:
+ self[k] = v
+ if kwargs:
+ self.update(kwargs)
+
+class UserDict(MutableMapping):
+ """
+ Use this class as a substitute for UserDict.UserDict so that
+ code converted via 2to3 will run:
+
+ http://bugs.python.org/issue2876
+ """
+
+ __slots__ = ('data',)
+
+ def __init__(self, *args, **kwargs):
+
+ self.data = {}
+
+ if len(args) > 1:
+ raise TypeError(
+ "expected at most 1 positional argument, got " + \
+ repr(len(args)))
+
+ if args:
+ self.update(args[0])
+
+ if kwargs:
+ self.update(kwargs)
+
+ def __repr__(self):
+ return repr(self.data)
+
+ def __contains__(self, key):
+ return key in self.data
+
+ def __iter__(self):
+ return iter(self.data)
+
+ def __len__(self):
+ return len(self.data)
+
+ def __getitem__(self, key):
+ return self.data[key]
+
+ def __setitem__(self, key, item):
+ self.data[key] = item
+
+ def __delitem__(self, key):
+ del self.data[key]
+
+ def clear(self):
+ self.data.clear()
+
+ if sys.hexversion >= 0x3000000:
+ keys = __iter__
+
+class OrderedDict(UserDict):
+
+ __slots__ = ('_order',)
+
+ def __init__(self, *args, **kwargs):
+ self._order = []
+ UserDict.__init__(self, *args, **kwargs)
+
+ def __iter__(self):
+ return iter(self._order)
+
+ def __setitem__(self, key, item):
+ if key in self:
+ self._order.remove(key)
+ UserDict.__setitem__(self, key, item)
+ self._order.append(key)
+
+ def __delitem__(self, key):
+ UserDict.__delitem__(self, key)
+ self._order.remove(key)
+
+ def clear(self):
+ UserDict.clear(self)
+ del self._order[:]
+
+ if sys.hexversion >= 0x3000000:
+ keys = __iter__
+
+class ProtectedDict(MutableMapping):
+ """
+ given an initial dict, this wraps that dict storing changes in a secondary dict, protecting
+ the underlying dict from changes
+ """
+ __slots__=("orig","new","blacklist")
+
+ def __init__(self, orig):
+ self.orig = orig
+ self.new = {}
+ self.blacklist = {}
+
+
+ def __setitem__(self, key, val):
+ self.new[key] = val
+ if key in self.blacklist:
+ del self.blacklist[key]
+
+
+ def __getitem__(self, key):
+ if key in self.new:
+ return self.new[key]
+ if key in self.blacklist:
+ raise KeyError(key)
+ return self.orig[key]
+
+
+ def __delitem__(self, key):
+ if key in self.new:
+ del self.new[key]
+ elif key in self.orig:
+ if key not in self.blacklist:
+ self.blacklist[key] = True
+ return
+ raise KeyError(key)
+
+
+ def __iter__(self):
+ for k in self.new:
+ yield k
+ for k in self.orig:
+ if k not in self.blacklist and k not in self.new:
+ yield k
+
+ def __contains__(self, key):
+ return key in self.new or (key not in self.blacklist and key in self.orig)
+
+ if sys.hexversion >= 0x3000000:
+ keys = __iter__
+
+class LazyLoad(Mapping):
+ """
+ Lazy loading of values for a dict
+ """
+ __slots__=("pull", "d")
+
+ def __init__(self, pull_items_func, initial_items=[]):
+ self.d = {}
+ for k, v in initial_items:
+ self.d[k] = v
+ self.pull = pull_items_func
+
+ def __getitem__(self, key):
+ if key in self.d:
+ return self.d[key]
+		elif self.pull is not None:
+ self.d.update(self.pull())
+ self.pull = None
+ return self.d[key]
+
+ def __iter__(self):
+ if self.pull is not None:
+ self.d.update(self.pull())
+ self.pull = None
+ return iter(self.d)
+
+ def __contains__(self, key):
+ if key in self.d:
+ return True
+		elif self.pull is not None:
+ self.d.update(self.pull())
+ self.pull = None
+ return key in self.d
+
+ if sys.hexversion >= 0x3000000:
+ keys = __iter__
+
+_slot_dict_classes = weakref.WeakValueDictionary()
+
+def slot_dict_class(keys, prefix="_val_"):
+ """
+ Generates mapping classes that behave similar to a dict but store values
+ as object attributes that are allocated via __slots__. Instances of these
+ objects have a smaller memory footprint than a normal dict object.
+
+ @param keys: Fixed set of allowed keys
+ @type keys: Iterable
+ @param prefix: a prefix to use when mapping
+ attribute names from keys
+ @type prefix: String
+ @rtype: SlotDict
+ @returns: A class that constructs SlotDict instances
+ having the specified keys.
+ """
+ if isinstance(keys, frozenset):
+ keys_set = keys
+ else:
+ keys_set = frozenset(keys)
+ v = _slot_dict_classes.get((keys_set, prefix))
+ if v is None:
+
+ class SlotDict(object):
+
+ allowed_keys = keys_set
+ _prefix = prefix
+ __slots__ = ("__weakref__",) + \
+ tuple(prefix + k for k in allowed_keys)
+
+ def __init__(self, *args, **kwargs):
+
+ if len(args) > 1:
+ raise TypeError(
+ "expected at most 1 positional argument, got " + \
+ repr(len(args)))
+
+ if args:
+ self.update(args[0])
+
+ if kwargs:
+ self.update(kwargs)
+
+ def __iter__(self):
+ for k, v in self.iteritems():
+ yield k
+
+ def __len__(self):
+ l = 0
+ for i in self.iteritems():
+ l += 1
+ return l
+
+ def keys(self):
+ return list(self)
+
+ def iteritems(self):
+ prefix = self._prefix
+ for k in self.allowed_keys:
+ try:
+ yield (k, getattr(self, prefix + k))
+ except AttributeError:
+ pass
+
+ def items(self):
+ return list(self.iteritems())
+
+ def itervalues(self):
+ for k, v in self.iteritems():
+ yield v
+
+ def values(self):
+ return list(self.itervalues())
+
+ def __delitem__(self, k):
+ try:
+ delattr(self, self._prefix + k)
+ except AttributeError:
+ raise KeyError(k)
+
+ def __setitem__(self, k, v):
+ setattr(self, self._prefix + k, v)
+
+ def setdefault(self, key, default=None):
+ try:
+ return self[key]
+ except KeyError:
+ self[key] = default
+ return default
+
+ def update(self, *args, **kwargs):
+ if len(args) > 1:
+ raise TypeError(
+ "expected at most 1 positional argument, got " + \
+ repr(len(args)))
+ other = None
+ if args:
+ other = args[0]
+ if other is None:
+ pass
+ elif hasattr(other, 'iteritems'):
+ # Use getattr to avoid interference from 2to3.
+ for k, v in getattr(other, 'iteritems')():
+ self[k] = v
+ elif hasattr(other, 'items'):
+ # Use getattr to avoid interference from 2to3.
+ for k, v in getattr(other, 'items')():
+ self[k] = v
+ elif hasattr(other, 'keys'):
+ for k in other.keys():
+ self[k] = other[k]
+ else:
+ for k, v in other:
+ self[k] = v
+ if kwargs:
+ self.update(kwargs)
+
+ def __getitem__(self, k):
+ try:
+ return getattr(self, self._prefix + k)
+ except AttributeError:
+ raise KeyError(k)
+
+ def get(self, key, default=None):
+ try:
+ return self[key]
+ except KeyError:
+ return default
+
+ def __contains__(self, k):
+ return hasattr(self, self._prefix + k)
+
+ def pop(self, key, *args):
+ if len(args) > 1:
+ raise TypeError(
+ "pop expected at most 2 arguments, got " + \
+ repr(1 + len(args)))
+ try:
+ value = self[key]
+ except KeyError:
+ if args:
+ return args[0]
+ raise
+ del self[key]
+ return value
+
+ def popitem(self):
+ try:
+					k, v = next(self.iteritems())
+ except StopIteration:
+ raise KeyError('container is empty')
+ del self[k]
+ return (k, v)
+
+ def copy(self):
+ c = self.__class__()
+ c.update(self)
+ return c
+
+ def clear(self):
+ for k in self.allowed_keys:
+ try:
+ delattr(self, self._prefix + k)
+ except AttributeError:
+ pass
+
+ def __str__(self):
+ return str(dict(self.iteritems()))
+
+ def __repr__(self):
+ return repr(dict(self.iteritems()))
+
+ if sys.hexversion >= 0x3000000:
+ items = iteritems
+ keys = __iter__
+ values = itervalues
+
+ v = SlotDict
+ _slot_dict_classes[v.allowed_keys] = v
+ return v
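
A usage sketch for slot_dict_class: the generated class accepts only the fixed key
set, storing each value in a __slots__ attribute, so instances are cheaper than dicts
when many objects share the same keys. The key names below are hypothetical:

	PkgData = slot_dict_class(("cpv", "slot", "repo"))
	d = PkgData(cpv="dev-lang/python-2.7.1", slot="2.7")
	d["repo"] = "gentoo"
	sorted(d.keys())   # ['cpv', 'repo', 'slot']
	d.get("bogus")     # None: reads of unknown keys miss like a dict
	d["bogus"] = 1     # raises AttributeError: no slot '_val_bogus'
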
diff --git a/portage_with_autodep/pym/portage/cache/metadata.py b/portage_with_autodep/pym/portage/cache/metadata.py
new file mode 100644
index 0000000..4c735d7
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/metadata.py
@@ -0,0 +1,154 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+
+import errno
+import re
+import stat
+import sys
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage.cache import cache_errors, flat_hash
+import portage.eclass_cache
+from portage.cache.template import reconstruct_eclasses
+from portage.cache.mappings import ProtectedDict
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+ long = int
+
+# this is the old cache format, flat_list. count maintained here.
+magic_line_count = 22
+
+# store the current key order *here*.
+class database(flat_hash.database):
+ complete_eclass_entries = False
+ auxdbkey_order=('DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
+ 'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
+ 'KEYWORDS', 'INHERITED', 'IUSE', 'REQUIRED_USE',
+ 'PDEPEND', 'PROVIDE', 'EAPI', 'PROPERTIES', 'DEFINED_PHASES')
+
+ autocommits = True
+ serialize_eclasses = False
+
+ _hashed_re = re.compile('^(\\w+)=([^\n]*)')
+
+ def __init__(self, location, *args, **config):
+ loc = location
+ super(database, self).__init__(location, *args, **config)
+ self.location = os.path.join(loc, "metadata","cache")
+ self.ec = None
+ self.raise_stat_collision = False
+
+ def _parse_data(self, data, cpv):
+ _hashed_re_match = self._hashed_re.match
+ d = {}
+
+ for line in data:
+ hashed = False
+ hashed_match = _hashed_re_match(line)
+ if hashed_match is None:
+ d.clear()
+ try:
+ for i, key in enumerate(self.auxdbkey_order):
+ d[key] = data[i]
+ except IndexError:
+ pass
+ break
+ else:
+ d[hashed_match.group(1)] = hashed_match.group(2)
+
+ if "_eclasses_" not in d:
+ if "INHERITED" in d:
+ if self.ec is None:
+ self.ec = portage.eclass_cache.cache(self.location[:-15])
+ try:
+ d["_eclasses_"] = self.ec.get_eclass_data(
+ d["INHERITED"].split())
+ except KeyError as e:
+ # INHERITED contains a non-existent eclass.
+ raise cache_errors.CacheCorruption(cpv, e)
+ del d["INHERITED"]
+ else:
+ d["_eclasses_"] = {}
+ elif isinstance(d["_eclasses_"], basestring):
+ # We skip this if flat_hash.database._parse_data() was called above
+ # because it calls reconstruct_eclasses() internally.
+ d["_eclasses_"] = reconstruct_eclasses(None, d["_eclasses_"])
+
+ return d
+
+ def _setitem(self, cpv, values):
+ if "_eclasses_" in values:
+ values = ProtectedDict(values)
+ values["INHERITED"] = ' '.join(sorted(values["_eclasses_"]))
+
+ new_content = []
+ for k in self.auxdbkey_order:
+ new_content.append(values.get(k, ''))
+ new_content.append('\n')
+ for i in range(magic_line_count - len(self.auxdbkey_order)):
+ new_content.append('\n')
+ new_content = ''.join(new_content)
+ new_content = _unicode_encode(new_content,
+ _encodings['repo.content'], errors='backslashreplace')
+
+ new_fp = os.path.join(self.location, cpv)
+ try:
+ f = open(_unicode_encode(new_fp,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ except EnvironmentError:
+ pass
+ else:
+ try:
+ try:
+ existing_st = os.fstat(f.fileno())
+ existing_content = f.read()
+ finally:
+ f.close()
+ except EnvironmentError:
+ pass
+ else:
+ existing_mtime = existing_st[stat.ST_MTIME]
+ if values['_mtime_'] == existing_mtime and \
+ existing_content == new_content:
+ return
+
+ if self.raise_stat_collision and \
+ values['_mtime_'] == existing_mtime and \
+ len(new_content) == existing_st.st_size:
+ raise cache_errors.StatCollision(cpv, new_fp,
+ existing_mtime, existing_st.st_size)
+
+ s = cpv.rfind("/")
+ fp = os.path.join(self.location,cpv[:s],
+ ".update.%i.%s" % (os.getpid(), cpv[s+1:]))
+ try:
+ myf = open(_unicode_encode(fp,
+ encoding=_encodings['fs'], errors='strict'), 'wb')
+ except EnvironmentError as e:
+ if errno.ENOENT == e.errno:
+ try:
+ self._ensure_dirs(cpv)
+ myf = open(_unicode_encode(fp,
+ encoding=_encodings['fs'], errors='strict'), 'wb')
+ except EnvironmentError as e:
+ raise cache_errors.CacheCorruption(cpv, e)
+ else:
+ raise cache_errors.CacheCorruption(cpv, e)
+
+ try:
+ myf.write(new_content)
+ finally:
+ myf.close()
+ self._ensure_access(fp, mtime=values["_mtime_"])
+
+ try:
+ os.rename(fp, new_fp)
+ except EnvironmentError as e:
+ try:
+ os.unlink(fp)
+ except EnvironmentError:
+ pass
+ raise cache_errors.CacheCorruption(cpv, e)
diff --git a/portage_with_autodep/pym/portage/cache/metadata_overlay.py b/portage_with_autodep/pym/portage/cache/metadata_overlay.py
new file mode 100644
index 0000000..cfa0051
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/metadata_overlay.py
@@ -0,0 +1,105 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.cache import template
+from portage.cache.cache_errors import CacheCorruption
+from portage.cache.flat_hash import database as db_rw
+from portage.cache.metadata import database as db_ro
+
+class database(template.database):
+
+ serialize_eclasses = False
+
+ def __init__(self, location, label, auxdbkeys, db_rw=db_rw, db_ro=db_ro,
+ *args, **config):
+ super_config = config.copy()
+ super_config.pop("gid", None)
+ super_config.pop("perms", None)
+ super(database, self).__init__(location, label, auxdbkeys,
+ *args, **super_config)
+ self.db_rw = db_rw(location, label, auxdbkeys, **config)
+ self.commit = self.db_rw.commit
+ self.autocommits = self.db_rw.autocommits
+ if isinstance(db_ro, type):
+ ro_config = config.copy()
+ ro_config["readonly"] = True
+ self.db_ro = db_ro(label, "metadata/cache", auxdbkeys, **ro_config)
+ else:
+ self.db_ro = db_ro
+
+ def __getitem__(self, cpv):
+ """funnel whiteout validation through here, since value needs to be fetched"""
+ try:
+ value = self.db_rw[cpv]
+ except KeyError:
+ return self.db_ro[cpv] # raises a KeyError when necessary
+ except CacheCorruption:
+ del self.db_rw[cpv]
+ return self.db_ro[cpv] # raises a KeyError when necessary
+ if self._is_whiteout(value):
+ if self._is_whiteout_valid(cpv, value):
+ raise KeyError(cpv)
+ else:
+ del self.db_rw[cpv]
+ return self.db_ro[cpv] # raises a KeyError when necessary
+ else:
+ return value
+
+ def _setitem(self, name, values):
+ try:
+ value_ro = self.db_ro.get(name)
+ except CacheCorruption:
+ value_ro = None
+ if value_ro is not None and \
+ self._are_values_identical(value_ro, values):
+ # we have matching values in the underlying db_ro
+ # so it is unnecessary to store data in db_rw
+ try:
+ del self.db_rw[name] # delete unwanted whiteout when necessary
+ except KeyError:
+ pass
+ return
+ self.db_rw[name] = values
+
+ def _delitem(self, cpv):
+ value = self[cpv] # validates whiteout and/or raises a KeyError when necessary
+ if cpv in self.db_ro:
+ self.db_rw[cpv] = self._create_whiteout(value)
+ else:
+ del self.db_rw[cpv]
+
+ def __contains__(self, cpv):
+ try:
+ self[cpv] # validates whiteout when necessary
+ except KeyError:
+ return False
+ return True
+
+ def __iter__(self):
+ s = set()
+ for cpv in self.db_rw:
+ if cpv in self: # validates whiteout when necessary
+ yield cpv
+ # set includes whiteouts so they won't be yielded later
+ s.add(cpv)
+ for cpv in self.db_ro:
+ if cpv not in s:
+ yield cpv
+
+ def _is_whiteout(self, value):
+ return value["EAPI"] == "whiteout"
+
+ def _create_whiteout(self, value):
+ return {"EAPI":"whiteout","_eclasses_":value["_eclasses_"],"_mtime_":value["_mtime_"]}
+
+ def _is_whiteout_valid(self, name, value_rw):
+ try:
+ value_ro = self.db_ro[name]
+ return self._are_values_identical(value_rw,value_ro)
+ except KeyError:
+ return False
+
+ def _are_values_identical(self, value1, value2):
+ if value1['_mtime_'] != value2['_mtime_']:
+ return False
+ return value1["_eclasses_"] == value2["_eclasses_"]
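
The overlay's deletion trick: an entry that exists only in the read-only layer is
"deleted" by writing a whiteout marker (EAPI == "whiteout") into the read-write
layer, and any lookup that hits a valid whiteout behaves as a miss. A toy version of
that lookup, using plain dicts and skipping the staleness validation the real class
performs:

	def overlay_get(db_rw, db_ro, cpv):
		try:
			value = db_rw[cpv]
		except KeyError:
			return db_ro[cpv]          # fall through to the ro layer
		if value.get("EAPI") == "whiteout":
			raise KeyError(cpv)        # masked: treat as deleted
		return value
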
diff --git a/portage_with_autodep/pym/portage/cache/sql_template.py b/portage_with_autodep/pym/portage/cache/sql_template.py
new file mode 100644
index 0000000..d023b1b
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/sql_template.py
@@ -0,0 +1,301 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+
+import sys
+from portage.cache import template, cache_errors
+from portage.cache.template import reconstruct_eclasses
+
+class SQLDatabase(template.database):
+ """template class for RDBM based caches
+
+ This class is designed such that derivatives don't have to change much code, mostly constant strings.
+ _BaseError must be an exception class that all Exceptions thrown from the derived RDBMS are derived
+ from.
+
+	SCHEMA_INSERT_CPV_INTO_PACKAGE should be modified depending on the RDBMS, as should SCHEMA_PACKAGE_CREATE;
+ basically you need to deal with creation of a unique pkgid. If the dbapi2 rdbms class has a method of
+ recovering that id, then modify _insert_cpv to remove the extra select.
+
+	Creation of a derived class involves supplying _initdb_con, and _table_exists.
+ Additionally, the default schemas may have to be modified.
+ """
+
+ SCHEMA_PACKAGE_NAME = "package_cache"
+ SCHEMA_PACKAGE_CREATE = "CREATE TABLE %s (\
+ pkgid INTEGER PRIMARY KEY, label VARCHAR(255), cpv VARCHAR(255), UNIQUE(label, cpv))" % SCHEMA_PACKAGE_NAME
+ SCHEMA_PACKAGE_DROP = "DROP TABLE %s" % SCHEMA_PACKAGE_NAME
+
+ SCHEMA_VALUES_NAME = "values_cache"
+ SCHEMA_VALUES_CREATE = "CREATE TABLE %s ( pkgid integer references %s (pkgid) on delete cascade, \
+ key varchar(255), value text, UNIQUE(pkgid, key))" % (SCHEMA_VALUES_NAME, SCHEMA_PACKAGE_NAME)
+ SCHEMA_VALUES_DROP = "DROP TABLE %s" % SCHEMA_VALUES_NAME
+ SCHEMA_INSERT_CPV_INTO_PACKAGE = "INSERT INTO %s (label, cpv) VALUES(%%s, %%s)" % SCHEMA_PACKAGE_NAME
+
+ _BaseError = ()
+ _dbClass = None
+
+ autocommits = False
+# cleanse_keys = True
+
+ # boolean indicating if the derived RDBMS class supports replace syntax
+ _supports_replace = False
+
+ def __init__(self, location, label, auxdbkeys, *args, **config):
+ """initialize the instance.
+ derived classes shouldn't need to override this"""
+
+ super(SQLDatabase, self).__init__(location, label, auxdbkeys, *args, **config)
+
+ config.setdefault("host","127.0.0.1")
+ config.setdefault("autocommit", self.autocommits)
+ self._initdb_con(config)
+
+ self.label = self._sfilter(self.label)
+
+
+ def _dbconnect(self, config):
+ """should be overridden if the derived class needs special parameters for initializing
+ the db connection, or cursor"""
+ self.db = self._dbClass(**config)
+ self.con = self.db.cursor()
+
+
+ def _initdb_con(self,config):
+ """ensure needed tables are in place.
+		If the derived class needs a different set of table creation commands, overload the appropriate
+		SCHEMA_ attributes. If it needs additional execution beyond that, override this method."""
+
+ self._dbconnect(config)
+ if not self._table_exists(self.SCHEMA_PACKAGE_NAME):
+ if self.readonly:
+ raise cache_errors.ReadOnlyRestriction("table %s doesn't exist" % \
+ self.SCHEMA_PACKAGE_NAME)
+ try:
+ self.con.execute(self.SCHEMA_PACKAGE_CREATE)
+ except self._BaseError as e:
+ raise cache_errors.InitializationError(self.__class__, e)
+
+ if not self._table_exists(self.SCHEMA_VALUES_NAME):
+ if self.readonly:
+ raise cache_errors.ReadOnlyRestriction("table %s doesn't exist" % \
+ self.SCHEMA_VALUES_NAME)
+ try:
+ self.con.execute(self.SCHEMA_VALUES_CREATE)
+ except self._BaseError as e:
+ raise cache_errors.InitializationError(self.__class__, e)
+
+
+ def _table_exists(self, tbl):
+ """return true if a table exists
+ derived classes must override this"""
+ raise NotImplementedError
+
+
+ def _sfilter(self, s):
+ """meta escaping, returns quoted string for use in sql statements"""
+ return "\"%s\"" % s.replace("\\","\\\\").replace("\"","\\\"")
+
+
+ def _getitem(self, cpv):
+ try:
+ self.con.execute("SELECT key, value FROM %s NATURAL JOIN %s "
+ "WHERE label=%s AND cpv=%s" % (self.SCHEMA_PACKAGE_NAME, self.SCHEMA_VALUES_NAME,
+ self.label, self._sfilter(cpv)))
+ except self._BaseError as e:
+ raise cache_errors.CacheCorruption(self, cpv, e)
+
+ rows = self.con.fetchall()
+
+ if len(rows) == 0:
+ raise KeyError(cpv)
+
+ vals = dict([(k,"") for k in self._known_keys])
+ vals.update(dict(rows))
+ return vals
+
+
+ def _delitem(self, cpv):
+ """delete a cpv cache entry
+ derived RDBM classes for this *must* either support cascaded deletes, or
+ override this method"""
+ try:
+ try:
+ self.con.execute("DELETE FROM %s WHERE label=%s AND cpv=%s" % \
+ (self.SCHEMA_PACKAGE_NAME, self.label, self._sfilter(cpv)))
+ if self.autocommits:
+ self.commit()
+ except self._BaseError as e:
+ raise cache_errors.CacheCorruption(self, cpv, e)
+ if self.con.rowcount <= 0:
+ raise KeyError(cpv)
+ except SystemExit:
+ raise
+ except Exception:
+ if not self.autocommits:
+ self.db.rollback()
+			# yes, this can roll back a lot more than just the delete. deal.
+ raise
+
+ def __del__(self):
+ # just to be safe.
+		if "db" in self.__dict__ and self.db is not None:
+ self.commit()
+ self.db.close()
+
+ def _setitem(self, cpv, values):
+
+ try:
+ # insert.
+ try:
+ pkgid = self._insert_cpv(cpv)
+ except self._BaseError as e:
+ raise cache_errors.CacheCorruption(cpv, e)
+
+ # __getitem__ fills out missing values,
+ # so we store only what's handed to us and is a known key
+ db_values = []
+ for key in self._known_keys:
+ if key in values and values[key]:
+ db_values.append({"key":key, "value":values[key]})
+
+ if len(db_values) > 0:
+ try:
+ self.con.executemany("INSERT INTO %s (pkgid, key, value) VALUES(\"%s\", %%(key)s, %%(value)s)" % \
+ (self.SCHEMA_VALUES_NAME, str(pkgid)), db_values)
+ except self._BaseError as e:
+ raise cache_errors.CacheCorruption(cpv, e)
+ if self.autocommits:
+ self.commit()
+
+ except SystemExit:
+ raise
+ except Exception:
+ if not self.autocommits:
+ try:
+ self.db.rollback()
+ except self._BaseError:
+ pass
+ raise
+
+
+ def _insert_cpv(self, cpv):
+ """uses SCHEMA_INSERT_CPV_INTO_PACKAGE, which must be overloaded if the table definition
+ doesn't support auto-increment columns for pkgid.
+		returns the cpv's new pkgid
+ note this doesn't commit the transaction. The caller is expected to."""
+
+ cpv = self._sfilter(cpv)
+ if self._supports_replace:
+ query_str = self.SCHEMA_INSERT_CPV_INTO_PACKAGE.replace("INSERT","REPLACE",1)
+ else:
+ # just delete it.
+ try:
+ del self[cpv]
+ except (cache_errors.CacheCorruption, KeyError):
+ pass
+ query_str = self.SCHEMA_INSERT_CPV_INTO_PACKAGE
+ try:
+ self.con.execute(query_str % (self.label, cpv))
+ except self._BaseError:
+ self.db.rollback()
+ raise
+ self.con.execute("SELECT pkgid FROM %s WHERE label=%s AND cpv=%s" % \
+ (self.SCHEMA_PACKAGE_NAME, self.label, cpv))
+
+ if self.con.rowcount != 1:
+			raise cache_errors.CacheCorruption(cpv, "Tried to insert the cpv, but found"
+				" %i matches upon the following select!" % self.con.rowcount)
+ return self.con.fetchone()[0]
+
+
+ def __contains__(self, cpv):
+ if not self.autocommits:
+ try:
+ self.commit()
+ except self._BaseError as e:
+ raise cache_errors.GeneralCacheCorruption(e)
+
+ try:
+ self.con.execute("SELECT cpv FROM %s WHERE label=%s AND cpv=%s" % \
+ (self.SCHEMA_PACKAGE_NAME, self.label, self._sfilter(cpv)))
+ except self._BaseError as e:
+ raise cache_errors.GeneralCacheCorruption(e)
+ return self.con.rowcount > 0
+
+
+ def __iter__(self):
+ if not self.autocommits:
+ try:
+ self.commit()
+ except self._BaseError as e:
+ raise cache_errors.GeneralCacheCorruption(e)
+
+ try:
+ self.con.execute("SELECT cpv FROM %s WHERE label=%s" %
+ (self.SCHEMA_PACKAGE_NAME, self.label))
+ except self._BaseError as e:
+ raise cache_errors.GeneralCacheCorruption(e)
+# return [ row[0] for row in self.con.fetchall() ]
+ for x in self.con.fetchall():
+ yield x[0]
+
+ def iteritems(self):
+ try:
+ self.con.execute("SELECT cpv, key, value FROM %s NATURAL JOIN %s "
+ "WHERE label=%s" % (self.SCHEMA_PACKAGE_NAME, self.SCHEMA_VALUES_NAME,
+ self.label))
+ except self._BaseError as e:
+			raise cache_errors.GeneralCacheCorruption(e)
+
+ oldcpv = None
+ l = []
+		for x, y, v in self.con.fetchall():
+			if oldcpv != x:
+				if oldcpv is not None:
+					d = dict(l)
+					if "_eclasses_" in d:
+						d["_eclasses_"] = reconstruct_eclasses(oldcpv, d["_eclasses_"])
+					else:
+						d["_eclasses_"] = {}
+					yield oldcpv, d
+				del l[:]
+				oldcpv = x
+			l.append((y,v))
+		if oldcpv is not None:
+			d = dict(l)
+			if "_eclasses_" in d:
+				d["_eclasses_"] = reconstruct_eclasses(oldcpv, d["_eclasses_"])
+			else:
+				d["_eclasses_"] = {}
+			yield oldcpv, d
+
+ def commit(self):
+ self.db.commit()
+
+ def get_matches(self,match_dict):
+ query_list = []
+ for k,v in match_dict.items():
+ if k not in self._known_keys:
+ raise cache_errors.InvalidRestriction(k, v, "key isn't known to this cache instance")
+ v = v.replace("%","\\%")
+ v = v.replace(".*","%")
+ query_list.append("(key=%s AND value LIKE %s)" % (self._sfilter(k), self._sfilter(v)))
+
+ if len(query_list):
+ query = " AND "+" AND ".join(query_list)
+ else:
+ query = ''
+
+ print("query = SELECT cpv from package_cache natural join values_cache WHERE label=%s %s" % (self.label, query))
+ try:
+ self.con.execute("SELECT cpv from package_cache natural join values_cache WHERE label=%s %s" % \
+ (self.label, query))
+ except self._BaseError as e:
+ raise cache_errors.GeneralCacheCorruption(e)
+
+ return [ row[0] for row in self.con.fetchall() ]
+
+ if sys.hexversion >= 0x3000000:
+ items = iteritems
+ keys = __iter__
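
As the docstring notes, a concrete backend mostly needs a DB-API error class,
_dbconnect, and _table_exists. A hypothetical sqlite-flavoured subclass, sketched
only to show the extension points (the sqlite3 module and its API are real; the
class itself is illustrative and not part of portage, which ships a separate
sqlite backend):

	import sqlite3

	class SQLiteCache(SQLDatabase):
		_BaseError = sqlite3.Error

		def _dbconnect(self, config):
			# sqlite3.connect() doesn't take host/autocommit kwargs,
			# so bypass the generic _dbClass-based connector.
			self.db = sqlite3.connect(self.location + ".sqlite")
			self.con = self.db.cursor()

		def _table_exists(self, tbl):
			self.con.execute(
				"SELECT name FROM sqlite_master WHERE type='table' AND name=?",
				(tbl,))
			return len(self.con.fetchall()) == 1
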
diff --git a/portage_with_autodep/pym/portage/cache/sqlite.py b/portage_with_autodep/pym/portage/cache/sqlite.py
new file mode 100644
index 0000000..fcc62ff
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/sqlite.py
@@ -0,0 +1,245 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+from portage.cache import fs_template
+from portage.cache import cache_errors
+from portage import os
+from portage import _unicode_decode
+from portage.util import writemsg
+from portage.localization import _
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+class database(fs_template.FsBased):
+
+ autocommits = False
+ synchronous = False
+ # cache_bytes is used together with page_size (set at sqlite build time)
+ # to calculate the number of pages requested, according to the following
+ # equation: cache_bytes = page_bytes * page_count
+ cache_bytes = 1024 * 1024 * 10
+ _db_table = None
+
+ def __init__(self, *args, **config):
+ super(database, self).__init__(*args, **config)
+ self._import_sqlite()
+ self._allowed_keys = ["_mtime_", "_eclasses_"]
+ self._allowed_keys.extend(self._known_keys)
+ self._allowed_keys.sort()
+ self.location = os.path.join(self.location,
+ self.label.lstrip(os.path.sep).rstrip(os.path.sep))
+
+ if not self.readonly and not os.path.exists(self.location):
+ self._ensure_dirs()
+
+ config.setdefault("autocommit", self.autocommits)
+ config.setdefault("cache_bytes", self.cache_bytes)
+ config.setdefault("synchronous", self.synchronous)
+ # Timeout for throwing a "database is locked" exception (pysqlite
+ # default is 5.0 seconds).
+ config.setdefault("timeout", 15)
+ self._db_init_connection(config)
+ self._db_init_structures()
+
+ def _import_sqlite(self):
+ # sqlite3 is optional with >=python-2.5
+ try:
+ import sqlite3 as db_module
+ except ImportError:
+ try:
+ from pysqlite2 import dbapi2 as db_module
+ except ImportError as e:
+ raise cache_errors.InitializationError(self.__class__, e)
+
+ self._db_module = db_module
+ self._db_error = db_module.Error
+
+ def _db_escape_string(self, s):
+ """meta escaping, returns quoted string for use in sql statements"""
+ if not isinstance(s, basestring):
+ # Avoid potential UnicodeEncodeError in python-2.x by
+ # only calling str() when it's absolutely necessary.
+ s = str(s)
+ # This is equivalent to the _quote function from pysqlite 1.1.
+ return "'%s'" % s.replace("'", "''")
+
+ def _db_init_connection(self, config):
+ self._dbpath = self.location + ".sqlite"
+ connection_kwargs = {}
+ connection_kwargs["timeout"] = config["timeout"]
+ try:
+ if not self.readonly:
+ self._ensure_dirs()
+ self._db_connection = self._db_module.connect(
+ database=_unicode_decode(self._dbpath), **connection_kwargs)
+ self._db_cursor = self._db_connection.cursor()
+ self._db_cursor.execute("PRAGMA encoding = %s" % self._db_escape_string("UTF-8"))
+ if not self.readonly and not self._ensure_access(self._dbpath):
+ raise cache_errors.InitializationError(self.__class__, "can't ensure perms on %s" % self._dbpath)
+ self._db_init_cache_size(config["cache_bytes"])
+ self._db_init_synchronous(config["synchronous"])
+ except self._db_error as e:
+ raise cache_errors.InitializationError(self.__class__, e)
+
+ def _db_init_structures(self):
+ self._db_table = {}
+ self._db_table["packages"] = {}
+ mytable = "portage_packages"
+ self._db_table["packages"]["table_name"] = mytable
+ self._db_table["packages"]["package_id"] = "internal_db_package_id"
+ self._db_table["packages"]["package_key"] = "portage_package_key"
+ self._db_table["packages"]["internal_columns"] = \
+ [self._db_table["packages"]["package_id"],
+ self._db_table["packages"]["package_key"]]
+ create_statement = []
+ create_statement.append("CREATE TABLE")
+ create_statement.append(mytable)
+ create_statement.append("(")
+ table_parameters = []
+ table_parameters.append("%s INTEGER PRIMARY KEY AUTOINCREMENT" % self._db_table["packages"]["package_id"])
+ table_parameters.append("%s TEXT" % self._db_table["packages"]["package_key"])
+ for k in self._allowed_keys:
+ table_parameters.append("%s TEXT" % k)
+ table_parameters.append("UNIQUE(%s)" % self._db_table["packages"]["package_key"])
+ create_statement.append(",".join(table_parameters))
+ create_statement.append(")")
+
+ self._db_table["packages"]["create"] = " ".join(create_statement)
+ self._db_table["packages"]["columns"] = \
+ self._db_table["packages"]["internal_columns"] + \
+ self._allowed_keys
+
+ cursor = self._db_cursor
+ for k, v in self._db_table.items():
+ if self._db_table_exists(v["table_name"]):
+ create_statement = self._db_table_get_create(v["table_name"])
+ if create_statement != v["create"]:
+ writemsg(_("sqlite: dropping old table: %s\n") % v["table_name"])
+ cursor.execute("DROP TABLE %s" % v["table_name"])
+ cursor.execute(v["create"])
+ else:
+ cursor.execute(v["create"])
+
+ def _db_table_exists(self, table_name):
+		"""return True/False depending on whether the table exists"""
+ cursor = self._db_cursor
+ cursor.execute("SELECT name FROM sqlite_master WHERE type=\"table\" AND name=%s" % \
+ self._db_escape_string(table_name))
+ return len(cursor.fetchall()) == 1
+
+ def _db_table_get_create(self, table_name):
+		"""return the CREATE statement recorded in sqlite_master for the given table"""
+ cursor = self._db_cursor
+ cursor.execute("SELECT sql FROM sqlite_master WHERE name=%s" % \
+ self._db_escape_string(table_name))
+ return cursor.fetchall()[0][0]
+
+ def _db_init_cache_size(self, cache_bytes):
+ cursor = self._db_cursor
+ cursor.execute("PRAGMA page_size")
+ page_size=int(cursor.fetchone()[0])
+ # number of pages, sqlite default is 2000
+		# integer division: plain '/' would yield a float under Python 3
+		cache_size = cache_bytes // page_size
+ cursor.execute("PRAGMA cache_size = %d" % cache_size)
+ cursor.execute("PRAGMA cache_size")
+ actual_cache_size = int(cursor.fetchone()[0])
+ del cursor
+		if actual_cache_size != cache_size:
+			raise cache_errors.InitializationError(self.__class__,
+				"actual cache_size = %s does not match requested size of %s" %
+				(actual_cache_size, cache_size))
+
+ def _db_init_synchronous(self, synchronous):
+ cursor = self._db_cursor
+ cursor.execute("PRAGMA synchronous = %d" % synchronous)
+ cursor.execute("PRAGMA synchronous")
+ actual_synchronous=int(cursor.fetchone()[0])
+ del cursor
+		if actual_synchronous != synchronous:
+			raise cache_errors.InitializationError(self.__class__,
+				"actual synchronous = %s does not match requested value of %s" %
+				(actual_synchronous, synchronous))
+
+ def _getitem(self, cpv):
+ cursor = self._db_cursor
+ cursor.execute("select * from %s where %s=%s" % \
+ (self._db_table["packages"]["table_name"],
+ self._db_table["packages"]["package_key"],
+ self._db_escape_string(cpv)))
+ result = cursor.fetchall()
+ if len(result) == 1:
+ pass
+ elif len(result) == 0:
+ raise KeyError(cpv)
+ else:
+ raise cache_errors.CacheCorruption(cpv, "key is not unique")
+ d = {}
+ internal_columns = self._db_table["packages"]["internal_columns"]
+ column_index = -1
+ for k in self._db_table["packages"]["columns"]:
+ column_index +=1
+ if k not in internal_columns:
+ d[k] = result[0][column_index]
+
+ return d
+
+ def _setitem(self, cpv, values):
+ update_statement = []
+ update_statement.append("REPLACE INTO %s" % self._db_table["packages"]["table_name"])
+ update_statement.append("(")
+ update_statement.append(','.join([self._db_table["packages"]["package_key"]] + self._allowed_keys))
+ update_statement.append(")")
+ update_statement.append("VALUES")
+ update_statement.append("(")
+ values_parameters = []
+ values_parameters.append(self._db_escape_string(cpv))
+ for k in self._allowed_keys:
+ values_parameters.append(self._db_escape_string(values.get(k, '')))
+ update_statement.append(",".join(values_parameters))
+ update_statement.append(")")
+ cursor = self._db_cursor
+ try:
+ s = " ".join(update_statement)
+ cursor.execute(s)
+ except self._db_error as e:
+ writemsg("%s: %s\n" % (cpv, str(e)))
+ raise
+
+ def commit(self):
+ self._db_connection.commit()
+
+ def _delitem(self, cpv):
+ cursor = self._db_cursor
+ cursor.execute("DELETE FROM %s WHERE %s=%s" % \
+ (self._db_table["packages"]["table_name"],
+ self._db_table["packages"]["package_key"],
+ self._db_escape_string(cpv)))
+
+ def __contains__(self, cpv):
+ cursor = self._db_cursor
+ cursor.execute(" ".join(
+ ["SELECT %s FROM %s" %
+ (self._db_table["packages"]["package_id"],
+ self._db_table["packages"]["table_name"]),
+ "WHERE %s=%s" % (
+ self._db_table["packages"]["package_key"],
+ self._db_escape_string(cpv))]))
+ result = cursor.fetchall()
+ if len(result) == 0:
+ return False
+ elif len(result) == 1:
+ return True
+ else:
+ raise cache_errors.CacheCorruption(cpv, "key is not unique")
+
+ def __iter__(self):
+ """generator for walking the dir struct"""
+ cursor = self._db_cursor
+ cursor.execute("SELECT %s FROM %s" % \
+ (self._db_table["packages"]["package_key"],
+ self._db_table["packages"]["table_name"]))
+ result = cursor.fetchall()
+ key_list = [x[0] for x in result]
+ del result
+ while key_list:
+ yield key_list.pop()
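
The cache-size handshake in _db_init_cache_size is plain arithmetic: sqlite counts
its page cache in pages, so the byte budget is converted with integer division
(which is also why '//' matters on Python 3). For the default 10 MiB budget and a
common 4096-byte page:

	cache_bytes = 1024 * 1024 * 10
	page_size = 4096
	cache_size = cache_bytes // page_size   # 2560 pages
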
diff --git a/portage_with_autodep/pym/portage/cache/template.py b/portage_with_autodep/pym/portage/cache/template.py
new file mode 100644
index 0000000..f84d8f4
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/template.py
@@ -0,0 +1,236 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+
+from portage.cache import cache_errors
+from portage.cache.cache_errors import InvalidRestriction
+from portage.cache.mappings import ProtectedDict
+import sys
+import warnings
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+ long = int
+
+class database(object):
+ # this is for metadata/cache transfer.
+	# basically flags whether the cache needs to be updated when transferring cache to cache.
+ # leave this.
+
+ complete_eclass_entries = True
+ autocommits = False
+ cleanse_keys = False
+ serialize_eclasses = True
+
+ def __init__(self, location, label, auxdbkeys, readonly=False):
+ """ initialize the derived class; specifically, store label/keys"""
+ self._known_keys = auxdbkeys
+ self.location = location
+ self.label = label
+ self.readonly = readonly
+ self.sync_rate = 0
+ self.updates = 0
+
+ def __getitem__(self, cpv):
+		"""get the values for a cpv.
+		This shouldn't be overridden in derived classes since it handles the _eclasses_ conversion;
+		that said, if the class handles it, it can override this."""
+ if self.updates > self.sync_rate:
+ self.commit()
+ self.updates = 0
+ d=self._getitem(cpv)
+ if self.serialize_eclasses and "_eclasses_" in d:
+ d["_eclasses_"] = reconstruct_eclasses(cpv, d["_eclasses_"])
+ elif "_eclasses_" not in d:
+ d["_eclasses_"] = {}
+ mtime = d.get('_mtime_')
+ if mtime is None:
+ raise cache_errors.CacheCorruption(cpv,
+ '_mtime_ field is missing')
+ try:
+ mtime = long(mtime)
+ except ValueError:
+ raise cache_errors.CacheCorruption(cpv,
+ '_mtime_ conversion to long failed: %s' % (mtime,))
+ d['_mtime_'] = mtime
+ return d
+
+ def _getitem(self, cpv):
+ """get cpv's values.
+		override this in derived classes"""
+ raise NotImplementedError
+
+ def __setitem__(self, cpv, values):
+ """set a cpv to values
+		This shouldn't be overridden in derived classes since it handles the readonly checks"""
+ if self.readonly:
+ raise cache_errors.ReadOnlyRestriction()
+ if self.cleanse_keys:
+ d=ProtectedDict(values)
+ for k, v in list(d.items()):
+ if not v:
+ del d[k]
+ if self.serialize_eclasses and "_eclasses_" in values:
+ d["_eclasses_"] = serialize_eclasses(d["_eclasses_"])
+ elif self.serialize_eclasses and "_eclasses_" in values:
+ d = ProtectedDict(values)
+ d["_eclasses_"] = serialize_eclasses(d["_eclasses_"])
+ else:
+ d = values
+ self._setitem(cpv, d)
+ if not self.autocommits:
+ self.updates += 1
+ if self.updates > self.sync_rate:
+ self.commit()
+ self.updates = 0
+
+ def _setitem(self, name, values):
+ """__setitem__ calls this after readonly checks. override it in derived classes
+		note the _eclasses_ key *must* be handled"""
+ raise NotImplementedError
+
+ def __delitem__(self, cpv):
+ """delete a key from the cache.
+		This shouldn't be overridden in derived classes since it handles the readonly checks"""
+ if self.readonly:
+ raise cache_errors.ReadOnlyRestriction()
+ if not self.autocommits:
+ self.updates += 1
+ self._delitem(cpv)
+ if self.updates > self.sync_rate:
+ self.commit()
+ self.updates = 0
+
+ def _delitem(self,cpv):
+ """__delitem__ calls this after readonly checks. override it in derived classes"""
+ raise NotImplementedError
+
+ def has_key(self, cpv):
+ return cpv in self
+
+ def keys(self):
+ return list(self)
+
+ def iterkeys(self):
+ return iter(self)
+
+ def iteritems(self):
+ for x in self:
+ yield (x, self[x])
+
+ def items(self):
+ return list(self.iteritems())
+
+ def sync(self, rate=0):
+ self.sync_rate = rate
+		if rate == 0:
+ self.commit()
+
+ def commit(self):
+ if not self.autocommits:
+ raise NotImplementedError
+
+ def __contains__(self, cpv):
+ """This method should always be overridden. It is provided only for
+ backward compatibility with modules that override has_key instead. It
+ will automatically raise a NotImplementedError if has_key has not been
+ overridden."""
+ if self.has_key is database.has_key:
+ # prevent a possible recursive loop
+ raise NotImplementedError
+ warnings.warn("portage.cache.template.database.has_key() is "
+ "deprecated, override __contains__ instead",
+ DeprecationWarning)
+ return self.has_key(cpv)
+
+ def __iter__(self):
+ """This method should always be overridden. It is provided only for
+ backward compatibility with modules that override iterkeys instead. It
+ will automatically raise a NotImplementedError if iterkeys has not been
+ overridden."""
+ if self.iterkeys is database.iterkeys:
+ # prevent a possible recursive loop
+ raise NotImplementedError(self)
+ return iter(self.keys())
+
+ def get(self, k, x=None):
+ try:
+ return self[k]
+ except KeyError:
+ return x
+
+ def get_matches(self, match_dict):
+ """generic function for walking the entire cache db, matching restrictions to
+ filter what cpv's are returned. Derived classes should override this if they
+	can implement a faster method than pulling each cpv:values and checking it.
+
+ For example, RDBMS derived classes should push the matching logic down to the
+ actual RDBM."""
+
+ import re
+ restricts = {}
+ for key,match in match_dict.items():
+ # XXX this sucks.
+ try:
+ if isinstance(match, basestring):
+ restricts[key] = re.compile(match).match
+ else:
+ restricts[key] = re.compile(match[0],match[1]).match
+ except re.error as e:
+ raise InvalidRestriction(key, match, e)
+			if key not in self._known_keys:
+ raise InvalidRestriction(key, match, "Key isn't valid")
+
+ for cpv in self:
+ cont = True
+ vals = self[cpv]
+ for key, match in restricts.items():
+ if not match(vals[key]):
+ cont = False
+ break
+ if cont:
+ yield cpv
+
+ if sys.hexversion >= 0x3000000:
+ keys = __iter__
+ items = iteritems
+
+def serialize_eclasses(eclass_dict):
+ """takes a dict, returns a string representing said dict"""
+ """The "new format", which causes older versions of <portage-2.1.2 to
+ traceback with a ValueError due to failed long() conversion. This format
+	isn't currently written, but the capability to read it is already built
+ in.
+ return "\t".join(["%s\t%s" % (k, str(v)) \
+ for k, v in eclass_dict.iteritems()])
+ """
+ if not eclass_dict:
+ return ""
+ return "\t".join(k + "\t%s\t%s" % eclass_dict[k] \
+ for k in sorted(eclass_dict))
+
+def reconstruct_eclasses(cpv, eclass_string):
+ """returns a dict when handed a string generated by serialize_eclasses"""
+	eclasses = eclass_string.strip().split("\t")
+ if eclasses == [""]:
+ # occasionally this occurs in the fs backends. they suck.
+ return {}
+
+ if len(eclasses) % 2 != 0 and len(eclasses) % 3 != 0:
+ raise cache_errors.CacheCorruption(cpv, "_eclasses_ was of invalid len %i" % len(eclasses))
+ d={}
+ try:
+ if eclasses[1].isdigit():
+ for x in range(0, len(eclasses), 2):
+ d[eclasses[x]] = ("", long(eclasses[x + 1]))
+ else:
+ # The old format contains paths that will be discarded.
+ for x in range(0, len(eclasses), 3):
+ d[eclasses[x]] = (eclasses[x + 1], long(eclasses[x + 2]))
+ except IndexError:
+ raise cache_errors.CacheCorruption(cpv,
+ "_eclasses_ was of invalid len %i" % len(eclasses))
+ except ValueError:
+ raise cache_errors.CacheCorruption(cpv, "_eclasses_ mtime conversion to long failed")
+ del eclasses
+ return d
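
serialize_eclasses emits one tab-separated (name, path, mtime) triple per eclass,
sorted by name, and reconstruct_eclasses folds the string back into a dict (the
two-field branch handles the never-written "new format"). A round trip under that
reading, with made-up paths and mtimes:

	d = {"eutils": ("/usr/portage/eclass", 1313000000),
		"toolchain": ("/usr/portage/eclass", 1313000001)}
	s = serialize_eclasses(d)
	# 'eutils\t/usr/portage/eclass\t1313000000\ttoolchain\t...'
	assert reconstruct_eclasses("dev-lang/python-2.7.1", s) == d
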
diff --git a/portage_with_autodep/pym/portage/cache/util.py b/portage_with_autodep/pym/portage/cache/util.py
new file mode 100644
index 0000000..b824689
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/util.py
@@ -0,0 +1,170 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+
+from __future__ import print_function
+
+__all__ = ["mirror_cache", "non_quiet_mirroring", "quiet_mirroring"]
+
+from itertools import chain
+from portage.cache import cache_errors
+from portage.localization import _
+
+def mirror_cache(valid_nodes_iterable, src_cache, trg_cache, eclass_cache=None, verbose_instance=None):
+
+ from portage import eapi_is_supported, \
+ _validate_cache_for_unsupported_eapis
+ if not src_cache.complete_eclass_entries and not eclass_cache:
+		raise Exception("eclass_cache required for caches of class %s!" % src_cache.__class__)
+
+	if verbose_instance is None:
+		noise = quiet_mirroring()
+	else:
+		noise = verbose_instance
+
+ dead_nodes = set(trg_cache)
+ count=0
+
+ if not trg_cache.autocommits:
+ trg_cache.sync(100)
+
+ for x in valid_nodes_iterable:
+ count+=1
+ dead_nodes.discard(x)
+ try:
+ entry = src_cache[x]
+ except KeyError as e:
+ noise.missing_entry(x)
+ del e
+ continue
+ except cache_errors.CacheError as ce:
+ noise.exception(x, ce)
+ del ce
+ continue
+
+ eapi = entry.get('EAPI')
+ if not eapi:
+ eapi = '0'
+ eapi = eapi.lstrip('-')
+ eapi_supported = eapi_is_supported(eapi)
+ if not eapi_supported:
+ if not _validate_cache_for_unsupported_eapis:
+ noise.misc(x, _("unable to validate cache for EAPI='%s'") % eapi)
+ continue
+
+ write_it = True
+ trg = None
+ try:
+ trg = trg_cache[x]
+ except (KeyError, cache_errors.CacheError):
+ pass
+ else:
+ if trg['_mtime_'] == entry['_mtime_'] and \
+ eclass_cache.is_eclass_data_valid(trg['_eclasses_']) and \
+ set(trg['_eclasses_']) == set(entry['_eclasses_']):
+ write_it = False
+
+ for d in (entry, trg):
+ if d is not None and d.get('EAPI') in ('', '0'):
+ del d['EAPI']
+
+ if trg and not write_it:
+ """ We don't want to skip the write unless we're really sure that
+ the existing cache is identical, so don't trust _mtime_ and
+ _eclasses_ alone."""
+ for k in set(chain(entry, trg)).difference(
+ ("_mtime_", "_eclasses_")):
+ if trg.get(k, "") != entry.get(k, ""):
+ write_it = True
+ break
+
+ if write_it:
+ try:
+ inherited = entry.get("INHERITED", "")
+ eclasses = entry.get("_eclasses_")
+ except cache_errors.CacheError as ce:
+ noise.exception(x, ce)
+ del ce
+ continue
+
+ if eclasses is not None:
+ if not eclass_cache.is_eclass_data_valid(entry["_eclasses_"]):
+ noise.eclass_stale(x)
+ continue
+ inherited = eclasses
+ else:
+ inherited = inherited.split()
+
+ if inherited:
+ if src_cache.complete_eclass_entries and eclasses is None:
+ noise.corruption(x, "missing _eclasses_ field")
+ continue
+
+ # Even if _eclasses_ already exists, replace it with data from
+ # eclass_cache, in order to insert local eclass paths.
+ try:
+ eclasses = eclass_cache.get_eclass_data(inherited)
+ except KeyError:
+ # INHERITED contains a non-existent eclass.
+ noise.eclass_stale(x)
+ continue
+
+ if eclasses is None:
+ noise.eclass_stale(x)
+ continue
+ entry["_eclasses_"] = eclasses
+
+ if not eapi_supported:
+ for k in set(entry).difference(("_mtime_", "_eclasses_")):
+ entry[k] = ""
+ entry["EAPI"] = "-" + eapi
+
+		# By the time control reaches here, the eclasses have been validated
+		# and the entry has been updated/translated if necessary (mainly for
+		# metadata/cache).
+ try:
+ trg_cache[x] = entry
+ except cache_errors.CacheError as ce:
+ noise.exception(x, ce)
+ del ce
+ continue
+ if count >= noise.call_update_min:
+ noise.update(x)
+ count = 0
+
+ if not trg_cache.autocommits:
+ trg_cache.commit()
+
+	# At this point the trg_cache is up to date, and dead_nodes holds the
+	# cpvs that were never seen in src_cache, so remove them from the target db.
+ for key in dead_nodes:
+ try:
+ del trg_cache[key]
+ except KeyError:
+ pass
+ except cache_errors.CacheError as ce:
+			noise.exception(key, ce)
+ del ce
+ noise.finish()
+
+
+class quiet_mirroring(object):
+	# call_update_min is used by mirror_cache to determine how often to call update().
+	# quiet defaults to 2^24 - 1: don't call update, 'cept once every 16 million or so :)
+ call_update_min = 0xffffff
+ def update(self,key,*arg): pass
+ def exception(self,key,*arg): pass
+ def eclass_stale(self,*arg): pass
+ def missing_entry(self, key): pass
+ def misc(self,key,*arg): pass
+ def corruption(self, key, s): pass
+ def finish(self, *arg): pass
+
+class non_quiet_mirroring(quiet_mirroring):
+ call_update_min=1
+ def update(self,key,*arg): print("processed",key)
+ def exception(self, key, *arg): print("exec",key,arg)
+ def missing(self,key): print("key %s is missing", key)
+ def corruption(self,key,*arg): print("corrupt %s:" % key,arg)
+ def eclass_stale(self,key,*arg):print("stale %s:"%key,arg)
+
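
The noise classes above define the full reporting protocol that mirror_cache()
calls into, so a custom reporter only needs to subclass quiet_mirroring and
override the callbacks it cares about. A hypothetical example (the threshold
and messages are illustrative):

    # Sketch: a reporter that counts cache errors and reports progress
    # roughly every 100 entries via the call_update_min threshold.
    from portage.cache.util import quiet_mirroring

    class counting_mirroring(quiet_mirroring):
        call_update_min = 100
        def __init__(self):
            self.errors = 0
        def update(self, key, *arg):
            print("still going, last cpv:", key)
        def exception(self, key, *arg):
            self.errors += 1
        def finish(self, *arg):
            print("done, %d cache errors seen" % self.errors)

    # mirror_cache(valid_cpvs, src_cache, trg_cache,
    #              eclass_cache=ec, verbose_instance=counting_mirroring())
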
diff --git a/portage_with_autodep/pym/portage/cache/volatile.py b/portage_with_autodep/pym/portage/cache/volatile.py
new file mode 100644
index 0000000..0bf6bab
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/volatile.py
@@ -0,0 +1,25 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import copy
+from portage.cache import template
+
+class database(template.database):
+
+ autocommits = True
+ serialize_eclasses = False
+
+ def __init__(self, *args, **config):
+ config.pop("gid", None)
+ config.pop("perms", None)
+ super(database, self).__init__(*args, **config)
+ self._data = {}
+ self.__iter__ = self._data.__iter__
+ self._delitem = self._data.__delitem__
+ self.__contains__ = self._data.__contains__
+
+ def _setitem(self, name, values):
+ self._data[name] = copy.deepcopy(values)
+
+ def _getitem(self, cpv):
+ return copy.deepcopy(self._data[cpv])
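
A volatile database keeps everything in a plain dict and deep-copies values on
the way in and out, so callers cannot mutate cached entries behind its back. A
hypothetical usage sketch, assuming the (location, label, auxdbkeys)
constructor signature of template.database:

    # Sketch: an in-memory metadata cache; all values are illustrative.
    from portage.cache import volatile

    db = volatile.database("/unused", "test", ("EAPI", "SLOT"))
    db["app-misc/foo-1"] = {"EAPI": "4", "SLOT": "0"}
    entry = db["app-misc/foo-1"]
    entry["SLOT"] = "1"                  # mutates only the copy
    print(db["app-misc/foo-1"]["SLOT"])  # still "0"
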
diff --git a/portage_with_autodep/pym/portage/checksum.py b/portage_with_autodep/pym/portage/checksum.py
new file mode 100644
index 0000000..9e7e455
--- /dev/null
+++ b/portage_with_autodep/pym/portage/checksum.py
@@ -0,0 +1,291 @@
+# checksum.py -- core Portage functionality
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage.const import PRELINK_BINARY,HASHING_BLOCKSIZE
+from portage.localization import _
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+import errno
+import stat
+import tempfile
+
+#dict of all available hash functions
+hashfunc_map = {}
+hashorigin_map = {}
+
+def _generate_hash_function(hashtype, hashobject, origin="unknown"):
+ def pyhash(filename):
+ """
+ Run a checksum against a file.
+
+ @param filename: File to run the checksum against
+ @type filename: String
+ @return: The hash and size of the data
+ """
+ try:
+ f = open(_unicode_encode(filename,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ except IOError as e:
+ func_call = "open('%s')" % filename
+ if e.errno == errno.EPERM:
+ raise portage.exception.OperationNotPermitted(func_call)
+ elif e.errno == errno.EACCES:
+ raise portage.exception.PermissionDenied(func_call)
+ elif e.errno == errno.ENOENT:
+ raise portage.exception.FileNotFound(filename)
+ else:
+ raise
+ blocksize = HASHING_BLOCKSIZE
+ data = f.read(blocksize)
+ size = 0
+ checksum = hashobject()
+ while data:
+ checksum.update(data)
+ size = size + len(data)
+ data = f.read(blocksize)
+ f.close()
+
+ return (checksum.hexdigest(), size)
+ hashfunc_map[hashtype] = pyhash
+ hashorigin_map[hashtype] = origin
+ return pyhash
+
+# Define hash functions, try to use the best module available. Later definitions
+# override earlier ones
+
+# Use the internal modules as last fallback
+try:
+ from hashlib import md5 as _new_md5
+except ImportError:
+ from md5 import new as _new_md5
+
+md5hash = _generate_hash_function("MD5", _new_md5, origin="internal")
+
+try:
+ from hashlib import sha1 as _new_sha1
+except ImportError:
+ from sha import new as _new_sha1
+
+sha1hash = _generate_hash_function("SHA1", _new_sha1, origin="internal")
+
+# Use pycrypto when available, prefer it over the internal fallbacks
+try:
+ from Crypto.Hash import SHA256, RIPEMD
+ sha256hash = _generate_hash_function("SHA256", SHA256.new, origin="pycrypto")
+ rmd160hash = _generate_hash_function("RMD160", RIPEMD.new, origin="pycrypto")
+except ImportError as e:
+ pass
+
+# Use hashlib from python-2.5 if available and prefer it over pycrypto and internal fallbacks.
+# Need special handling for RMD160 as it may not always be provided by hashlib.
+try:
+ import hashlib
+
+ md5hash = _generate_hash_function("MD5", hashlib.md5, origin="hashlib")
+ sha1hash = _generate_hash_function("SHA1", hashlib.sha1, origin="hashlib")
+ sha256hash = _generate_hash_function("SHA256", hashlib.sha256, origin="hashlib")
+ try:
+ hashlib.new('ripemd160')
+ except ValueError:
+ pass
+ else:
+ def rmd160():
+ return hashlib.new('ripemd160')
+ rmd160hash = _generate_hash_function("RMD160", rmd160, origin="hashlib")
+except ImportError as e:
+ pass
+
+
+# Use python-fchksum if available, prefer it over all other MD5 implementations
+try:
+ import fchksum
+
+ def md5hash(filename):
+ return fchksum.fmd5t(filename)
+ hashfunc_map["MD5"] = md5hash
+ hashorigin_map["MD5"] = "python-fchksum"
+
+except ImportError:
+ pass
+
+# There is only one implementation for size
+def getsize(filename):
+ size = os.stat(filename).st_size
+ return (size, size)
+hashfunc_map["size"] = getsize
+
+# end actual hash functions
+
+prelink_capable = False
+if os.path.exists(PRELINK_BINARY):
+ results = portage.subprocess_getstatusoutput(
+ "%s --version > /dev/null 2>&1" % (PRELINK_BINARY,))
+ if (results[0] >> 8) == 0:
+		prelink_capable = True
+ del results
+
+def perform_md5(x, calc_prelink=0):
+ return perform_checksum(x, "MD5", calc_prelink)[0]
+
+def _perform_md5_merge(x, **kwargs):
+ return perform_md5(_unicode_encode(x,
+ encoding=_encodings['merge'], errors='strict'), **kwargs)
+
+def perform_all(x, calc_prelink=0):
+ mydict = {}
+ for k in hashfunc_map:
+		mydict[k] = perform_checksum(x, k, calc_prelink)[0]
+ return mydict
+
+def get_valid_checksum_keys():
+ return list(hashfunc_map)
+
+def get_hash_origin(hashtype):
+ if hashtype not in hashfunc_map:
+ raise KeyError(hashtype)
+ return hashorigin_map.get(hashtype, "unknown")
+
+def verify_all(filename, mydict, calc_prelink=0, strict=0):
+ """
+ Verify all checksums against a file.
+
+ @param filename: File to run the checksums against
+ @type filename: String
+ @param calc_prelink: Whether or not to reverse prelink before running the checksum
+ @type calc_prelink: Integer
+ @param strict: Enable/Disable strict checking (which stops exactly at a checksum failure and throws an exception)
+ @type strict: Integer
+ @rtype: Tuple
+ @return: Result of the checks and possible message:
+ 1) If size fails, False, and a tuple containing a message, the given size, and the actual size
+ 2) If there is an os error, False, and a tuple containing the system error followed by 2 nulls
+ 3) If a checksum fails, False and a tuple containing a message, the given hash, and the actual hash
+ 4) If all checks succeed, return True and a fake reason
+ """
+ # Dict relates to single file only.
+ # returns: (passed,reason)
+ file_is_ok = True
+ reason = "Reason unknown"
+ try:
+ mysize = os.stat(filename)[stat.ST_SIZE]
+ if mydict["size"] != mysize:
+ return False,(_("Filesize does not match recorded size"), mysize, mydict["size"])
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ raise portage.exception.FileNotFound(filename)
+ return False, (str(e), None, None)
+
+ verifiable_hash_types = set(mydict).intersection(hashfunc_map)
+ verifiable_hash_types.discard("size")
+ if not verifiable_hash_types:
+ expected = set(hashfunc_map)
+ expected.discard("size")
+ expected = list(expected)
+ expected.sort()
+ expected = " ".join(expected)
+ got = set(mydict)
+ got.discard("size")
+ got = list(got)
+ got.sort()
+ got = " ".join(got)
+ return False, (_("Insufficient data for checksum verification"), got, expected)
+
+ for x in sorted(mydict):
+ if x == "size":
+ continue
+ elif x in hashfunc_map:
+ myhash = perform_checksum(filename, x, calc_prelink=calc_prelink)[0]
+ if mydict[x] != myhash:
+ if strict:
+ raise portage.exception.DigestException(
+ ("Failed to verify '$(file)s' on " + \
+ "checksum type '%(type)s'") % \
+ {"file" : filename, "type" : x})
+ else:
+ file_is_ok = False
+ reason = (("Failed on %s verification" % x), myhash,mydict[x])
+ break
+ return file_is_ok,reason
+
+def perform_checksum(filename, hashname="MD5", calc_prelink=0):
+ """
+ Run a specific checksum against a file. The filename can
+ be either unicode or an encoded byte string. If filename
+ is unicode then a UnicodeDecodeError will be raised if
+ necessary.
+
+ @param filename: File to run the checksum against
+ @type filename: String
+ @param hashname: The type of hash function to run
+ @type hashname: String
+ @param calc_prelink: Whether or not to reverse prelink before running the checksum
+ @type calc_prelink: Integer
+ @rtype: Tuple
+ @return: The hash and size of the data
+ """
+ global prelink_capable
+ # Make sure filename is encoded with the correct encoding before
+ # it is passed to spawn (for prelink) and/or the hash function.
+ filename = _unicode_encode(filename,
+ encoding=_encodings['fs'], errors='strict')
+ myfilename = filename
+ prelink_tmpfile = None
+ try:
+ if calc_prelink and prelink_capable:
+ # Create non-prelinked temporary file to checksum.
+ # Files rejected by prelink are summed in place.
+ try:
+ tmpfile_fd, prelink_tmpfile = tempfile.mkstemp()
+ try:
+ retval = portage.process.spawn([PRELINK_BINARY,
+ "--verify", filename], fd_pipes={1:tmpfile_fd})
+ finally:
+ os.close(tmpfile_fd)
+ if retval == os.EX_OK:
+ myfilename = prelink_tmpfile
+ except portage.exception.CommandNotFound:
+ # This happens during uninstallation of prelink.
+ prelink_capable = False
+ try:
+ if hashname not in hashfunc_map:
+ raise portage.exception.DigestException(hashname + \
+ " hash function not available (needs dev-python/pycrypto)")
+ myhash, mysize = hashfunc_map[hashname](myfilename)
+ except (OSError, IOError) as e:
+ if e.errno == errno.ENOENT:
+ raise portage.exception.FileNotFound(myfilename)
+ raise
+ return myhash, mysize
+ finally:
+ if prelink_tmpfile:
+ try:
+ os.unlink(prelink_tmpfile)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+
+def perform_multiple_checksums(filename, hashes=["MD5"], calc_prelink=0):
+ """
+ Run a group of checksums against a file.
+
+ @param filename: File to run the checksums against
+ @type filename: String
+ @param hashes: A list of checksum functions to run against the file
+	@type hashes: List
+ @param calc_prelink: Whether or not to reverse prelink before running the checksum
+ @type calc_prelink: Integer
+	@rtype: Dict
+	@return: A dictionary in the form:
+		return_value[hash_name] = hash_result
+		for each given checksum
+ """
+ rVal = {}
+ for x in hashes:
+ if x not in hashfunc_map:
+ raise portage.exception.DigestException(x+" hash function not available (needs dev-python/pycrypto or >=dev-lang/python-2.5)")
+ rVal[x] = perform_checksum(filename, x, calc_prelink)[0]
+ return rVal
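
In practice the two main entry points above are perform_checksum() for a
single digest and verify_all() for checking a file against a recorded digest
set. A hypothetical round trip (the path is a placeholder):

    # Sketch: record a file's MD5 and size, then verify both.
    from portage.checksum import perform_checksum, verify_all

    digest, size = perform_checksum("/etc/hostname", hashname="MD5")
    ok, reason = verify_all("/etc/hostname", {"size": size, "MD5": digest})
    print(ok, reason)  # True and the placeholder "Reason unknown" on success
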
diff --git a/portage_with_autodep/pym/portage/const.py b/portage_with_autodep/pym/portage/const.py
new file mode 100644
index 0000000..f34398d
--- /dev/null
+++ b/portage_with_autodep/pym/portage/const.py
@@ -0,0 +1,143 @@
+# portage: Constants
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+
+# ===========================================================================
+# START OF CONSTANTS -- START OF CONSTANTS -- START OF CONSTANTS -- START OF
+# ===========================================================================
+
+# There are two types of variables here which can easily be confused,
+# resulting in arbitrary bugs, mainly exposed with an offset
+# installation (Prefix). The two types relate to the usage of
+# config_root or target_root.
+# The first, config_root (PORTAGE_CONFIGROOT), can be a path somewhere,
+# from which all derived paths need to be relative (e.g.
+# USER_CONFIG_PATH) without EPREFIX prepended in Prefix. This means
+# config_root can for instance be set to "$HOME/my/config". Obviously,
+# in such case it is not appropriate to prepend EPREFIX to derived
+# constants. The default value of config_root is EPREFIX (in non-Prefix
+# the empty string) -- overriding the value loses the EPREFIX as one
+# would expect.
+# Second there is target_root (ROOT) which is used to install somewhere
+# completely else, in Prefix of limited use. Because this is an offset
+# always given, the EPREFIX should always be applied in it, hence the
+# code always prefixes them with EROOT.
+# The variables in this file are grouped by config_root, target_root.
+
+# variables used with config_root (these need to be relative)
+MAKE_CONF_FILE = "etc/make.conf"
+USER_CONFIG_PATH = "etc/portage"
+MODULES_FILE_PATH = USER_CONFIG_PATH + "/modules"
+CUSTOM_PROFILE_PATH = USER_CONFIG_PATH + "/profile"
+USER_VIRTUALS_FILE = USER_CONFIG_PATH + "/virtuals"
+EBUILD_SH_ENV_FILE = USER_CONFIG_PATH + "/bashrc"
+EBUILD_SH_ENV_DIR = USER_CONFIG_PATH + "/env"
+CUSTOM_MIRRORS_FILE = USER_CONFIG_PATH + "/mirrors"
+COLOR_MAP_FILE = USER_CONFIG_PATH + "/color.map"
+PROFILE_PATH = "etc/make.profile"
+MAKE_DEFAULTS_FILE = PROFILE_PATH + "/make.defaults" # FIXME: not used
+DEPRECATED_PROFILE_FILE = PROFILE_PATH + "/deprecated"
+
+# variables used with targetroot (these need to be absolute, but not
+# have a leading '/' since they are used directly with os.path.join on EROOT)
+VDB_PATH = "var/db/pkg"
+CACHE_PATH = "var/cache/edb"
+PRIVATE_PATH = "var/lib/portage"
+WORLD_FILE = PRIVATE_PATH + "/world"
+WORLD_SETS_FILE = PRIVATE_PATH + "/world_sets"
+CONFIG_MEMORY_FILE = PRIVATE_PATH + "/config"
+NEWS_LIB_PATH = "var/lib/gentoo"
+
+# these variables get EPREFIX prepended automagically when they are
+# translated into their lowercase variants
+DEPCACHE_PATH = "/var/cache/edb/dep"
+GLOBAL_CONFIG_PATH = "/usr/share/portage/config"
+
+# these variables are not used with target_root or config_root
+PORTAGE_BASE_PATH = os.path.join(os.sep, os.sep.join(__file__.split(os.sep)[:-3]))
+PORTAGE_BIN_PATH = PORTAGE_BASE_PATH + "/bin"
+PORTAGE_PYM_PATH = PORTAGE_BASE_PATH + "/pym"
+LOCALE_DATA_PATH = PORTAGE_BASE_PATH + "/locale" # FIXME: not used
+EBUILD_SH_BINARY = PORTAGE_BIN_PATH + "/ebuild.sh"
+MISC_SH_BINARY = PORTAGE_BIN_PATH + "/misc-functions.sh"
+SANDBOX_BINARY = "/usr/bin/sandbox"
+FAKEROOT_BINARY = "/usr/bin/fakeroot"
+BASH_BINARY = "/bin/bash"
+MOVE_BINARY = "/bin/mv"
+PRELINK_BINARY = "/usr/sbin/prelink"
+AUTODEP_LIBRARY = "/usr/lib/file_hook.so"
+
+
+INVALID_ENV_FILE = "/etc/spork/is/not/valid/profile.env"
+REPO_NAME_FILE = "repo_name"
+REPO_NAME_LOC = "profiles" + "/" + REPO_NAME_FILE
+
+PORTAGE_PACKAGE_ATOM = "sys-apps/portage"
+LIBC_PACKAGE_ATOM = "virtual/libc"
+OS_HEADERS_PACKAGE_ATOM = "virtual/os-headers"
+
+INCREMENTALS = ("USE", "USE_EXPAND", "USE_EXPAND_HIDDEN",
+ "FEATURES", "ACCEPT_KEYWORDS",
+ "CONFIG_PROTECT_MASK", "CONFIG_PROTECT",
+ "PRELINK_PATH", "PRELINK_PATH_MASK",
+ "PROFILE_ONLY_VARIABLES")
+EBUILD_PHASES = ("pretend", "setup", "unpack", "prepare", "configure",
+ "compile", "test", "install",
+ "package", "preinst", "postinst","prerm", "postrm",
+ "nofetch", "config", "info", "other")
+SUPPORTED_FEATURES = frozenset([
+ "allow-missing-manifests",
+ "assume-digests", "binpkg-logs", "buildpkg", "buildsyspkg", "candy",
+ "ccache", "chflags", "collision-protect", "compress-build-logs",
+ "depcheck", "depcheckstrict",
+ "digest", "distcc", "distcc-pump", "distlocks", "ebuild-locks", "fakeroot",
+ "fail-clean", "fixpackages", "force-mirror", "getbinpkg",
+ "installsources", "keeptemp", "keepwork", "fixlafiles", "lmirror",
+ "metadata-transfer", "mirror", "multilib-strict", "news",
+ "noauto", "noclean", "nodoc", "noinfo", "noman",
+ "nostrip", "notitles", "parallel-fetch", "parallel-install",
+ "parse-eapi-ebuild-head",
+ "prelink-checksums", "preserve-libs",
+ "protect-owned", "python-trace", "sandbox",
+ "selinux", "sesandbox", "sfperms",
+ "sign", "skiprocheck", "split-elog", "split-log", "splitdebug",
+ "strict", "stricter", "suidctl", "test", "test-fail-continue",
+ "unknown-features-filter", "unknown-features-warn",
+ "unmerge-logs", "unmerge-orphans", "userfetch", "userpriv",
+ "usersandbox", "usersync", "webrsync-gpg"])
+
+EAPI = 4
+
+HASHING_BLOCKSIZE = 32768
+MANIFEST1_HASH_FUNCTIONS = ("MD5", "SHA256", "RMD160")
+MANIFEST2_HASH_FUNCTIONS = ("SHA1", "SHA256", "RMD160")
+
+MANIFEST1_REQUIRED_HASH = "MD5"
+MANIFEST2_REQUIRED_HASH = "SHA1"
+
+MANIFEST2_IDENTIFIERS = ("AUX", "MISC", "DIST", "EBUILD")
+# ===========================================================================
+# END OF CONSTANTS -- END OF CONSTANTS -- END OF CONSTANTS -- END OF CONSTANT
+# ===========================================================================
+
+# Private constants for use in conditional code in order to minimize the diff
+# between branches.
+_ENABLE_DYN_LINK_MAP = True
+_ENABLE_PRESERVE_LIBS = True
+_ENABLE_REPO_NAME_WARN = True
+_ENABLE_SET_CONFIG = True
+_SANDBOX_COMPAT_LEVEL = "22"
+
+
+# The definitions above will differ between branches, so it's useful to have
+# common lines of diff context here in order to avoid merge conflicts.
+
+if not _ENABLE_PRESERVE_LIBS:
+ SUPPORTED_FEATURES = set(SUPPORTED_FEATURES)
+ SUPPORTED_FEATURES.remove("preserve-libs")
+ SUPPORTED_FEATURES = frozenset(SUPPORTED_FEATURES)
+
+if not _ENABLE_SET_CONFIG:
+ WORLD_SETS_FILE = '/dev/null'
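
The split between relative and absolute constants above matters mainly when
paths are composed: config_root-relative and target_root-relative constants
carry no leading slash, so they can be joined directly onto the chosen root.
A small illustration:

    # Sketch: joining target_root-relative constants onto a root.
    import os
    from portage.const import VDB_PATH, WORLD_FILE

    eroot = "/"  # ROOT with EPREFIX applied, in Prefix terms
    print(os.path.join(eroot, VDB_PATH))     # /var/db/pkg
    print(os.path.join(eroot, WORLD_FILE))   # /var/lib/portage/world
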
diff --git a/portage_with_autodep/pym/portage/cvstree.py b/portage_with_autodep/pym/portage/cvstree.py
new file mode 100644
index 0000000..9ba22f3
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cvstree.py
@@ -0,0 +1,293 @@
+# cvstree.py -- cvs tree utilities
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import io
+import re
+import stat
+import sys
+import time
+
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+
+if sys.hexversion >= 0x3000000:
+ long = int
+
+# [D]/Name/Version/Date/Flags/Tags
+
+def pathdata(entries, path):
+ """(entries,path)
+ Returns the data(dict) for a specific file/dir at the path specified."""
+ mysplit=path.split("/")
+ myentries=entries
+ mytarget=mysplit[-1]
+ mysplit=mysplit[:-1]
+ for mys in mysplit:
+ if mys in myentries["dirs"]:
+ myentries=myentries["dirs"][mys]
+ else:
+ return None
+ if mytarget in myentries["dirs"]:
+ return myentries["dirs"][mytarget]
+ elif mytarget in myentries["files"]:
+ return myentries["files"][mytarget]
+ else:
+ return None
+
+def fileat(entries, path):
+ return pathdata(entries,path)
+
+def isadded(entries, path):
+ """(entries,path)
+ Returns true if the path exists and is added to the cvs tree."""
+ mytarget=pathdata(entries, path)
+ if mytarget:
+ if "cvs" in mytarget["status"]:
+ return 1
+
+ basedir=os.path.dirname(path)
+ filename=os.path.basename(path)
+
+ try:
+ myfile = io.open(
+ _unicode_encode(os.path.join(basedir, 'CVS', 'Entries'),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='strict')
+ except IOError:
+ return 0
+ mylines=myfile.readlines()
+ myfile.close()
+
+ rep=re.compile("^\/"+re.escape(filename)+"\/");
+ for x in mylines:
+ if rep.search(x):
+ return 1
+
+ return 0
+
+def findnew(entries,recursive=0,basedir=""):
+ """(entries,recursive=0,basedir="")
+ Recurses the entries tree to find all elements that have been added but
+ have not yet been committed. Returns a list of paths, optionally prepended
+ with a basedir."""
+ if basedir and basedir[-1]!="/":
+ basedir=basedir+"/"
+ mylist=[]
+ for myfile in entries["files"]:
+ if "cvs" in entries["files"][myfile]["status"]:
+ if "0" == entries["files"][myfile]["revision"]:
+ mylist.append(basedir+myfile)
+ if recursive:
+ for mydir in entries["dirs"]:
+ mylist+=findnew(entries["dirs"][mydir],recursive,basedir+mydir)
+ return mylist
+
+def findoption(entries, pattern, recursive=0, basedir=""):
+ """(entries, pattern, recursive=0, basedir="")
+ Iterate over paths of cvs entries for which the pattern.search() method
+ finds a match. Returns a list of paths, optionally prepended with a
+ basedir."""
+ if not basedir.endswith("/"):
+ basedir += "/"
+ for myfile, mydata in entries["files"].items():
+ if "cvs" in mydata["status"]:
+ if pattern.search(mydata["flags"]):
+ yield basedir+myfile
+ if recursive:
+ for mydir, mydata in entries["dirs"].items():
+ for x in findoption(mydata, pattern,
+ recursive, basedir+mydir):
+ yield x
+
+def findchanged(entries,recursive=0,basedir=""):
+ """(entries,recursive=0,basedir="")
+ Recurses the entries tree to find all elements that exist in the cvs tree
+ and differ from the committed version. Returns a list of paths, optionally
+ prepended with a basedir."""
+ if basedir and basedir[-1]!="/":
+ basedir=basedir+"/"
+ mylist=[]
+ for myfile in entries["files"]:
+ if "cvs" in entries["files"][myfile]["status"]:
+ if "current" not in entries["files"][myfile]["status"]:
+ if "exists" in entries["files"][myfile]["status"]:
+ if entries["files"][myfile]["revision"]!="0":
+ mylist.append(basedir+myfile)
+ if recursive:
+ for mydir in entries["dirs"]:
+ mylist+=findchanged(entries["dirs"][mydir],recursive,basedir+mydir)
+ return mylist
+
+def findmissing(entries,recursive=0,basedir=""):
+ """(entries,recursive=0,basedir="")
+ Recurses the entries tree to find all elements that are listed in the cvs
+ tree but do not exist on the filesystem. Returns a list of paths,
+ optionally prepended with a basedir."""
+ if basedir and basedir[-1]!="/":
+ basedir=basedir+"/"
+ mylist=[]
+ for myfile in entries["files"]:
+ if "cvs" in entries["files"][myfile]["status"]:
+ if "exists" not in entries["files"][myfile]["status"]:
+ if "removed" not in entries["files"][myfile]["status"]:
+ mylist.append(basedir+myfile)
+ if recursive:
+ for mydir in entries["dirs"]:
+ mylist+=findmissing(entries["dirs"][mydir],recursive,basedir+mydir)
+ return mylist
+
+def findunadded(entries,recursive=0,basedir=""):
+ """(entries,recursive=0,basedir="")
+ Recurses the entries tree to find all elements that are in valid cvs
+ directories but are not part of the cvs tree. Returns a list of paths,
+ optionally prepended with a basedir."""
+ if basedir and basedir[-1]!="/":
+ basedir=basedir+"/"
+ mylist=[]
+
+ #ignore what cvs ignores.
+ for myfile in entries["files"]:
+ if "cvs" not in entries["files"][myfile]["status"]:
+ mylist.append(basedir+myfile)
+ if recursive:
+ for mydir in entries["dirs"]:
+ mylist+=findunadded(entries["dirs"][mydir],recursive,basedir+mydir)
+ return mylist
+
+def findremoved(entries,recursive=0,basedir=""):
+ """(entries,recursive=0,basedir="")
+	Recurses the entries tree to find all elements that are flagged for cvs
+	deletion. Returns a list of paths, optionally prepended with a basedir."""
+ if basedir and basedir[-1]!="/":
+ basedir=basedir+"/"
+ mylist=[]
+ for myfile in entries["files"]:
+ if "removed" in entries["files"][myfile]["status"]:
+ mylist.append(basedir+myfile)
+ if recursive:
+ for mydir in entries["dirs"]:
+ mylist+=findremoved(entries["dirs"][mydir],recursive,basedir+mydir)
+ return mylist
+
+def findall(entries, recursive=0, basedir=""):
+ """(entries,recursive=0,basedir="")
+	Recurses the entries tree to find all new, changed, missing, unadded, and
+	removed entities. Returns a 5 element list of lists as returned from each find*()."""
+
+ if basedir and basedir[-1]!="/":
+ basedir=basedir+"/"
+ mynew = findnew(entries,recursive,basedir)
+ mychanged = findchanged(entries,recursive,basedir)
+ mymissing = findmissing(entries,recursive,basedir)
+ myunadded = findunadded(entries,recursive,basedir)
+ myremoved = findremoved(entries,recursive,basedir)
+ return [mynew, mychanged, mymissing, myunadded, myremoved]
+
+ignore_list = re.compile(r"(^|/)(RCS(|LOG)|SCCS|CVS(|\.adm)|cvslog\..*|tags|TAGS|\.(make\.state|nse_depinfo)|.*~|(\.|)#.*|,.*|_$.*|.*\$|\.del-.*|.*\.(old|BAK|bak|orig|rej|a|olb|o|obj|so|exe|Z|elc|ln)|core)$")
+def apply_cvsignore_filter(list):
+ x=0
+ while x < len(list):
+ if ignore_list.match(list[x].split("/")[-1]):
+ list.pop(x)
+ else:
+ x+=1
+ return list
+
+def getentries(mydir,recursive=0):
+ """(basedir,recursive=0)
+ Scans the given directory and returns a datadict of all the entries in
+ the directory separated as a dirs dict and a files dict."""
+ myfn=mydir+"/CVS/Entries"
+ # entries=[dirs, files]
+ entries={"dirs":{},"files":{}}
+ if not os.path.exists(mydir):
+ return entries
+ try:
+ myfile = io.open(_unicode_encode(myfn,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='strict')
+ mylines=myfile.readlines()
+ myfile.close()
+	except SystemExit:
+ raise
+ except:
+ mylines=[]
+ for line in mylines:
+ if line and line[-1]=="\n":
+ line=line[:-1]
+ if not line:
+ continue
+ if line=="D": # End of entries file
+ break
+ mysplit=line.split("/")
+ if len(mysplit)!=6:
+ print("Confused:",mysplit)
+ continue
+ if mysplit[0]=="D":
+ entries["dirs"][mysplit[1]]={"dirs":{},"files":{},"status":[]}
+ entries["dirs"][mysplit[1]]["status"]=["cvs"]
+ if os.path.isdir(mydir+"/"+mysplit[1]):
+ entries["dirs"][mysplit[1]]["status"]+=["exists"]
+ entries["dirs"][mysplit[1]]["flags"]=mysplit[2:]
+ if recursive:
+ rentries=getentries(mydir+"/"+mysplit[1],recursive)
+ entries["dirs"][mysplit[1]]["dirs"]=rentries["dirs"]
+ entries["dirs"][mysplit[1]]["files"]=rentries["files"]
+ else:
+ # [D]/Name/revision/Date/Flags/Tags
+ entries["files"][mysplit[1]]={}
+ entries["files"][mysplit[1]]["revision"]=mysplit[2]
+ entries["files"][mysplit[1]]["date"]=mysplit[3]
+ entries["files"][mysplit[1]]["flags"]=mysplit[4]
+ entries["files"][mysplit[1]]["tags"]=mysplit[5]
+ entries["files"][mysplit[1]]["status"]=["cvs"]
+ if entries["files"][mysplit[1]]["revision"][0]=="-":
+ entries["files"][mysplit[1]]["status"]+=["removed"]
+
+ for file in apply_cvsignore_filter(os.listdir(mydir)):
+ if file=="CVS":
+ continue
+ if os.path.isdir(mydir+"/"+file):
+ if file not in entries["dirs"]:
+ entries["dirs"][file]={"dirs":{},"files":{}}
+ # It's normal for a directory to be unlisted in Entries
+ # when checked out without -P (see bug #257660).
+ rentries=getentries(mydir+"/"+file,recursive)
+ entries["dirs"][file]["dirs"]=rentries["dirs"]
+ entries["dirs"][file]["files"]=rentries["files"]
+ if "status" in entries["dirs"][file]:
+ if "exists" not in entries["dirs"][file]["status"]:
+ entries["dirs"][file]["status"]+=["exists"]
+ else:
+ entries["dirs"][file]["status"]=["exists"]
+ elif os.path.isfile(mydir+"/"+file):
+ if file not in entries["files"]:
+ entries["files"][file]={"revision":"","date":"","flags":"","tags":""}
+ if "status" in entries["files"][file]:
+ if "exists" not in entries["files"][file]["status"]:
+ entries["files"][file]["status"]+=["exists"]
+ else:
+ entries["files"][file]["status"]=["exists"]
+ try:
+ mystat=os.stat(mydir+"/"+file)
+ mytime = time.asctime(time.gmtime(mystat[stat.ST_MTIME]))
+ if "status" not in entries["files"][file]:
+ entries["files"][file]["status"]=[]
+ if mytime==entries["files"][file]["date"]:
+ entries["files"][file]["status"]+=["current"]
+			except SystemExit:
+ raise
+ except Exception as e:
+ print("failed to stat",file)
+ print(e)
+ return
+
+ else:
+ print()
+ print("File of unknown type:",mydir+"/"+file)
+ print()
+ return entries
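
Taken together, getentries() builds the nested entries dict and the find*()
helpers query it. A hypothetical session against a checkout (the path is a
placeholder):

    # Sketch: summarize the state of a CVS working directory.
    from portage import cvstree

    entries = cvstree.getentries("/path/to/checkout", recursive=1)
    new, changed, missing, unadded, removed = cvstree.findall(
        entries, recursive=1, basedir="/path/to/checkout")
    for path in unadded:
        print("not under CVS control:", path)
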
diff --git a/portage_with_autodep/pym/portage/data.py b/portage_with_autodep/pym/portage/data.py
new file mode 100644
index 0000000..c38fa17
--- /dev/null
+++ b/portage_with_autodep/pym/portage/data.py
@@ -0,0 +1,122 @@
+# data.py -- Calculated/Discovered Data Values
+# Copyright 1998-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os, pwd, grp, platform
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.output:colorize',
+ 'portage.util:writemsg',
+)
+from portage.localization import _
+
+ostype=platform.system()
+userland = None
+if ostype == "DragonFly" or ostype.endswith("BSD"):
+ userland = "BSD"
+else:
+ userland = "GNU"
+
+lchown = getattr(os, "lchown", None)
+
+if not lchown:
+ if ostype == "Darwin":
+ def lchown(*pos_args, **key_args):
+ pass
+ else:
+ def lchown(*pargs, **kwargs):
+ writemsg(colorize("BAD", "!!!") + _(
+ " It seems that os.lchown does not"
+ " exist. Please rebuild python.\n"), noiselevel=-1)
+ lchown()
+
+lchown = portage._unicode_func_wrapper(lchown)
+
+def portage_group_warning():
+ warn_prefix = colorize("BAD", "*** WARNING *** ")
+ mylines = [
+ "For security reasons, only system administrators should be",
+ "allowed in the portage group. Untrusted users or processes",
+ "can potentially exploit the portage group for attacks such as",
+ "local privilege escalation."
+ ]
+ for x in mylines:
+ writemsg(warn_prefix, noiselevel=-1)
+ writemsg(x, noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+
+# Portage has 3 security levels that depend on the uid and gid of the main
+# process and are assigned according to the following table:
+#
+# Privileges secpass uid gid
+# normal 0 any any
+# group 1 any portage_gid
+# super 2 0 any
+#
+# If the "wheel" group does not exist then wheelgid falls back to 0.
+# If the "portage" group does not exist then portage_uid falls back to wheelgid.
+
+secpass=0
+
+uid=os.getuid()
+wheelgid=0
+
+if uid==0:
+ secpass=2
+try:
+ wheelgid=grp.getgrnam("wheel")[2]
+except KeyError:
+ pass
+
+# Allow the overriding of the user used for 'userpriv' and 'userfetch'
+_portage_uname = os.environ.get('PORTAGE_USERNAME', 'portage')
+_portage_grpname = os.environ.get('PORTAGE_GRPNAME', 'portage')
+
+#Discover the uid and gid of the portage user/group
+try:
+ portage_uid = pwd.getpwnam(_portage_uname)[2]
+ portage_gid = grp.getgrnam(_portage_grpname)[2]
+ if secpass < 1 and portage_gid in os.getgroups():
+ secpass=1
+except KeyError:
+ portage_uid=0
+ portage_gid=0
+ userpriv_groups = [portage_gid]
+ writemsg(colorize("BAD",
+ _("portage: 'portage' user or group missing.")) + "\n", noiselevel=-1)
+ writemsg(_(
+ " For the defaults, line 1 goes into passwd, "
+ "and 2 into group.\n"), noiselevel=-1)
+ writemsg(colorize("GOOD",
+ " portage:x:250:250:portage:/var/tmp/portage:/bin/false") \
+ + "\n", noiselevel=-1)
+ writemsg(colorize("GOOD", " portage::250:portage") + "\n",
+ noiselevel=-1)
+ portage_group_warning()
+else:
+ userpriv_groups = [portage_gid]
+ if secpass >= 2:
+ class _LazyUserprivGroups(portage.proxy.objectproxy.ObjectProxy):
+ def _get_target(self):
+ global userpriv_groups
+ if userpriv_groups is not self:
+ return userpriv_groups
+ userpriv_groups = _userpriv_groups
+ # Get a list of group IDs for the portage user. Do not use
+ # grp.getgrall() since it is known to trigger spurious
+ # SIGPIPE problems with nss_ldap.
+ mystatus, myoutput = \
+ portage.subprocess_getstatusoutput("id -G %s" % _portage_uname)
+ if mystatus == os.EX_OK:
+ for x in myoutput.split():
+ try:
+ userpriv_groups.append(int(x))
+ except ValueError:
+ pass
+ userpriv_groups[:] = sorted(set(userpriv_groups))
+ return userpriv_groups
+
+ _userpriv_groups = userpriv_groups
+ userpriv_groups = _LazyUserprivGroups()
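
Downstream code mostly consumes this module through secpass and the portage
uid/gid values resolved above. A small sketch of the three privilege levels
from the table (importing the module triggers the user/group discovery as a
side effect):

    # Sketch: branch on the security level computed at import time.
    from portage.data import secpass, portage_gid

    if secpass >= 2:
        print("superuser privileges")
    elif secpass == 1:
        print("member of the portage group (gid %d)" % portage_gid)
    else:
        print("normal, unprivileged user")
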
diff --git a/portage_with_autodep/pym/portage/dbapi/_MergeProcess.py b/portage_with_autodep/pym/portage/dbapi/_MergeProcess.py
new file mode 100644
index 0000000..34ed031
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dbapi/_MergeProcess.py
@@ -0,0 +1,282 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import io
+import shutil
+import signal
+import tempfile
+import traceback
+
+import errno
+import fcntl
+import portage
+from portage import os, _unicode_decode
+from portage.const import PORTAGE_PACKAGE_ATOM
+from portage.dep import match_from_list
+import portage.elog.messages
+from portage.elog import _preload_elog_modules
+from portage.util import ensure_dirs
+from _emerge.PollConstants import PollConstants
+from _emerge.SpawnProcess import SpawnProcess
+
+class MergeProcess(SpawnProcess):
+ """
+ Merge packages in a subprocess, so the Scheduler can run in the main
+ thread while files are moved or copied asynchronously.
+ """
+
+ __slots__ = ('mycat', 'mypkg', 'settings', 'treetype',
+ 'vartree', 'scheduler', 'blockers', 'pkgloc', 'infloc', 'myebuild',
+ 'mydbapi', 'prev_mtimes', 'unmerge', '_elog_reader_fd', '_elog_reg_id',
+ '_buf', '_elog_keys', '_locked_vdb')
+
+ def _start(self):
+ # Portage should always call setcpv prior to this
+ # point, but here we have a fallback as a convenience
+ # for external API consumers. It's important that
+ # this metadata access happens in the parent process,
+ # since closing of file descriptors in the subprocess
+ # can prevent access to open database connections such
+ # as that used by the sqlite metadata cache module.
+ cpv = "%s/%s" % (self.mycat, self.mypkg)
+ settings = self.settings
+ if cpv != settings.mycpv or \
+ "EAPI" not in settings.configdict["pkg"]:
+ settings.reload()
+ settings.reset()
+ settings.setcpv(cpv, mydb=self.mydbapi)
+
+ if not self.unmerge:
+ self._handle_self_reinstall()
+ super(MergeProcess, self)._start()
+
+ def _lock_vdb(self):
+ """
+ Lock the vdb if FEATURES=parallel-install is NOT enabled,
+ otherwise do nothing. This is implemented with
+ vardbapi.lock(), which supports reentrance by the
+ subprocess that we spawn.
+ """
+ if "parallel-install" not in self.settings.features:
+ self.vartree.dbapi.lock()
+ self._locked_vdb = True
+
+ def _unlock_vdb(self):
+ """
+ Unlock the vdb if we hold a lock, otherwise do nothing.
+ """
+ if self._locked_vdb:
+ self.vartree.dbapi.unlock()
+ self._locked_vdb = False
+
+ def _handle_self_reinstall(self):
+ """
+ If portage is reinstalling itself, create temporary
+ copies of PORTAGE_BIN_PATH and PORTAGE_PYM_PATH in order
+ to avoid relying on the new versions which may be
+ incompatible. Register an atexit hook to clean up the
+ temporary directories. Pre-load elog modules here since
+ we won't be able to later if they get unmerged (happens
+ when namespace changes).
+ """
+
+ settings = self.settings
+ cpv = settings.mycpv
+ reinstall_self = False
+ if self.settings["ROOT"] == "/" and \
+ match_from_list(PORTAGE_PACKAGE_ATOM, [cpv]):
+ inherited = frozenset(self.settings.get('INHERITED', '').split())
+ if not self.vartree.dbapi.cpv_exists(cpv) or \
+ '9999' in cpv or \
+ 'git' in inherited or \
+ 'git-2' in inherited:
+ reinstall_self = True
+
+ if reinstall_self:
+ # Load lazily referenced portage submodules into memory,
+ # so imports won't fail during portage upgrade/downgrade.
+ _preload_elog_modules(self.settings)
+ portage.proxy.lazyimport._preload_portage_submodules()
+
+ # Make the temp directory inside $PORTAGE_TMPDIR/portage, since
+ # it's common for /tmp and /var/tmp to be mounted with the
+ # "noexec" option (see bug #346899).
+ build_prefix = os.path.join(settings["PORTAGE_TMPDIR"], "portage")
+ ensure_dirs(build_prefix)
+ base_path_tmp = tempfile.mkdtemp(
+ "", "._portage_reinstall_.", build_prefix)
+ portage.process.atexit_register(shutil.rmtree, base_path_tmp)
+ dir_perms = 0o755
+ for subdir in "bin", "pym":
+ var_name = "PORTAGE_%s_PATH" % subdir.upper()
+ var_orig = settings[var_name]
+ var_new = os.path.join(base_path_tmp, subdir)
+ settings[var_name] = var_new
+ settings.backup_changes(var_name)
+ shutil.copytree(var_orig, var_new, symlinks=True)
+ os.chmod(var_new, dir_perms)
+ portage._bin_path = settings['PORTAGE_BIN_PATH']
+ portage._pym_path = settings['PORTAGE_PYM_PATH']
+ os.chmod(base_path_tmp, dir_perms)
+
+ def _elog_output_handler(self, fd, event):
+ output = None
+ if event & PollConstants.POLLIN:
+ try:
+ output = os.read(fd, self._bufsize)
+ except OSError as e:
+ if e.errno not in (errno.EAGAIN, errno.EINTR):
+ raise
+ if output:
+ lines = _unicode_decode(output).split('\n')
+ if len(lines) == 1:
+ self._buf += lines[0]
+ else:
+ lines[0] = self._buf + lines[0]
+ self._buf = lines.pop()
+ out = io.StringIO()
+ for line in lines:
+ funcname, phase, key, msg = line.split(' ', 3)
+ self._elog_keys.add(key)
+ reporter = getattr(portage.elog.messages, funcname)
+ reporter(msg, phase=phase, key=key, out=out)
+
+ def _spawn(self, args, fd_pipes, **kwargs):
+ """
+ Fork a subprocess, apply local settings, and call
+ dblink.merge().
+ """
+
+ elog_reader_fd, elog_writer_fd = os.pipe()
+ fcntl.fcntl(elog_reader_fd, fcntl.F_SETFL,
+ fcntl.fcntl(elog_reader_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+ blockers = None
+ if self.blockers is not None:
+ # Query blockers in the main process, since closing
+ # of file descriptors in the subprocess can prevent
+ # access to open database connections such as that
+ # used by the sqlite metadata cache module.
+ blockers = self.blockers()
+ mylink = portage.dblink(self.mycat, self.mypkg, settings=self.settings,
+ treetype=self.treetype, vartree=self.vartree,
+ blockers=blockers, scheduler=self.scheduler,
+ pipe=elog_writer_fd)
+ fd_pipes[elog_writer_fd] = elog_writer_fd
+ self._elog_reg_id = self.scheduler.register(elog_reader_fd,
+ self._registered_events, self._elog_output_handler)
+
+ # If a concurrent emerge process tries to install a package
+ # in the same SLOT as this one at the same time, there is an
+ # extremely unlikely chance that the COUNTER values will not be
+ # ordered correctly unless we lock the vdb here.
+ # FEATURES=parallel-install skips this lock in order to
+ # improve performance, and the risk is practically negligible.
+ self._lock_vdb()
+ counter = None
+ if not self.unmerge:
+ counter = self.vartree.dbapi.counter_tick()
+
+ pid = os.fork()
+ if pid != 0:
+ os.close(elog_writer_fd)
+ self._elog_reader_fd = elog_reader_fd
+ self._buf = ""
+ self._elog_keys = set()
+
+ # invalidate relevant vardbapi caches
+ if self.vartree.dbapi._categories is not None:
+ self.vartree.dbapi._categories = None
+ self.vartree.dbapi._pkgs_changed = True
+ self.vartree.dbapi._clear_pkg_cache(mylink)
+
+ portage.process.spawned_pids.append(pid)
+ return [pid]
+
+ os.close(elog_reader_fd)
+ portage.process._setup_pipes(fd_pipes)
+
+ # Use default signal handlers since the ones inherited
+ # from the parent process are irrelevant here.
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+ portage.output.havecolor = self.settings.get('NOCOLOR') \
+ not in ('yes', 'true')
+
+ # In this subprocess we want mylink._display_merge() to use
+ # stdout/stderr directly since they are pipes. This behavior
+ # is triggered when mylink._scheduler is None.
+ mylink._scheduler = None
+
+		# Avoid wasteful updates of the vdb cache.
+ self.vartree.dbapi._flush_cache_enabled = False
+
+ # In this subprocess we don't want PORTAGE_BACKGROUND to
+ # suppress stdout/stderr output since they are pipes. We
+ # also don't want to open PORTAGE_LOG_FILE, since it will
+ # already be opened by the parent process, so we set the
+ # "subprocess" value for use in conditional logging code
+ # involving PORTAGE_LOG_FILE.
+ if not self.unmerge:
+ # unmerge phases have separate logs
+ if self.settings.get("PORTAGE_BACKGROUND") == "1":
+ self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "1"
+ else:
+ self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "0"
+ self.settings.backup_changes("PORTAGE_BACKGROUND_UNMERGE")
+ self.settings["PORTAGE_BACKGROUND"] = "subprocess"
+ self.settings.backup_changes("PORTAGE_BACKGROUND")
+
+ rval = 1
+ try:
+ if self.unmerge:
+ if not mylink.exists():
+ rval = os.EX_OK
+ elif mylink.unmerge(
+ ldpath_mtimes=self.prev_mtimes) == os.EX_OK:
+ mylink.lockdb()
+ try:
+ mylink.delete()
+ finally:
+ mylink.unlockdb()
+ rval = os.EX_OK
+ else:
+ rval = mylink.merge(self.pkgloc, self.infloc,
+ myebuild=self.myebuild, mydbapi=self.mydbapi,
+ prev_mtimes=self.prev_mtimes, counter=counter)
+ except SystemExit:
+ raise
+ except:
+ traceback.print_exc()
+ finally:
+ # Call os._exit() from finally block, in order to suppress any
+ # finally blocks from earlier in the call stack. See bug #345289.
+ os._exit(rval)
+
+ def _unregister(self):
+ """
+ Unregister from the scheduler and close open files.
+ """
+
+ if not self.unmerge:
+ # Populate the vardbapi cache for the new package
+ # while its inodes are still hot.
+ try:
+ self.vartree.dbapi.aux_get(self.settings.mycpv, ["EAPI"])
+ except KeyError:
+ pass
+
+ self._unlock_vdb()
+ if self._elog_reg_id is not None:
+ self.scheduler.unregister(self._elog_reg_id)
+ self._elog_reg_id = None
+ if self._elog_reader_fd:
+ os.close(self._elog_reader_fd)
+ self._elog_reader_fd = None
+ if self._elog_keys is not None:
+ for key in self._elog_keys:
+ portage.elog.elog_process(key, self.settings,
+ phasefilter=("prerm", "postrm"))
+ self._elog_keys = None
+
+ super(MergeProcess, self)._unregister()
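
The elog plumbing in _spawn() above is a classic fork-and-pipe pattern: the
child writes "funcname phase key msg" lines to a pipe and the parent parses
them as they arrive. A generic standalone sketch of that pattern (not the
class itself, which additionally needs a scheduler and a dblink):

    # Sketch: the child reports over a pipe; the parent splits the same
    # space-separated four-field lines that _elog_output_handler expects.
    import os

    reader, writer = os.pipe()
    pid = os.fork()
    if pid == 0:  # child
        os.close(reader)
        os.write(writer, b"einfo postinst app-misc/foo-1 merged ok\n")
        os._exit(0)
    os.close(writer)
    buf = b""
    while True:
        chunk = os.read(reader, 4096)
        if not chunk:
            break
        buf += chunk
    os.close(reader)
    os.waitpid(pid, 0)
    for line in buf.decode().splitlines():
        funcname, phase, key, msg = line.split(" ", 3)
        print("%s [%s] %s: %s" % (funcname, phase, key, msg))
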
diff --git a/portage_with_autodep/pym/portage/dbapi/__init__.py b/portage_with_autodep/pym/portage/dbapi/__init__.py
new file mode 100644
index 0000000..e386faa
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dbapi/__init__.py
@@ -0,0 +1,302 @@
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ["dbapi"]
+
+import re
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.dbapi.dep_expand:dep_expand@_dep_expand',
+ 'portage.dep:match_from_list',
+ 'portage.output:colorize',
+ 'portage.util:cmp_sort_key,writemsg',
+ 'portage.versions:catsplit,catpkgsplit,vercmp',
+)
+
+from portage import os
+from portage import auxdbkeys
+from portage.localization import _
+
+class dbapi(object):
+ _category_re = re.compile(r'^\w[-.+\w]*$')
+ _categories = None
+ _use_mutable = False
+ _known_keys = frozenset(x for x in auxdbkeys
+ if not x.startswith("UNUSED_0"))
+ def __init__(self):
+ pass
+
+ @property
+ def categories(self):
+ """
+ Use self.cp_all() to generate a category list. Mutable instances
+ can delete the self._categories attribute in cases when the cached
+ categories become invalid and need to be regenerated.
+ """
+ if self._categories is not None:
+ return self._categories
+ self._categories = tuple(sorted(set(catsplit(x)[0] \
+ for x in self.cp_all())))
+ return self._categories
+
+ def close_caches(self):
+ pass
+
+ def cp_list(self, cp, use_cache=1):
+ raise NotImplementedError(self)
+
+ def _cpv_sort_ascending(self, cpv_list):
+ """
+ Use this to sort self.cp_list() results in ascending
+ order. It sorts in place and returns None.
+ """
+ if len(cpv_list) > 1:
+ # If the cpv includes explicit -r0, it has to be preserved
+ # for consistency in findname and aux_get calls, so use a
+ # dict to map strings back to their original values.
+ ver_map = {}
+ for cpv in cpv_list:
+ ver_map[cpv] = '-'.join(catpkgsplit(cpv)[2:])
+ def cmp_cpv(cpv1, cpv2):
+ return vercmp(ver_map[cpv1], ver_map[cpv2])
+ cpv_list.sort(key=cmp_sort_key(cmp_cpv))
+
+ def cpv_all(self):
+ """Return all CPVs in the db
+ Args:
+ None
+ Returns:
+ A list of Strings, 1 per CPV
+
+		This function relies on a subclass implementing cp_all; this is why the hasattr check is there.
+ """
+
+ if not hasattr(self, "cp_all"):
+ raise NotImplementedError
+ cpv_list = []
+ for cp in self.cp_all():
+ cpv_list.extend(self.cp_list(cp))
+ return cpv_list
+
+ def cp_all(self):
+ """ Implement this in a child class
+ Args
+ None
+ Returns:
+ A list of strings 1 per CP in the datastore
+ """
+		raise NotImplementedError
+
+ def aux_get(self, mycpv, mylist, myrepo=None):
+ """Return the metadata keys in mylist for mycpv
+ Args:
+ mycpv - "sys-apps/foo-1.0"
+ mylist - ["SLOT","DEPEND","HOMEPAGE"]
+ myrepo - The repository name.
+ Returns:
+ a list of results, in order of keys in mylist, such as:
+ ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or [] if mycpv not found'
+ """
+ raise NotImplementedError
+
+ def aux_update(self, cpv, metadata_updates):
+ """
+ Args:
+ cpv - "sys-apps/foo-1.0"
+ metadata_updates = { key : newvalue }
+ Returns:
+ None
+ """
+ raise NotImplementedError
+
+ def match(self, origdep, use_cache=1):
+ """Given a dependency, try to find packages that match
+ Args:
+ origdep - Depend atom
+ use_cache - Boolean indicating if we should use the cache or not
+ NOTE: Do we ever not want the cache?
+ Returns:
+ a list of packages that match origdep
+ """
+ mydep = _dep_expand(origdep, mydb=self, settings=self.settings)
+ return list(self._iter_match(mydep,
+ self.cp_list(mydep.cp, use_cache=use_cache)))
+
+ def _iter_match(self, atom, cpv_iter):
+ cpv_iter = iter(match_from_list(atom, cpv_iter))
+ if atom.slot:
+ cpv_iter = self._iter_match_slot(atom, cpv_iter)
+ if atom.unevaluated_atom.use:
+ cpv_iter = self._iter_match_use(atom, cpv_iter)
+ if atom.repo:
+ cpv_iter = self._iter_match_repo(atom, cpv_iter)
+ return cpv_iter
+
+ def _iter_match_repo(self, atom, cpv_iter):
+ for cpv in cpv_iter:
+ try:
+ if self.aux_get(cpv, ["repository"], myrepo=atom.repo)[0] == atom.repo:
+ yield cpv
+ except KeyError:
+ continue
+
+ def _iter_match_slot(self, atom, cpv_iter):
+ for cpv in cpv_iter:
+ try:
+ if self.aux_get(cpv, ["SLOT"], myrepo=atom.repo)[0] == atom.slot:
+ yield cpv
+ except KeyError:
+ continue
+
+ def _iter_match_use(self, atom, cpv_iter):
+ """
+ 1) Check for required IUSE intersection (need implicit IUSE here).
+ 2) Check enabled/disabled flag states.
+ """
+
+ iuse_implicit_match = self.settings._iuse_implicit_match
+ for cpv in cpv_iter:
+ try:
+ iuse, slot, use = self.aux_get(cpv, ["IUSE", "SLOT", "USE"], myrepo=atom.repo)
+ except KeyError:
+ continue
+ iuse = frozenset(x.lstrip('+-') for x in iuse.split())
+ missing_iuse = False
+ for x in atom.unevaluated_atom.use.required:
+ if x not in iuse and not iuse_implicit_match(x):
+ missing_iuse = True
+ break
+ if missing_iuse:
+ continue
+ if not atom.use:
+ pass
+ elif not self._use_mutable:
+ # Use IUSE to validate USE settings for built packages,
+ # in case the package manager that built this package
+ # failed to do that for some reason (or in case of
+ # data corruption).
+ use = frozenset(x for x in use.split() if x in iuse or \
+ iuse_implicit_match(x))
+ missing_enabled = atom.use.missing_enabled.difference(iuse)
+ missing_disabled = atom.use.missing_disabled.difference(iuse)
+
+ if atom.use.enabled:
+ if atom.use.enabled.intersection(missing_disabled):
+ continue
+ need_enabled = atom.use.enabled.difference(use)
+ if need_enabled:
+ need_enabled = need_enabled.difference(missing_enabled)
+ if need_enabled:
+ continue
+
+ if atom.use.disabled:
+ if atom.use.disabled.intersection(missing_enabled):
+ continue
+ need_disabled = atom.use.disabled.intersection(use)
+ if need_disabled:
+ need_disabled = need_disabled.difference(missing_disabled)
+ if need_disabled:
+ continue
+ else:
+ # Check masked and forced flags for repoman.
+ mysettings = getattr(self, 'settings', None)
+ if mysettings is not None and not mysettings.local_config:
+
+ pkg = "%s:%s" % (cpv, slot)
+ usemask = mysettings._getUseMask(pkg)
+ if usemask.intersection(atom.use.enabled):
+ continue
+
+ useforce = mysettings._getUseForce(pkg).difference(usemask)
+ if useforce.intersection(atom.use.disabled):
+ continue
+
+ yield cpv
+
+ def invalidentry(self, mypath):
+ if '/-MERGING-' in mypath:
+ if os.path.exists(mypath):
+ writemsg(colorize("BAD", _("INCOMPLETE MERGE:"))+" %s\n" % mypath,
+ noiselevel=-1)
+ else:
+ writemsg("!!! Invalid db entry: %s\n" % mypath, noiselevel=-1)
+
+ def update_ents(self, updates, onProgress=None, onUpdate=None):
+ """
+ Update metadata of all packages for package moves.
+ @param updates: A list of move commands, or dict of {repo_name: list}
+ @type updates: list or dict
+ @param onProgress: A progress callback function
+ @type onProgress: a callable that takes 2 integer arguments: maxval and curval
+ @param onUpdate: A progress callback function called only
+ for packages that are modified by updates.
+ @type onUpdate: a callable that takes 2 integer arguments:
+ maxval and curval
+ """
+ cpv_all = self.cpv_all()
+ cpv_all.sort()
+ maxval = len(cpv_all)
+ aux_get = self.aux_get
+ aux_update = self.aux_update
+ meta_keys = ["DEPEND", "RDEPEND", "PDEPEND", "PROVIDE", 'repository']
+ repo_dict = None
+ if isinstance(updates, dict):
+ repo_dict = updates
+ from portage.update import update_dbentries
+ if onUpdate:
+ onUpdate(maxval, 0)
+ if onProgress:
+ onProgress(maxval, 0)
+ for i, cpv in enumerate(cpv_all):
+ metadata = dict(zip(meta_keys, aux_get(cpv, meta_keys)))
+ repo = metadata.pop('repository')
+ if repo_dict is None:
+ updates_list = updates
+ else:
+ try:
+ updates_list = repo_dict[repo]
+ except KeyError:
+ try:
+ updates_list = repo_dict['DEFAULT']
+ except KeyError:
+ continue
+
+ if not updates_list:
+ continue
+
+ metadata_updates = update_dbentries(updates_list, metadata)
+ if metadata_updates:
+ aux_update(cpv, metadata_updates)
+ if onUpdate:
+ onUpdate(maxval, i+1)
+ if onProgress:
+ onProgress(maxval, i+1)
+
+ def move_slot_ent(self, mylist, repo_match=None):
+ """This function takes a sequence:
+ Args:
+			mylist: a slotmove command of the form ("slotmove", package, originalslot, newslot)
+ repo_match: callable that takes single repo_name argument
+ and returns True if the update should be applied
+ Returns:
+ The number of slotmoves this function did
+ """
+ pkg = mylist[1]
+ origslot = mylist[2]
+ newslot = mylist[3]
+ origmatches = self.match(pkg)
+ moves = 0
+ if not origmatches:
+ return moves
+ for mycpv in origmatches:
+ slot = self.aux_get(mycpv, ["SLOT"])[0]
+ if slot != origslot:
+ continue
+ if repo_match is not None \
+ and not repo_match(self.aux_get(mycpv, ['repository'])[0]):
+ continue
+ moves += 1
+ mydata = {"SLOT": newslot+"\n"}
+ self.aux_update(mycpv, mydata)
+ return moves
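
A concrete backend only has to supply cp_all(), cp_list() and aux_get() (plus
a settings attribute for match()); the base class derives the rest. A
hypothetical minimal subclass, with no real metadata behind it:

    # Sketch: back the dbapi interface with a plain list of cpvs.
    from portage.dbapi import dbapi
    from portage.versions import cpv_getkey

    class listdbapi(dbapi):
        def __init__(self, cpvs, settings):
            dbapi.__init__(self)
            self.settings = settings  # match() expects this attribute
            self._cpvs = list(cpvs)
        def cp_all(self):
            return sorted(set(cpv_getkey(cpv) for cpv in self._cpvs))
        def cp_list(self, cp, use_cache=1):
            result = [cpv for cpv in self._cpvs if cpv_getkey(cpv) == cp]
            self._cpv_sort_ascending(result)
            return result
        def aux_get(self, mycpv, mylist, myrepo=None):
            # A real backend would look the values up; return blanks here.
            return ["" for _ in mylist]
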
diff --git a/portage_with_autodep/pym/portage/dbapi/_expand_new_virt.py b/portage_with_autodep/pym/portage/dbapi/_expand_new_virt.py
new file mode 100644
index 0000000..6d6a27d
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dbapi/_expand_new_virt.py
@@ -0,0 +1,72 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage.dep import Atom, _get_useflag_re
+
+def expand_new_virt(vardb, atom):
+ """
+ Iterate over the recursively expanded RDEPEND atoms of
+ a new-style virtual. If atom is not a new-style virtual
+ or it does not match an installed package then it is
+ yielded without any expansion.
+ """
+ if not isinstance(atom, Atom):
+ atom = Atom(atom)
+
+ if not atom.cp.startswith("virtual/"):
+ yield atom
+ return
+
+ traversed = set()
+ stack = [atom]
+
+ while stack:
+ atom = stack.pop()
+ if atom.blocker or \
+ not atom.cp.startswith("virtual/"):
+ yield atom
+ continue
+
+ matches = vardb.match(atom)
+ if not (matches and matches[-1].startswith("virtual/")):
+ yield atom
+ continue
+
+ virt_cpv = matches[-1]
+ if virt_cpv in traversed:
+ continue
+
+ traversed.add(virt_cpv)
+ eapi, iuse, rdepend, use = vardb.aux_get(virt_cpv,
+ ["EAPI", "IUSE", "RDEPEND", "USE"])
+ if not portage.eapi_is_supported(eapi):
+ yield atom
+ continue
+
+		# Validate IUSE and USE, for early detection of vardb corruption.
+ useflag_re = _get_useflag_re(eapi)
+ valid_iuse = []
+ for x in iuse.split():
+ if x[:1] in ("+", "-"):
+ x = x[1:]
+ if useflag_re.match(x) is not None:
+ valid_iuse.append(x)
+ valid_iuse = frozenset(valid_iuse)
+
+ iuse_implicit_match = vardb.settings._iuse_implicit_match
+ valid_use = []
+ for x in use.split():
+ if x in valid_iuse or iuse_implicit_match(x):
+ valid_use.append(x)
+ valid_use = frozenset(valid_use)
+
+ success, atoms = portage.dep_check(rdepend,
+ None, vardb.settings, myuse=valid_use,
+ myroot=vardb.root, trees={vardb.root:{"porttree":vardb.vartree,
+ "vartree":vardb.vartree}})
+
+ if success:
+ stack.extend(atoms)
+ else:
+ yield atom
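
Callers iterate over the expansion and typically filter out blockers;
anything left is either a concrete provider or the original atom when no
expansion applies. A hypothetical usage, assuming the legacy portage.db
globals are initialized for the running system:

    # Sketch: resolve a new-style virtual to its installed providers.
    import portage
    from portage.dbapi._expand_new_virt import expand_new_virt

    vardb = portage.db[portage.root]["vartree"].dbapi  # installed-package db
    for atom in expand_new_virt(vardb, "virtual/libc"):
        if not atom.blocker:
            print(atom)  # e.g. the glibc atom pulled in by the virtual
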
diff --git a/portage_with_autodep/pym/portage/dbapi/bintree.py b/portage_with_autodep/pym/portage/dbapi/bintree.py
new file mode 100644
index 0000000..62fc623
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dbapi/bintree.py
@@ -0,0 +1,1366 @@
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ["bindbapi", "binarytree"]
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.checksum:hashfunc_map,perform_multiple_checksums,verify_all',
+ 'portage.dbapi.dep_expand:dep_expand',
+ 'portage.dep:dep_getkey,isjustname,match_from_list',
+ 'portage.output:EOutput,colorize',
+ 'portage.locks:lockfile,unlockfile',
+ 'portage.package.ebuild.doebuild:_vdb_use_conditional_atoms',
+ 'portage.package.ebuild.fetch:_check_distfile',
+ 'portage.update:update_dbentries',
+ 'portage.util:atomic_ofstream,ensure_dirs,normalize_path,' + \
+ 'writemsg,writemsg_stdout',
+ 'portage.util.listdir:listdir',
+ 'portage.versions:best,catpkgsplit,catsplit',
+)
+
+from portage.cache.mappings import slot_dict_class
+from portage.const import CACHE_PATH
+from portage.dbapi.virtual import fakedbapi
+from portage.dep import Atom, use_reduce, paren_enclose
+from portage.exception import AlarmSignal, InvalidPackageName, \
+ PermissionDenied, PortageException
+from portage.localization import _
+from portage import _movefile
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+
+import codecs
+import errno
+import io
+import re
+import stat
+import subprocess
+import sys
+import tempfile
+import textwrap
+from itertools import chain
+try:
+ from urllib.parse import urlparse
+ from urllib.request import urlopen as urllib_request_urlopen
+except ImportError:
+ from urlparse import urlparse
+ from urllib import urlopen as urllib_request_urlopen
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+ long = int
+
+class bindbapi(fakedbapi):
+ _known_keys = frozenset(list(fakedbapi._known_keys) + \
+ ["CHOST", "repository", "USE"])
+ def __init__(self, mybintree=None, **kwargs):
+ fakedbapi.__init__(self, **kwargs)
+ self.bintree = mybintree
+ self.move_ent = mybintree.move_ent
+ self.cpvdict={}
+ self.cpdict={}
+ # Selectively cache metadata in order to optimize dep matching.
+ self._aux_cache_keys = set(
+ ["BUILD_TIME", "CHOST", "DEPEND", "EAPI", "IUSE", "KEYWORDS",
+ "LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE",
+ "RDEPEND", "repository", "RESTRICT", "SLOT", "USE", "DEFINED_PHASES",
+ "REQUIRED_USE"])
+ self._aux_cache_slot_dict = slot_dict_class(self._aux_cache_keys)
+ self._aux_cache = {}
+
+ def match(self, *pargs, **kwargs):
+ if self.bintree and not self.bintree.populated:
+ self.bintree.populate()
+ return fakedbapi.match(self, *pargs, **kwargs)
+
+ def cpv_exists(self, cpv, myrepo=None):
+ if self.bintree and not self.bintree.populated:
+ self.bintree.populate()
+ return fakedbapi.cpv_exists(self, cpv)
+
+ def cpv_inject(self, cpv, **kwargs):
+ self._aux_cache.pop(cpv, None)
+ fakedbapi.cpv_inject(self, cpv, **kwargs)
+
+ def cpv_remove(self, cpv):
+ self._aux_cache.pop(cpv, None)
+ fakedbapi.cpv_remove(self, cpv)
+
+ def aux_get(self, mycpv, wants, myrepo=None):
+ if self.bintree and not self.bintree.populated:
+ self.bintree.populate()
+ cache_me = False
+ if not self._known_keys.intersection(
+ wants).difference(self._aux_cache_keys):
+ aux_cache = self._aux_cache.get(mycpv)
+ if aux_cache is not None:
+ return [aux_cache.get(x, "") for x in wants]
+ cache_me = True
+ mysplit = mycpv.split("/")
+ mylist = []
+ tbz2name = mysplit[1]+".tbz2"
+ if not self.bintree._remotepkgs or \
+ not self.bintree.isremote(mycpv):
+ tbz2_path = self.bintree.getname(mycpv)
+ if not os.path.exists(tbz2_path):
+ raise KeyError(mycpv)
+ metadata_bytes = portage.xpak.tbz2(tbz2_path).get_data()
+ def getitem(k):
+ v = metadata_bytes.get(_unicode_encode(k,
+ encoding=_encodings['repo.content'],
+ errors='backslashreplace'))
+ if v is not None:
+ v = _unicode_decode(v,
+ encoding=_encodings['repo.content'], errors='replace')
+ return v
+ else:
+ getitem = self.bintree._remotepkgs[mycpv].get
+ mydata = {}
+ mykeys = wants
+ if cache_me:
+ mykeys = self._aux_cache_keys.union(wants)
+ for x in mykeys:
+ myval = getitem(x)
+ # myval is None if the key doesn't exist
+ # or the tbz2 is corrupt.
+ if myval:
+ mydata[x] = " ".join(myval.split())
+
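+ # Note: an empty EAPI value is normalized to "0" below; setdefault()
+ # alone only covers the case where the key is missing entirely.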
+ if not mydata.setdefault('EAPI', _unicode_decode('0')):
+ mydata['EAPI'] = _unicode_decode('0')
+
+ if cache_me:
+ aux_cache = self._aux_cache_slot_dict()
+ for x in self._aux_cache_keys:
+ aux_cache[x] = mydata.get(x, _unicode_decode(''))
+ self._aux_cache[mycpv] = aux_cache
+ return [mydata.get(x, _unicode_decode('')) for x in wants]
+
+ def aux_update(self, cpv, values):
+ if not self.bintree.populated:
+ self.bintree.populate()
+ tbz2path = self.bintree.getname(cpv)
+ if not os.path.exists(tbz2path):
+ raise KeyError(cpv)
+ mytbz2 = portage.xpak.tbz2(tbz2path)
+ mydata = mytbz2.get_data()
+
+ for k, v in values.items():
+ k = _unicode_encode(k,
+ encoding=_encodings['repo.content'], errors='backslashreplace')
+ v = _unicode_encode(v,
+ encoding=_encodings['repo.content'], errors='backslashreplace')
+ mydata[k] = v
+
+ for k, v in list(mydata.items()):
+ if not v:
+ del mydata[k]
+ mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))
+ # inject will clear stale caches via cpv_inject.
+ self.bintree.inject(cpv)
+
+ def cp_list(self, *pargs, **kwargs):
+ if not self.bintree.populated:
+ self.bintree.populate()
+ return fakedbapi.cp_list(self, *pargs, **kwargs)
+
+ def cp_all(self):
+ if not self.bintree.populated:
+ self.bintree.populate()
+ return fakedbapi.cp_all(self)
+
+ def cpv_all(self):
+ if not self.bintree.populated:
+ self.bintree.populate()
+ return fakedbapi.cpv_all(self)
+
+def _pkgindex_cpv_map_latest_build(pkgindex):
+ """
+ Given a PackageIndex instance, create a cpv -> metadata map.
+ If multiple packages have identical CPV values, prefer the package
+ with the latest BUILD_TIME value.
+ @param pkgindex: A PackageIndex instance.
+ @type pkgindex: PackageIndex
+ @rtype: dict
+ @returns: a dict mapping each cpv to its latest-build metadata entry.
+ """
+ cpv_map = {}
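+ # Illustrative sketch with hypothetical index entries (not part of
+ # the original module): given two entries that share a CPV, the one
+ # with the newer BUILD_TIME wins:
+ #   pkgindex.packages = [
+ #       {"CPV": "app-misc/foo-1.0", "BUILD_TIME": "100"},
+ #       {"CPV": "app-misc/foo-1.0", "BUILD_TIME": "200"},
+ #   ]
+ #   _pkgindex_cpv_map_latest_build(pkgindex)["app-misc/foo-1.0"]
+ #   # -> the BUILD_TIME == "200" entry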
+
+ for d in pkgindex.packages:
+ cpv = d["CPV"]
+
+ btime = d.get('BUILD_TIME', '')
+ try:
+ btime = int(btime)
+ except ValueError:
+ btime = None
+
+ other_d = cpv_map.get(cpv)
+ if other_d is not None:
+ other_btime = other_d.get('BUILD_TIME', '')
+ try:
+ other_btime = int(other_btime)
+ except ValueError:
+ other_btime = None
+ if other_btime and (not btime or other_btime > btime):
+ continue
+
+ cpv_map[cpv] = d
+
+ return cpv_map
+
+class binarytree(object):
+ "this tree scans for a list of all packages available in PKGDIR"
+ def __init__(self, root, pkgdir, virtual=None, settings=None):
+ if True:
+ self.root = root
+ #self.pkgdir=settings["PKGDIR"]
+ self.pkgdir = normalize_path(pkgdir)
+ self.dbapi = bindbapi(self, settings=settings)
+ self.update_ents = self.dbapi.update_ents
+ self.move_slot_ent = self.dbapi.move_slot_ent
+ self.populated = 0
+ self.tree = {}
+ self._remote_has_index = False
+ self._remotepkgs = None # remote metadata indexed by cpv
+ self.invalids = []
+ self.settings = settings
+ self._pkg_paths = {}
+ self._pkgindex_uri = {}
+ self._populating = False
+ self._all_directory = os.path.isdir(
+ os.path.join(self.pkgdir, "All"))
+ self._pkgindex_version = 0
+ self._pkgindex_hashes = ["MD5","SHA1"]
+ self._pkgindex_file = os.path.join(self.pkgdir, "Packages")
+ self._pkgindex_keys = self.dbapi._aux_cache_keys.copy()
+ self._pkgindex_keys.update(["CPV", "MTIME", "SIZE"])
+ self._pkgindex_aux_keys = \
+ ["BUILD_TIME", "CHOST", "DEPEND", "DESCRIPTION", "EAPI",
+ "IUSE", "KEYWORDS", "LICENSE", "PDEPEND", "PROPERTIES",
+ "PROVIDE", "RDEPEND", "repository", "SLOT", "USE", "DEFINED_PHASES",
+ "REQUIRED_USE", "BASE_URI"]
+ self._pkgindex_aux_keys = list(self._pkgindex_aux_keys)
+ self._pkgindex_use_evaluated_keys = \
+ ("LICENSE", "RDEPEND", "DEPEND",
+ "PDEPEND", "PROPERTIES", "PROVIDE")
+ self._pkgindex_header_keys = set([
+ "ACCEPT_KEYWORDS", "ACCEPT_LICENSE",
+ "ACCEPT_PROPERTIES", "CBUILD",
+ "CONFIG_PROTECT", "CONFIG_PROTECT_MASK", "FEATURES",
+ "GENTOO_MIRRORS", "INSTALL_MASK", "SYNC", "USE"])
+ self._pkgindex_default_pkg_data = {
+ "BUILD_TIME" : "",
+ "DEPEND" : "",
+ "EAPI" : "0",
+ "IUSE" : "",
+ "KEYWORDS": "",
+ "LICENSE" : "",
+ "PATH" : "",
+ "PDEPEND" : "",
+ "PROPERTIES" : "",
+ "PROVIDE" : "",
+ "RDEPEND" : "",
+ "RESTRICT": "",
+ "SLOT" : "0",
+ "USE" : "",
+ "DEFINED_PHASES" : "",
+ "REQUIRED_USE" : ""
+ }
+ self._pkgindex_inherited_keys = ["CHOST", "repository"]
+
+ # Populate the header with appropriate defaults.
+ self._pkgindex_default_header_data = {
+ "CHOST" : self.settings.get("CHOST", ""),
+ "repository" : "",
+ }
+
+ # It is especially important to populate keys like
+ # "repository" that save space when entries can
+ # inherit them from the header. If an existing
+ # pkgindex header already defines these keys, then
+ # they will appropriately override our defaults.
+ main_repo = self.settings.repositories.mainRepo()
+ if main_repo is not None and not main_repo.missing_repo_name:
+ self._pkgindex_default_header_data["repository"] = \
+ main_repo.name
+
+ self._pkgindex_translated_keys = (
+ ("DESCRIPTION" , "DESC"),
+ ("repository" , "REPO"),
+ )
+
+ self._pkgindex_allowed_pkg_keys = set(chain(
+ self._pkgindex_keys,
+ self._pkgindex_aux_keys,
+ self._pkgindex_hashes,
+ self._pkgindex_default_pkg_data,
+ self._pkgindex_inherited_keys,
+ chain(*self._pkgindex_translated_keys)
+ ))
+
+ def move_ent(self, mylist, repo_match=None):
+ if not self.populated:
+ self.populate()
+ origcp = mylist[1]
+ newcp = mylist[2]
+ # sanity check
+ for atom in (origcp, newcp):
+ if not isjustname(atom):
+ raise InvalidPackageName(str(atom))
+ mynewcat = catsplit(newcp)[0]
+ origmatches=self.dbapi.cp_list(origcp)
+ moves = 0
+ if not origmatches:
+ return moves
+ for mycpv in origmatches:
+ mycpv_cp = portage.cpv_getkey(mycpv)
+ if mycpv_cp != origcp:
+ # Ignore PROVIDE virtual match.
+ continue
+ if repo_match is not None \
+ and not repo_match(self.dbapi.aux_get(mycpv,
+ ['repository'])[0]):
+ continue
+ mynewcpv = mycpv.replace(mycpv_cp, str(newcp), 1)
+ myoldpkg = catsplit(mycpv)[1]
+ mynewpkg = catsplit(mynewcpv)[1]
+
+ if (mynewpkg != myoldpkg) and os.path.exists(self.getname(mynewcpv)):
+ writemsg(_("!!! Cannot update binary: Destination exists.\n"),
+ noiselevel=-1)
+ writemsg("!!! "+mycpv+" -> "+mynewcpv+"\n", noiselevel=-1)
+ continue
+
+ tbz2path = self.getname(mycpv)
+ if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
+ writemsg(_("!!! Cannot update readonly binary: %s\n") % mycpv,
+ noiselevel=-1)
+ continue
+
+ moves += 1
+ mytbz2 = portage.xpak.tbz2(tbz2path)
+ mydata = mytbz2.get_data()
+ updated_items = update_dbentries([mylist], mydata)
+ mydata.update(updated_items)
+ mydata[b'PF'] = \
+ _unicode_encode(mynewpkg + "\n",
+ encoding=_encodings['repo.content'])
+ mydata[b'CATEGORY'] = \
+ _unicode_encode(mynewcat + "\n",
+ encoding=_encodings['repo.content'])
+ if mynewpkg != myoldpkg:
+ ebuild_data = mydata.pop(_unicode_encode(myoldpkg + '.ebuild',
+ encoding=_encodings['repo.content']), None)
+ if ebuild_data is not None:
+ mydata[_unicode_encode(mynewpkg + '.ebuild',
+ encoding=_encodings['repo.content'])] = ebuild_data
+
+ mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))
+
+ self.dbapi.cpv_remove(mycpv)
+ del self._pkg_paths[mycpv]
+ new_path = self.getname(mynewcpv)
+ self._pkg_paths[mynewcpv] = os.path.join(
+ *new_path.split(os.path.sep)[-2:])
+ if new_path != tbz2path:
+ self._ensure_dir(os.path.dirname(new_path))
+ _movefile(tbz2path, new_path, mysettings=self.settings)
+ self._remove_symlink(mycpv)
+ if new_path.split(os.path.sep)[-2] == "All":
+ self._create_symlink(mynewcpv)
+ self.inject(mynewcpv)
+
+ return moves
+
+ def _remove_symlink(self, cpv):
+ """Remove a ${PKGDIR}/${CATEGORY}/${PF}.tbz2 symlink and also remove
+ the ${PKGDIR}/${CATEGORY} directory if empty. The file will not be
+ removed if os.path.islink() returns False."""
+ mycat, mypkg = catsplit(cpv)
+ mylink = os.path.join(self.pkgdir, mycat, mypkg + ".tbz2")
+ if os.path.islink(mylink):
+ """Only remove it if it's really a link so that this method never
+ removes a real package that was placed here to avoid a collision."""
+ os.unlink(mylink)
+ try:
+ os.rmdir(os.path.join(self.pkgdir, mycat))
+ except OSError as e:
+ if e.errno not in (errno.ENOENT,
+ errno.ENOTEMPTY, errno.EEXIST):
+ raise
+ del e
+
+ def _create_symlink(self, cpv):
+ """Create a ${PKGDIR}/${CATEGORY}/${PF}.tbz2 symlink (and
+ ${PKGDIR}/${CATEGORY} directory, if necessary). Any file that may
+ exist in the location of the symlink will first be removed."""
+ mycat, mypkg = catsplit(cpv)
+ full_path = os.path.join(self.pkgdir, mycat, mypkg + ".tbz2")
+ self._ensure_dir(os.path.dirname(full_path))
+ try:
+ os.unlink(full_path)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ os.symlink(os.path.join("..", "All", mypkg + ".tbz2"), full_path)
+
+ def prevent_collision(self, cpv):
+ """Make sure that the file location ${PKGDIR}/All/${PF}.tbz2 is safe to
+ use for a given cpv. If a collision will occur with an existing
+ package from another category, the existing package will be bumped to
+ ${PKGDIR}/${CATEGORY}/${PF}.tbz2 so that both can coexist."""
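+ # Illustrative scenario (hypothetical packages): if x11-misc/foo-1.0
+ # already owns ${PKGDIR}/All/foo-1.0.tbz2 and app-misc/foo-1.0 is
+ # about to be written, the existing package is bumped back to
+ # ${PKGDIR}/x11-misc/foo-1.0.tbz2 before the new cpv claims All/.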
+ if not self._all_directory:
+ return
+
+ # Copy group permissions for new directories that
+ # may have been created.
+ for path in ("All", catsplit(cpv)[0]):
+ path = os.path.join(self.pkgdir, path)
+ self._ensure_dir(path)
+ if not os.access(path, os.W_OK):
+ raise PermissionDenied("access('%s', W_OK)" % path)
+
+ full_path = self.getname(cpv)
+ if "All" == full_path.split(os.path.sep)[-2]:
+ return
+ """Move a colliding package if it exists. Code below this point only
+ executes in rare cases."""
+ mycat, mypkg = catsplit(cpv)
+ myfile = mypkg + ".tbz2"
+ mypath = os.path.join("All", myfile)
+ dest_path = os.path.join(self.pkgdir, mypath)
+
+ try:
+ st = os.lstat(dest_path)
+ except OSError:
+ st = None
+ else:
+ if stat.S_ISLNK(st.st_mode):
+ st = None
+ try:
+ os.unlink(dest_path)
+ except OSError:
+ if os.path.exists(dest_path):
+ raise
+
+ if st is not None:
+ # For invalid packages, other_cat could be None.
+ other_cat = portage.xpak.tbz2(dest_path).getfile(b"CATEGORY")
+ if other_cat:
+ other_cat = _unicode_decode(other_cat,
+ encoding=_encodings['repo.content'], errors='replace')
+ other_cat = other_cat.strip()
+ other_cpv = other_cat + "/" + mypkg
+ self._move_from_all(other_cpv)
+ self.inject(other_cpv)
+ self._move_to_all(cpv)
+
+ def _ensure_dir(self, path):
+ """
+ Create the specified directory. Also, copy gid and group mode
+ bits from self.pkgdir if possible.
+ @param path: Absolute path of the directory to be created.
+ @type path: String
+ """
+ try:
+ pkgdir_st = os.stat(self.pkgdir)
+ except OSError:
+ ensure_dirs(path)
+ return
+ pkgdir_gid = pkgdir_st.st_gid
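+ # 0o2070 keeps only the setgid bit and the group rwx bits, so a
+ # pkgdir mode of e.g. 0o2775 yields a creation mode of 0o2070.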
+ pkgdir_grp_mode = 0o2070 & pkgdir_st.st_mode
+ try:
+ ensure_dirs(path, gid=pkgdir_gid, mode=pkgdir_grp_mode, mask=0)
+ except PortageException:
+ if not os.path.isdir(path):
+ raise
+
+ def _move_to_all(self, cpv):
+ """If the file exists, move it. Whether or not it exists, update state
+ for future getname() calls."""
+ mycat, mypkg = catsplit(cpv)
+ myfile = mypkg + ".tbz2"
+ self._pkg_paths[cpv] = os.path.join("All", myfile)
+ src_path = os.path.join(self.pkgdir, mycat, myfile)
+ try:
+ mystat = os.lstat(src_path)
+ except OSError as e:
+ mystat = None
+ if mystat and stat.S_ISREG(mystat.st_mode):
+ self._ensure_dir(os.path.join(self.pkgdir, "All"))
+ dest_path = os.path.join(self.pkgdir, "All", myfile)
+ _movefile(src_path, dest_path, mysettings=self.settings)
+ self._create_symlink(cpv)
+ self.inject(cpv)
+
+ def _move_from_all(self, cpv):
+ """Move a package from ${PKGDIR}/All/${PF}.tbz2 to
+ ${PKGDIR}/${CATEGORY}/${PF}.tbz2 and update state for future getname calls."""
+ self._remove_symlink(cpv)
+ mycat, mypkg = catsplit(cpv)
+ myfile = mypkg + ".tbz2"
+ mypath = os.path.join(mycat, myfile)
+ dest_path = os.path.join(self.pkgdir, mypath)
+ self._ensure_dir(os.path.dirname(dest_path))
+ src_path = os.path.join(self.pkgdir, "All", myfile)
+ _movefile(src_path, dest_path, mysettings=self.settings)
+ self._pkg_paths[cpv] = mypath
+
+ def populate(self, getbinpkgs=0):
+ "populates the binarytree"
+
+ if self._populating:
+ return
+
+ pkgindex_lock = None
+ try:
+ if os.access(self.pkgdir, os.W_OK):
+ pkgindex_lock = lockfile(self._pkgindex_file,
+ wantnewlockfile=1)
+ self._populating = True
+ self._populate(getbinpkgs)
+ finally:
+ if pkgindex_lock:
+ unlockfile(pkgindex_lock)
+ self._populating = False
+
+ def _populate(self, getbinpkgs=0):
+ if (not os.path.isdir(self.pkgdir) and not getbinpkgs):
+ return 0
+
+ # Clear all caches in case populate is called multiple times
+ # as may be the case when _global_updates calls populate()
+ # prior to performing package moves since it only wants to
+ # operate on local packages (getbinpkgs=0).
+ self._remotepkgs = None
+ self.dbapi._clear_cache()
+ self.dbapi._aux_cache.clear()
+ if True:
+ pkg_paths = {}
+ self._pkg_paths = pkg_paths
+ dirs = listdir(self.pkgdir, dirsonly=True, EmptyOnError=True)
+ if "All" in dirs:
+ dirs.remove("All")
+ dirs.sort()
+ dirs.insert(0, "All")
+ pkgindex = self._load_pkgindex()
+ pf_index = None
+ if not self._pkgindex_version_supported(pkgindex):
+ pkgindex = self._new_pkgindex()
+ header = pkgindex.header
+ metadata = {}
+ for d in pkgindex.packages:
+ metadata[d["CPV"]] = d
+ update_pkgindex = False
+ for mydir in dirs:
+ for myfile in listdir(os.path.join(self.pkgdir, mydir)):
+ if not myfile.endswith(".tbz2"):
+ continue
+ mypath = os.path.join(mydir, myfile)
+ full_path = os.path.join(self.pkgdir, mypath)
+ s = os.lstat(full_path)
+ if stat.S_ISLNK(s.st_mode):
+ continue
+
+ # Validate data from the package index and try to avoid
+ # reading the xpak if possible.
+ if mydir != "All":
+ possibilities = None
+ d = metadata.get(mydir+"/"+myfile[:-5])
+ if d:
+ possibilities = [d]
+ else:
+ if pf_index is None:
+ pf_index = {}
+ for mycpv in metadata:
+ mycat, mypf = catsplit(mycpv)
+ pf_index.setdefault(
+ mypf, []).append(metadata[mycpv])
+ possibilities = pf_index.get(myfile[:-5])
+ if possibilities:
+ match = None
+ for d in possibilities:
+ try:
+ if long(d["MTIME"]) != s[stat.ST_MTIME]:
+ continue
+ except (KeyError, ValueError):
+ continue
+ try:
+ if long(d["SIZE"]) != long(s.st_size):
+ continue
+ except (KeyError, ValueError):
+ continue
+ if not self._pkgindex_keys.difference(d):
+ match = d
+ break
+ if match:
+ mycpv = match["CPV"]
+ if mycpv in pkg_paths:
+ # discard duplicates (All/ is preferred)
+ continue
+ pkg_paths[mycpv] = mypath
+ # update the path if the package has been moved
+ oldpath = d.get("PATH")
+ if oldpath and oldpath != mypath:
+ update_pkgindex = True
+ if mypath != mycpv + ".tbz2":
+ d["PATH"] = mypath
+ if not oldpath:
+ update_pkgindex = True
+ else:
+ d.pop("PATH", None)
+ if oldpath:
+ update_pkgindex = True
+ self.dbapi.cpv_inject(mycpv)
+ if not self.dbapi._aux_cache_keys.difference(d):
+ aux_cache = self.dbapi._aux_cache_slot_dict()
+ for k in self.dbapi._aux_cache_keys:
+ aux_cache[k] = d[k]
+ self.dbapi._aux_cache[mycpv] = aux_cache
+ continue
+ if not os.access(full_path, os.R_OK):
+ writemsg(_("!!! Permission denied to read " \
+ "binary package: '%s'\n") % full_path,
+ noiselevel=-1)
+ self.invalids.append(myfile[:-5])
+ continue
+ metadata_bytes = portage.xpak.tbz2(full_path).get_data()
+ mycat = _unicode_decode(metadata_bytes.get(b"CATEGORY", ""),
+ encoding=_encodings['repo.content'], errors='replace')
+ mypf = _unicode_decode(metadata_bytes.get(b"PF", ""),
+ encoding=_encodings['repo.content'], errors='replace')
+ slot = _unicode_decode(metadata_bytes.get(b"SLOT", ""),
+ encoding=_encodings['repo.content'], errors='replace')
+ mypkg = myfile[:-5]
+ if not mycat or not mypf or not slot:
+ #old-style or corrupt package
+ writemsg(_("\n!!! Invalid binary package: '%s'\n") % full_path,
+ noiselevel=-1)
+ missing_keys = []
+ if not mycat:
+ missing_keys.append("CATEGORY")
+ if not mypf:
+ missing_keys.append("PF")
+ if not slot:
+ missing_keys.append("SLOT")
+ msg = []
+ if missing_keys:
+ missing_keys.sort()
+ msg.append(_("Missing metadata key(s): %s.") % \
+ ", ".join(missing_keys))
+ msg.append(_(" This binary package is not " \
+ "recoverable and should be deleted."))
+ for line in textwrap.wrap("".join(msg), 72):
+ writemsg("!!! %s\n" % line, noiselevel=-1)
+ self.invalids.append(mypkg)
+ continue
+ mycat = mycat.strip()
+ slot = slot.strip()
+ if mycat != mydir and mydir != "All":
+ continue
+ if mypkg != mypf.strip():
+ continue
+ mycpv = mycat + "/" + mypkg
+ if mycpv in pkg_paths:
+ # All is first, so it's preferred.
+ continue
+ if not self.dbapi._category_re.match(mycat):
+ writemsg(_("!!! Binary package has an " \
+ "unrecognized category: '%s'\n") % full_path,
+ noiselevel=-1)
+ writemsg(_("!!! '%s' has a category that is not" \
+ " listed in %setc/portage/categories\n") % \
+ (mycpv, self.settings["PORTAGE_CONFIGROOT"]),
+ noiselevel=-1)
+ continue
+ pkg_paths[mycpv] = mypath
+ self.dbapi.cpv_inject(mycpv)
+ update_pkgindex = True
+ d = metadata.get(mycpv, {})
+ if d:
+ try:
+ if long(d["MTIME"]) != s[stat.ST_MTIME]:
+ d.clear()
+ except (KeyError, ValueError):
+ d.clear()
+ if d:
+ try:
+ if long(d["SIZE"]) != long(s.st_size):
+ d.clear()
+ except (KeyError, ValueError):
+ d.clear()
+
+ d["CPV"] = mycpv
+ d["SLOT"] = slot
+ d["MTIME"] = str(s[stat.ST_MTIME])
+ d["SIZE"] = str(s.st_size)
+
+ d.update(zip(self._pkgindex_aux_keys,
+ self.dbapi.aux_get(mycpv, self._pkgindex_aux_keys)))
+ try:
+ self._eval_use_flags(mycpv, d)
+ except portage.exception.InvalidDependString:
+ writemsg(_("!!! Invalid binary package: '%s'\n") % \
+ self.getname(mycpv), noiselevel=-1)
+ self.dbapi.cpv_remove(mycpv)
+ del pkg_paths[mycpv]
+
+ # record location if it's non-default
+ if mypath != mycpv + ".tbz2":
+ d["PATH"] = mypath
+ else:
+ d.pop("PATH", None)
+ metadata[mycpv] = d
+ if not self.dbapi._aux_cache_keys.difference(d):
+ aux_cache = self.dbapi._aux_cache_slot_dict()
+ for k in self.dbapi._aux_cache_keys:
+ aux_cache[k] = d[k]
+ self.dbapi._aux_cache[mycpv] = aux_cache
+
+ for cpv in list(metadata):
+ if cpv not in pkg_paths:
+ del metadata[cpv]
+
+ # Do not bother to write the Packages index if $PKGDIR/All/ exists
+ # since it will provide no benefit due to the need to read CATEGORY
+ # from xpak.
+ if update_pkgindex and os.access(self.pkgdir, os.W_OK):
+ del pkgindex.packages[:]
+ pkgindex.packages.extend(iter(metadata.values()))
+ self._update_pkgindex_header(pkgindex.header)
+ f = atomic_ofstream(self._pkgindex_file)
+ pkgindex.write(f)
+ f.close()
+
+ if getbinpkgs and not self.settings["PORTAGE_BINHOST"]:
+ writemsg(_("!!! PORTAGE_BINHOST unset, but use is requested.\n"),
+ noiselevel=-1)
+
+ if not getbinpkgs or 'PORTAGE_BINHOST' not in self.settings:
+ self.populated=1
+ return
+ self._remotepkgs = {}
+ for base_url in self.settings["PORTAGE_BINHOST"].split():
+ parsed_url = urlparse(base_url)
+ host = parsed_url.netloc
+ port = parsed_url.port
+ user = None
+ passwd = None
+ user_passwd = ""
+ if "@" in host:
+ user, host = host.split("@", 1)
+ user_passwd = user + "@"
+ if ":" in user:
+ user, passwd = user.split(":", 1)
+ port_args = []
+ if port is not None:
+ port_str = ":%s" % (port,)
+ if host.endswith(port_str):
+ host = host[:-len(port_str)]
+ pkgindex_file = os.path.join(self.settings["EROOT"], CACHE_PATH, "binhost",
+ host, parsed_url.path.lstrip("/"), "Packages")
+ pkgindex = self._new_pkgindex()
+ try:
+ f = io.open(_unicode_encode(pkgindex_file,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace')
+ try:
+ pkgindex.read(f)
+ finally:
+ f.close()
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ local_timestamp = pkgindex.header.get("TIMESTAMP", None)
+ rmt_idx = self._new_pkgindex()
+ proc = None
+ tmp_filename = None
+ try:
+ # urlparse.urljoin() only works correctly with recognized
+ # protocols and requires the base url to have a trailing
+ # slash, so join manually...
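+ # e.g. "sftp://host/pkgs" -> "sftp://host/pkgs/Packages"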
+ url = base_url.rstrip("/") + "/Packages"
+ try:
+ f = urllib_request_urlopen(url)
+ except IOError:
+ path = parsed_url.path.rstrip("/") + "/Packages"
+
+ if parsed_url.scheme == 'sftp':
+ # The sftp command complains about 'Illegal seek' if
+ # we try to make it write to /dev/stdout, so use a
+ # temp file instead.
+ fd, tmp_filename = tempfile.mkstemp()
+ os.close(fd)
+ if port is not None:
+ port_args = ['-P', "%s" % (port,)]
+ proc = subprocess.Popen(['sftp'] + port_args + \
+ [user_passwd + host + ":" + path, tmp_filename])
+ if proc.wait() != os.EX_OK:
+ raise
+ f = open(tmp_filename, 'rb')
+ elif parsed_url.scheme == 'ssh':
+ if port is not None:
+ port_args = ['-p', "%s" % (port,)]
+ proc = subprocess.Popen(['ssh'] + port_args + \
+ [user_passwd + host, '--', 'cat', path],
+ stdout=subprocess.PIPE)
+ f = proc.stdout
+ else:
+ setting = 'FETCHCOMMAND_' + parsed_url.scheme.upper()
+ fcmd = self.settings.get(setting)
+ if not fcmd:
+ raise
+ fd, tmp_filename = tempfile.mkstemp()
+ tmp_dirname, tmp_basename = os.path.split(tmp_filename)
+ os.close(fd)
+ success = portage.getbinpkg.file_get(url,
+ tmp_dirname, fcmd=fcmd, filename=tmp_basename)
+ if not success:
+ raise EnvironmentError("%s failed" % (setting,))
+ f = open(tmp_filename, 'rb')
+
+ f_dec = codecs.iterdecode(f,
+ _encodings['repo.content'], errors='replace')
+ try:
+ rmt_idx.readHeader(f_dec)
+ remote_timestamp = rmt_idx.header.get("TIMESTAMP", None)
+ if not remote_timestamp:
+ # no timestamp in the header, something's wrong
+ pkgindex = None
+ writemsg(_("\n\n!!! Binhost package index " \
+ "has no TIMESTAMP field.\n"), noiselevel=-1)
+ else:
+ if not self._pkgindex_version_supported(rmt_idx):
+ writemsg(_("\n\n!!! Binhost package index version" \
+ " is not supported: '%s'\n") % \
+ rmt_idx.header.get("VERSION"), noiselevel=-1)
+ pkgindex = None
+ elif local_timestamp != remote_timestamp:
+ rmt_idx.readBody(f_dec)
+ pkgindex = rmt_idx
+ finally:
+ # Timeout after 5 seconds, in case close() blocks
+ # indefinitely (see bug #350139).
+ try:
+ try:
+ AlarmSignal.register(5)
+ f.close()
+ finally:
+ AlarmSignal.unregister()
+ except AlarmSignal:
+ writemsg("\n\n!!! %s\n" % \
+ _("Timed out while closing connection to binhost"),
+ noiselevel=-1)
+ except EnvironmentError as e:
+ writemsg(_("\n\n!!! Error fetching binhost package" \
+ " info from '%s'\n") % base_url)
+ writemsg("!!! %s\n\n" % str(e))
+ del e
+ pkgindex = None
+ if proc is not None:
+ if proc.poll() is None:
+ proc.kill()
+ proc.wait()
+ proc = None
+ if tmp_filename is not None:
+ try:
+ os.unlink(tmp_filename)
+ except OSError:
+ pass
+ if pkgindex is rmt_idx:
+ pkgindex.modified = False # don't update the header
+ try:
+ ensure_dirs(os.path.dirname(pkgindex_file))
+ f = atomic_ofstream(pkgindex_file)
+ pkgindex.write(f)
+ f.close()
+ except (IOError, PortageException):
+ if os.access(os.path.dirname(pkgindex_file), os.W_OK):
+ raise
+ # The current user doesn't have permission to cache the
+ # file, but that's alright.
+ if pkgindex:
+ # Organize remote package list as a cpv -> metadata map.
+ remotepkgs = _pkgindex_cpv_map_latest_build(pkgindex)
+ remote_base_uri = pkgindex.header.get("URI", base_url)
+ for cpv, remote_metadata in remotepkgs.items():
+ remote_metadata["BASE_URI"] = remote_base_uri
+ self._pkgindex_uri[cpv] = url
+ self._remotepkgs.update(remotepkgs)
+ self._remote_has_index = True
+ for cpv in remotepkgs:
+ self.dbapi.cpv_inject(cpv)
+ if True:
+ # Remote package instances override local package
+ # if they are not identical.
+ hash_names = ["SIZE"] + self._pkgindex_hashes
+ for cpv, local_metadata in metadata.items():
+ remote_metadata = self._remotepkgs.get(cpv)
+ if remote_metadata is None:
+ continue
+ # Use digests to compare identity.
+ identical = True
+ for hash_name in hash_names:
+ local_value = local_metadata.get(hash_name)
+ if local_value is None:
+ continue
+ remote_value = remote_metadata.get(hash_name)
+ if remote_value is None:
+ continue
+ if local_value != remote_value:
+ identical = False
+ break
+ if identical:
+ del self._remotepkgs[cpv]
+ else:
+ # Override the local package in the aux_get cache.
+ self.dbapi._aux_cache[cpv] = remote_metadata
+ else:
+ # Local package instances override remote instances.
+ for cpv in metadata:
+ self._remotepkgs.pop(cpv, None)
+ continue
+ try:
+ chunk_size = long(self.settings["PORTAGE_BINHOST_CHUNKSIZE"])
+ if chunk_size < 8:
+ chunk_size = 8
+ except (ValueError, KeyError):
+ chunk_size = 3000
+ writemsg_stdout("\n")
+ writemsg_stdout(
+ colorize("GOOD", _("Fetching bininfo from ")) + \
+ re.sub(r'//(.+):.+@(.+)/', r'//\1:*password*@\2/', base_url) + "\n")
+ remotepkgs = portage.getbinpkg.dir_get_metadata(
+ base_url, chunk_size=chunk_size)
+
+ for mypkg, remote_metadata in remotepkgs.items():
+ mycat = remote_metadata.get("CATEGORY")
+ if mycat is None:
+ #old-style or corrupt package
+ writemsg(_("!!! Invalid remote binary package: %s\n") % mypkg,
+ noiselevel=-1)
+ continue
+ mycat = mycat.strip()
+ fullpkg = mycat+"/"+mypkg[:-5]
+
+ if fullpkg in metadata:
+ # When using this old protocol, comparison with the remote
+ # package isn't supported, so the local package is always
+ # preferred even if getbinpkgsonly is enabled.
+ continue
+
+ if not self.dbapi._category_re.match(mycat):
+ writemsg(_("!!! Remote binary package has an " \
+ "unrecognized category: '%s'\n") % fullpkg,
+ noiselevel=-1)
+ writemsg(_("!!! '%s' has a category that is not" \
+ " listed in %setc/portage/categories\n") % \
+ (fullpkg, self.settings["PORTAGE_CONFIGROOT"]),
+ noiselevel=-1)
+ continue
+ mykey = portage.cpv_getkey(fullpkg)
+ try:
+ # invalid tbz2's can hurt things.
+ self.dbapi.cpv_inject(fullpkg)
+ for k, v in remote_metadata.items():
+ remote_metadata[k] = v.strip()
+ remote_metadata["BASE_URI"] = base_url
+
+ # Eliminate metadata values with names that digestCheck
+ # uses, since they are not valid when using the old
+ # protocol. Typically this is needed for SIZE metadata
+ # which corresponds to the size of the unpacked files
+ # rather than the binpkg file size, triggering digest
+ # verification failures as reported in bug #303211.
+ remote_metadata.pop('SIZE', None)
+ for k in portage.checksum.hashfunc_map:
+ remote_metadata.pop(k, None)
+
+ self._remotepkgs[fullpkg] = remote_metadata
+ except SystemExit as e:
+ raise
+ except:
+ writemsg(_("!!! Failed to inject remote binary package: %s\n") % fullpkg,
+ noiselevel=-1)
+ continue
+ self.populated=1
+
+ def inject(self, cpv, filename=None):
+ """Add a freshly built package to the database. This updates
+ $PKGDIR/Packages with the new package metadata (including MD5).
+ @param cpv: The cpv of the new package to inject
+ @type cpv: string
+ @param filename: File path of the package to inject, or None if it's
+ already in the location returned by getname()
+ @type filename: string
+ @rtype: None
+ """
+ mycat, mypkg = catsplit(cpv)
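+ # Hypothetical usage sketch (paths are illustrative):
+ #   bintree.inject("app-misc/foo-1.0",
+ #       filename="/var/tmp/portage/foo-1.0.tbz2")
+ # moves the file into the getname() location if necessary and
+ # rewrites the matching Packages entry under the index lock.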
+ if not self.populated:
+ self.populate()
+ if filename is None:
+ full_path = self.getname(cpv)
+ else:
+ full_path = filename
+ try:
+ s = os.stat(full_path)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ writemsg(_("!!! Binary package does not exist: '%s'\n") % full_path,
+ noiselevel=-1)
+ return
+ mytbz2 = portage.xpak.tbz2(full_path)
+ slot = mytbz2.getfile("SLOT")
+ if slot is None:
+ writemsg(_("!!! Invalid binary package: '%s'\n") % full_path,
+ noiselevel=-1)
+ return
+ slot = slot.strip()
+ self.dbapi.cpv_inject(cpv)
+
+ # Reread the Packages index (in case it's been changed by another
+ # process) and then update it, all while holding a lock.
+ pkgindex_lock = None
+ created_symlink = False
+ try:
+ pkgindex_lock = lockfile(self._pkgindex_file,
+ wantnewlockfile=1)
+ if filename is not None:
+ new_filename = self.getname(cpv)
+ try:
+ samefile = os.path.samefile(filename, new_filename)
+ except OSError:
+ samefile = False
+ if not samefile:
+ self._ensure_dir(os.path.dirname(new_filename))
+ _movefile(filename, new_filename, mysettings=self.settings)
+ if self._all_directory and \
+ self.getname(cpv).split(os.path.sep)[-2] == "All":
+ self._create_symlink(cpv)
+ created_symlink = True
+ pkgindex = self._load_pkgindex()
+
+ if not self._pkgindex_version_supported(pkgindex):
+ pkgindex = self._new_pkgindex()
+
+ # Discard remote metadata to ensure that _pkgindex_entry
+ # gets the local metadata. This also updates state for future
+ # isremote calls.
+ if self._remotepkgs is not None:
+ self._remotepkgs.pop(cpv, None)
+
+ # Discard cached metadata to ensure that _pkgindex_entry
+ # doesn't return stale metadata.
+ self.dbapi._aux_cache.pop(cpv, None)
+
+ try:
+ d = self._pkgindex_entry(cpv)
+ except portage.exception.InvalidDependString:
+ writemsg(_("!!! Invalid binary package: '%s'\n") % \
+ self.getname(cpv), noiselevel=-1)
+ self.dbapi.cpv_remove(cpv)
+ del self._pkg_paths[cpv]
+ return
+
+ # If found, remove package(s) with duplicate path.
+ path = d.get("PATH", "")
+ for i in range(len(pkgindex.packages) - 1, -1, -1):
+ d2 = pkgindex.packages[i]
+ if path and path == d2.get("PATH"):
+ # Handle path collisions in $PKGDIR/All
+ # when CPV is not identical.
+ del pkgindex.packages[i]
+ elif cpv == d2.get("CPV"):
+ if path == d2.get("PATH", ""):
+ del pkgindex.packages[i]
+ elif created_symlink and not d2.get("PATH", ""):
+ # Delete entry for the package that was just
+ # overwritten by a symlink to this package.
+ del pkgindex.packages[i]
+
+ pkgindex.packages.append(d)
+
+ self._update_pkgindex_header(pkgindex.header)
+ f = atomic_ofstream(os.path.join(self.pkgdir, "Packages"))
+ pkgindex.write(f)
+ f.close()
+ finally:
+ if pkgindex_lock:
+ unlockfile(pkgindex_lock)
+
+ def _pkgindex_entry(self, cpv):
+ """
+ Performs checksums and evaluates USE flag conditionals.
+ Raises InvalidDependString if necessary.
+ @rtype: dict
+ @returns: a dict containing the entry for the given cpv.
+ """
+
+ pkg_path = self.getname(cpv)
+
+ d = dict(zip(self._pkgindex_aux_keys,
+ self.dbapi.aux_get(cpv, self._pkgindex_aux_keys)))
+
+ d.update(perform_multiple_checksums(
+ pkg_path, hashes=self._pkgindex_hashes))
+
+ d["CPV"] = cpv
+ st = os.stat(pkg_path)
+ d["MTIME"] = str(st[stat.ST_MTIME])
+ d["SIZE"] = str(st.st_size)
+
+ rel_path = self._pkg_paths[cpv]
+ # record location if it's non-default
+ if rel_path != cpv + ".tbz2":
+ d["PATH"] = rel_path
+
+ self._eval_use_flags(cpv, d)
+ return d
+
+ def _new_pkgindex(self):
+ return portage.getbinpkg.PackageIndex(
+ allowed_pkg_keys=self._pkgindex_allowed_pkg_keys,
+ default_header_data=self._pkgindex_default_header_data,
+ default_pkg_data=self._pkgindex_default_pkg_data,
+ inherited_keys=self._pkgindex_inherited_keys,
+ translated_keys=self._pkgindex_translated_keys)
+
+ def _update_pkgindex_header(self, header):
+ portdir = normalize_path(os.path.realpath(self.settings["PORTDIR"]))
+ profiles_base = os.path.join(portdir, "profiles") + os.path.sep
+ if self.settings.profile_path:
+ profile_path = normalize_path(
+ os.path.realpath(self.settings.profile_path))
+ if profile_path.startswith(profiles_base):
+ profile_path = profile_path[len(profiles_base):]
+ header["PROFILE"] = profile_path
+ header["VERSION"] = str(self._pkgindex_version)
+ base_uri = self.settings.get("PORTAGE_BINHOST_HEADER_URI")
+ if base_uri:
+ header["URI"] = base_uri
+ else:
+ header.pop("URI", None)
+ for k in self._pkgindex_header_keys:
+ v = self.settings.get(k, None)
+ if v:
+ header[k] = v
+ else:
+ header.pop(k, None)
+
+ def _pkgindex_version_supported(self, pkgindex):
+ version = pkgindex.header.get("VERSION")
+ if version:
+ try:
+ if int(version) <= self._pkgindex_version:
+ return True
+ except ValueError:
+ pass
+ return False
+
+ def _eval_use_flags(self, cpv, metadata):
+ use = frozenset(metadata["USE"].split())
+ raw_use = use
+ iuse = set(f.lstrip("-+") for f in metadata["IUSE"].split())
+ use = [f for f in use if f in iuse]
+ use.sort()
+ metadata["USE"] = " ".join(use)
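+ # Illustrative effect (hypothetical values): with USE="foo" and
+ # RDEPEND="foo? ( dev-libs/y ) bar? ( dev-libs/z )", the
+ # use_reduce()/paren_enclose() pass below collapses the
+ # conditionals to just "dev-libs/y".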
+ for k in self._pkgindex_use_evaluated_keys:
+ if k.endswith('DEPEND'):
+ token_class = Atom
+ else:
+ token_class = None
+
+ try:
+ deps = metadata[k]
+ deps = use_reduce(deps, uselist=raw_use, token_class=token_class)
+ deps = paren_enclose(deps)
+ except portage.exception.InvalidDependString as e:
+ writemsg("%s: %s\n" % (k, str(e)),
+ noiselevel=-1)
+ raise
+ metadata[k] = deps
+
+ def exists_specific(self, cpv):
+ if not self.populated:
+ self.populate()
+ return self.dbapi.match(
+ dep_expand("="+cpv, mydb=self.dbapi, settings=self.settings))
+
+ def dep_bestmatch(self, mydep):
+ "compatibility method -- all matches, not just visible ones"
+ if not self.populated:
+ self.populate()
+ writemsg("\n\n", 1)
+ writemsg("mydep: %s\n" % mydep, 1)
+ mydep = dep_expand(mydep, mydb=self.dbapi, settings=self.settings)
+ writemsg("mydep: %s\n" % mydep, 1)
+ mykey = dep_getkey(mydep)
+ writemsg("mykey: %s\n" % mykey, 1)
+ mymatch = best(match_from_list(mydep,self.dbapi.cp_list(mykey)))
+ writemsg("mymatch: %s\n" % mymatch, 1)
+ if mymatch is None:
+ return ""
+ return mymatch
+
+ def getname(self, pkgname):
+ """Returns a file location for this package. The default location is
+ ${PKGDIR}/All/${PF}.tbz2, but will be ${PKGDIR}/${CATEGORY}/${PF}.tbz2
+ in the rare event of a collision. The prevent_collision() method can
+ be called to ensure that ${PKGDIR}/All/${PF}.tbz2 is available for a
+ specific cpv."""
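+ # Illustrative resolution (hypothetical cpv): with an All/ directory,
+ # "app-misc/foo-1.0" normally maps to ${PKGDIR}/All/foo-1.0.tbz2 and
+ # falls back to ${PKGDIR}/app-misc/foo-1.0.tbz2 only when another
+ # category already claims All/foo-1.0.tbz2.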
+ if not self.populated:
+ self.populate()
+ mycpv = pkgname
+ mypath = self._pkg_paths.get(mycpv, None)
+ if mypath:
+ return os.path.join(self.pkgdir, mypath)
+ mycat, mypkg = catsplit(mycpv)
+ if self._all_directory:
+ mypath = os.path.join("All", mypkg + ".tbz2")
+ if mypath in self._pkg_paths.values():
+ mypath = os.path.join(mycat, mypkg + ".tbz2")
+ else:
+ mypath = os.path.join(mycat, mypkg + ".tbz2")
+ self._pkg_paths[mycpv] = mypath # cache for future lookups
+ return os.path.join(self.pkgdir, mypath)
+
+ def isremote(self, pkgname):
+ """Returns true if the package is kept remotely and it has not been
+ downloaded (or it is only partially downloaded)."""
+ if self._remotepkgs is None or pkgname not in self._remotepkgs:
+ return False
+ # Presence in self._remotepkgs implies that it's remote. When a
+ # package is downloaded, state is updated by self.inject().
+ return True
+
+ def get_pkgindex_uri(self, pkgname):
+ """Returns the URI to the Packages file for a given package."""
+ return self._pkgindex_uri.get(pkgname)
+
+ def gettbz2(self, pkgname):
+ """Fetches the package from a remote site, if necessary. Attempts to
+ resume if the file appears to be partially downloaded."""
+ tbz2_path = self.getname(pkgname)
+ tbz2name = os.path.basename(tbz2_path)
+ resume = False
+ if os.path.exists(tbz2_path):
+ if (tbz2name not in self.invalids):
+ return
+ else:
+ resume = True
+ writemsg(_("Resuming download of this tbz2, but it is possible that it is corrupt.\n"),
+ noiselevel=-1)
+
+ mydest = os.path.dirname(self.getname(pkgname))
+ self._ensure_dir(mydest)
+ # urljoin doesn't work correctly with unrecognized protocols like sftp
+ if self._remote_has_index:
+ rel_url = self._remotepkgs[pkgname].get("PATH")
+ if not rel_url:
+ rel_url = pkgname+".tbz2"
+ remote_base_uri = self._remotepkgs[pkgname]["BASE_URI"]
+ url = remote_base_uri.rstrip("/") + "/" + rel_url.lstrip("/")
+ else:
+ url = self.settings["PORTAGE_BINHOST"].rstrip("/") + "/" + tbz2name
+ protocol = urlparse(url)[0]
+ fcmd_prefix = "FETCHCOMMAND"
+ if resume:
+ fcmd_prefix = "RESUMECOMMAND"
+ fcmd = self.settings.get(fcmd_prefix + "_" + protocol.upper())
+ if not fcmd:
+ fcmd = self.settings.get(fcmd_prefix)
+ success = portage.getbinpkg.file_get(url, mydest, fcmd=fcmd)
+ if not success:
+ try:
+ os.unlink(self.getname(pkgname))
+ except OSError:
+ pass
+ raise portage.exception.FileNotFound(mydest)
+ self.inject(pkgname)
+
+ def _load_pkgindex(self):
+ pkgindex = self._new_pkgindex()
+ try:
+ f = io.open(_unicode_encode(self._pkgindex_file,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace')
+ except EnvironmentError:
+ pass
+ else:
+ try:
+ pkgindex.read(f)
+ finally:
+ f.close()
+ return pkgindex
+
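+ # Hypothetical usage sketch for digestCheck() below:
+ #   if bintree.digestCheck("app-misc/foo-1.0"):
+ #       pass  # digests were found and verified
+ # It returns False when no digests are available and raises
+ # DigestException when verification fails.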
+ def digestCheck(self, pkg):
+ """
+ Verify digests for the given package and raise DigestException
+ if verification fails.
+ @rtype: bool
+ @returns: True if digests could be located, False otherwise.
+ """
+ cpv = pkg
+ if not isinstance(cpv, basestring):
+ cpv = pkg.cpv
+ pkg = None
+
+ pkg_path = self.getname(cpv)
+ metadata = None
+ if self._remotepkgs is None or cpv not in self._remotepkgs:
+ for d in self._load_pkgindex().packages:
+ if d["CPV"] == cpv:
+ metadata = d
+ break
+ else:
+ metadata = self._remotepkgs[cpv]
+ if metadata is None:
+ return False
+
+ digests = {}
+ for k in hashfunc_map:
+ v = metadata.get(k)
+ if not v:
+ continue
+ digests[k] = v
+
+ if "SIZE" in metadata:
+ try:
+ digests["size"] = int(metadata["SIZE"])
+ except ValueError:
+ writemsg(_("!!! Malformed SIZE attribute in remote " \
+ "metadata for '%s'\n") % cpv)
+
+ if not digests:
+ return False
+
+ eout = EOutput()
+ eout.quiet = self.settings.get("PORTAGE_QUIET") == "1"
+ ok, st = _check_distfile(pkg_path, digests, eout, show_errors=0)
+ if not ok:
+ ok, reason = verify_all(pkg_path, digests)
+ if not ok:
+ raise portage.exception.DigestException(
+ (pkg_path,) + tuple(reason))
+
+ return True
+
+ def getslot(self, mycatpkg):
+ "Get a slot for a catpkg; assume it exists."
+ myslot = ""
+ try:
+ myslot = self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ pass
+ return myslot
diff --git a/portage_with_autodep/pym/portage/dbapi/cpv_expand.py b/portage_with_autodep/pym/portage/dbapi/cpv_expand.py
new file mode 100644
index 0000000..947194c
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dbapi/cpv_expand.py
@@ -0,0 +1,106 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ["cpv_expand"]
+
+import portage
+from portage.exception import AmbiguousPackageName
+from portage.localization import _
+from portage.util import writemsg
+from portage.versions import _pkgsplit
+
+def cpv_expand(mycpv, mydb=None, use_cache=1, settings=None):
+ """Given a string (packagename or virtual) expand it into a valid
+ cat/package string. Virtuals use the mydb to determine which provided
+ virtual is a valid choice and defaults to the first element when there
+ are no installed/available candidates."""
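+ # Hedged examples (assuming hypothetical database contents):
+ #   cpv_expand("bash-4.1", mydb=portdb)  ->  "app-shells/bash-4.1"
+ #   cpv_expand("nonexistent-1.0")        ->  "null/nonexistent-1.0"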
+ myslash=mycpv.split("/")
+ mysplit = _pkgsplit(myslash[-1])
+ if settings is None:
+ try:
+ settings = mydb.settings
+ except AttributeError:
+ settings = portage.settings
+ if len(myslash)>2:
+ # this is an illegal case.
+ mysplit=[]
+ mykey=mycpv
+ elif len(myslash)==2:
+ if mysplit:
+ mykey=myslash[0]+"/"+mysplit[0]
+ else:
+ mykey=mycpv
+
+ # Since Gentoo stopped using old-style virtuals in
+ # 2011, typically it's possible to avoid getvirtuals()
+ # calls entirely. Therefore, only call getvirtuals()
+ # if the atom category is "virtual" and cp_list()
+ # returns nothing.
+ if mykey.startswith("virtual/") and \
+ hasattr(mydb, "cp_list") and \
+ not mydb.cp_list(mykey, use_cache=use_cache):
+ if hasattr(mydb, "vartree"):
+ settings._populate_treeVirtuals_if_needed(mydb.vartree)
+ virts = settings.getvirtuals().get(mykey)
+ if virts:
+ mykey_orig = mykey
+ for vkey in virts:
+ # The virtuals file can contain a versioned atom, so
+ # it may be necessary to remove the operator and
+ # version from the atom before it is passed into
+ # dbapi.cp_list().
+ if mydb.cp_list(vkey.cp):
+ mykey = str(vkey)
+ break
+ if mykey == mykey_orig:
+ mykey = str(virts[0])
+ #we only perform virtual expansion if we are passed a dbapi
+ else:
+ #specific cpv, no category, ie. "foo-1.0"
+ if mysplit:
+ myp=mysplit[0]
+ else:
+ # "foo" ?
+ myp=mycpv
+ mykey=None
+ matches=[]
+ if mydb and hasattr(mydb, "categories"):
+ for x in mydb.categories:
+ if mydb.cp_list(x+"/"+myp,use_cache=use_cache):
+ matches.append(x+"/"+myp)
+ if len(matches) > 1:
+ virtual_name_collision = False
+ if len(matches) == 2:
+ for x in matches:
+ if not x.startswith("virtual/"):
+ # Assume that the non-virtual is desired. This helps
+ # avoid the ValueError for invalid deps that come from
+ # installed packages (during reverse blocker detection,
+ # for example).
+ mykey = x
+ else:
+ virtual_name_collision = True
+ if not virtual_name_collision:
+ # AmbiguousPackageName inherits from ValueError,
+ # for backward compatibility with calling code
+ # that already handles ValueError.
+ raise AmbiguousPackageName(matches)
+ elif matches:
+ mykey=matches[0]
+
+ if not mykey and not isinstance(mydb, list):
+ if hasattr(mydb, "vartree"):
+ settings._populate_treeVirtuals_if_needed(mydb.vartree)
+ virts_p = settings.get_virts_p().get(myp)
+ if virts_p:
+ mykey = virts_p[0]
+ #again, we only perform virtual expansion if we have a dbapi (not a list)
+ if not mykey:
+ mykey="null/"+myp
+ if mysplit:
+ if mysplit[2]=="r0":
+ return mykey+"-"+mysplit[1]
+ else:
+ return mykey+"-"+mysplit[1]+"-"+mysplit[2]
+ else:
+ return mykey
diff --git a/portage_with_autodep/pym/portage/dbapi/dep_expand.py b/portage_with_autodep/pym/portage/dbapi/dep_expand.py
new file mode 100644
index 0000000..ac8ccf4
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dbapi/dep_expand.py
@@ -0,0 +1,56 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ["dep_expand"]
+
+import re
+
+from portage.dbapi.cpv_expand import cpv_expand
+from portage.dep import Atom, isvalidatom
+from portage.exception import InvalidAtom
+from portage.versions import catsplit
+
+def dep_expand(mydep, mydb=None, use_cache=1, settings=None):
+ '''
+ @rtype: Atom
+ '''
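+ # Hedged examples (assuming the package exists in mydb):
+ #   dep_expand(">=bash-4.1", mydb=portdb)
+ #       -> Atom(">=app-shells/bash-4.1")
+ #   dep_expand("sys-apps/portage-2.1", mydb=portdb)
+ #       -> Atom("=sys-apps/portage-2.1") (missing '=' tolerated)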
+ orig_dep = mydep
+ if isinstance(orig_dep, Atom):
+ has_cat = True
+ else:
+ if not mydep:
+ return mydep
+ if mydep[0] == "*":
+ mydep = mydep[1:]
+ orig_dep = mydep
+ has_cat = '/' in orig_dep
+ if not has_cat:
+ alphanum = re.search(r'\w', orig_dep)
+ if alphanum:
+ mydep = orig_dep[:alphanum.start()] + "null/" + \
+ orig_dep[alphanum.start():]
+ try:
+ mydep = Atom(mydep, allow_repo=True)
+ except InvalidAtom:
+ # Missing '=' prefix is allowed for backward compatibility.
+ if not isvalidatom("=" + mydep, allow_repo=True):
+ raise
+ mydep = Atom('=' + mydep, allow_repo=True)
+ orig_dep = '=' + orig_dep
+ if not has_cat:
+ null_cat, pn = catsplit(mydep.cp)
+ mydep = pn
+
+ if has_cat:
+ # Optimize most common cases to avoid calling cpv_expand.
+ if not mydep.cp.startswith("virtual/"):
+ return mydep
+ if not hasattr(mydb, "cp_list") or \
+ mydb.cp_list(mydep.cp):
+ return mydep
+ # Fallback to legacy cpv_expand for old-style PROVIDE virtuals.
+ mydep = mydep.cp
+
+ expanded = cpv_expand(mydep, mydb=mydb,
+ use_cache=use_cache, settings=settings)
+ return Atom(orig_dep.replace(mydep, expanded, 1), allow_repo=True)
diff --git a/portage_with_autodep/pym/portage/dbapi/porttree.py b/portage_with_autodep/pym/portage/dbapi/porttree.py
new file mode 100644
index 0000000..ecf275c
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dbapi/porttree.py
@@ -0,0 +1,1168 @@
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = [
+ "close_portdbapi_caches", "FetchlistDict", "portagetree", "portdbapi"
+]
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.checksum',
+ 'portage.data:portage_gid,secpass',
+ 'portage.dbapi.dep_expand:dep_expand',
+ 'portage.dep:Atom,dep_getkey,match_from_list,use_reduce',
+ 'portage.package.ebuild.doebuild:doebuild',
+ 'portage.util:ensure_dirs,shlex_split,writemsg,writemsg_level',
+ 'portage.util.listdir:listdir',
+ 'portage.versions:best,catpkgsplit,_pkgsplit@pkgsplit,ver_regexp',
+)
+
+from portage.cache import metadata_overlay, volatile
+from portage.cache.cache_errors import CacheError
+from portage.cache.mappings import Mapping
+from portage.dbapi import dbapi
+from portage.exception import PortageException, \
+ FileNotFound, InvalidAtom, InvalidDependString, InvalidPackageName
+from portage.localization import _
+from portage.manifest import Manifest
+
+from portage import eclass_cache, auxdbkeys, \
+ eapi_is_supported, dep_check, \
+ _eapi_is_deprecated
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage import OrderedDict
+from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
+from _emerge.PollScheduler import PollScheduler
+
+import os as _os
+import io
+import stat
+import sys
+import traceback
+import warnings
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+ long = int
+
+class _repo_info(object):
+ __slots__ = ('name', 'path', 'eclass_db', 'portdir', 'portdir_overlay')
+ def __init__(self, name, path, eclass_db):
+ self.name = name
+ self.path = path
+ self.eclass_db = eclass_db
+ self.portdir = eclass_db.porttrees[0]
+ self.portdir_overlay = ' '.join(eclass_db.porttrees[1:])
+
+class portdbapi(dbapi):
+ """this tree will scan a portage directory located at root (passed to init)"""
+ portdbapi_instances = []
+ _use_mutable = True
+
+ @property
+ def _categories(self):
+ return self.settings.categories
+
+ @property
+ def porttree_root(self):
+ return self.settings.repositories.mainRepoLocation()
+
+ def __init__(self, _unused_param=None, mysettings=None):
+ """
+ @param _unused_param: deprecated, use mysettings['PORTDIR'] instead
+ @type _unused_param: None
+ @param mysettings: an immutable config instance
+ @type mysettings: portage.config
+ """
+ portdbapi.portdbapi_instances.append(self)
+
+ from portage import config
+ if mysettings:
+ self.settings = mysettings
+ else:
+ from portage import settings
+ self.settings = config(clone=settings)
+
+ if _unused_param is not None:
+ warnings.warn("The first parameter of the " + \
+ "portage.dbapi.porttree.portdbapi" + \
+ " constructor is unused since portage-2.1.8. " + \
+ "mysettings['PORTDIR'] is used instead.",
+ DeprecationWarning, stacklevel=2)
+
+ self.repositories = self.settings.repositories
+ self.treemap = self.repositories.treemap
+
+ # This is strictly for use in aux_get() doebuild calls when metadata
+ # is generated by the depend phase. It's safest to use a clone for
+ # this purpose because doebuild makes many changes to the config
+ # instance that is passed in.
+ self.doebuild_settings = config(clone=self.settings)
+ self.depcachedir = os.path.realpath(self.settings.depcachedir)
+
+ if os.environ.get("SANDBOX_ON") == "1":
+ # Make api consumers exempt from sandbox violations
+ # when doing metadata cache updates.
+ sandbox_write = os.environ.get("SANDBOX_WRITE", "").split(":")
+ if self.depcachedir not in sandbox_write:
+ sandbox_write.append(self.depcachedir)
+ os.environ["SANDBOX_WRITE"] = \
+ ":".join(filter(None, sandbox_write))
+
+ self.porttrees = list(self.settings.repositories.repoLocationList())
+ self.eclassdb = eclass_cache.cache(self.settings.repositories.mainRepoLocation())
+
+ # This is used as sanity check for aux_get(). If there is no
+ # root eclass dir, we assume that PORTDIR is invalid or
+ # missing. This check allows aux_get() to detect a missing
+ # portage tree and return early by raising a KeyError.
+ self._have_root_eclass_dir = os.path.isdir(
+ os.path.join(self.settings.repositories.mainRepoLocation(), "eclass"))
+
+ self.metadbmodule = self.settings.load_best_module("portdbapi.metadbmodule")
+
+ #if the portdbapi is "frozen", then we assume that we can cache everything (that no updates to it are happening)
+ self.xcache = {}
+ self.frozen = 0
+
+ #Create eclass dbs
+ self._repo_info = {}
+ eclass_dbs = {self.settings.repositories.mainRepoLocation() : self.eclassdb}
+ for repo in self.repositories:
+ if repo.location in self._repo_info:
+ continue
+
+ eclass_db = None
+ for eclass_location in repo.eclass_locations:
+ tree_db = eclass_dbs.get(eclass_location)
+ if tree_db is None:
+ tree_db = eclass_cache.cache(eclass_location)
+ eclass_dbs[eclass_location] = tree_db
+ if eclass_db is None:
+ eclass_db = tree_db.copy()
+ else:
+ eclass_db.append(tree_db)
+
+ self._repo_info[repo.location] = _repo_info(repo.name, repo.location, eclass_db)
+
+ #Keep a list of repo names, sorted by priority (highest priority first).
+ self._ordered_repo_name_list = tuple(reversed(self.repositories.prepos_order))
+
+ self.auxdbmodule = self.settings.load_best_module("portdbapi.auxdbmodule")
+ self.auxdb = {}
+ self._pregen_auxdb = {}
+ self._init_cache_dirs()
+ depcachedir_w_ok = os.access(self.depcachedir, os.W_OK)
+ cache_kwargs = {
+ 'gid' : portage_gid,
+ 'perms' : 0o664
+ }
+
+ # XXX: REMOVE THIS ONCE UNUSED_0 IS YANKED FROM auxdbkeys
+ # ~harring
+ filtered_auxdbkeys = [x for x in auxdbkeys if not x.startswith("UNUSED_0")]
+ filtered_auxdbkeys.sort()
+ # If secpass < 1, we don't want to write to the cache
+ # since then we won't be able to apply group permissions
+ # to the cache entries/directories.
+ if secpass < 1 or not depcachedir_w_ok:
+ for x in self.porttrees:
+ try:
+ db_ro = self.auxdbmodule(self.depcachedir, x,
+ filtered_auxdbkeys, readonly=True, **cache_kwargs)
+ except CacheError:
+ self.auxdb[x] = volatile.database(
+ self.depcachedir, x, filtered_auxdbkeys,
+ **cache_kwargs)
+ else:
+ self.auxdb[x] = metadata_overlay.database(
+ self.depcachedir, x, filtered_auxdbkeys,
+ db_rw=volatile.database, db_ro=db_ro,
+ **cache_kwargs)
+ else:
+ for x in self.porttrees:
+ if x in self.auxdb:
+ continue
+ # location, label, auxdbkeys
+ self.auxdb[x] = self.auxdbmodule(
+ self.depcachedir, x, filtered_auxdbkeys, **cache_kwargs)
+ if self.auxdbmodule is metadata_overlay.database:
+ self.auxdb[x].db_ro.ec = self._repo_info[x].eclass_db
+ if "metadata-transfer" not in self.settings.features:
+ for x in self.porttrees:
+ if x in self._pregen_auxdb:
+ continue
+ if os.path.isdir(os.path.join(x, "metadata", "cache")):
+ self._pregen_auxdb[x] = self.metadbmodule(
+ x, "metadata/cache", filtered_auxdbkeys, readonly=True)
+ try:
+ self._pregen_auxdb[x].ec = self._repo_info[x].eclass_db
+ except AttributeError:
+ pass
+ # Selectively cache metadata in order to optimize dep matching.
+ self._aux_cache_keys = set(
+ ["DEPEND", "EAPI", "INHERITED", "IUSE", "KEYWORDS", "LICENSE",
+ "PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND", "repository",
+ "RESTRICT", "SLOT", "DEFINED_PHASES", "REQUIRED_USE"])
+
+ self._aux_cache = {}
+ self._broken_ebuilds = set()
+
+ def _init_cache_dirs(self):
+ """Create /var/cache/edb/dep and adjust permissions for the portage
+ group."""
+
+ dirmode = 0o2070
+ filemode = 0o60
+ modemask = 0o2
+
+ try:
+ ensure_dirs(self.depcachedir, gid=portage_gid,
+ mode=dirmode, mask=modemask)
+ except PortageException as e:
+ pass
+
+ def close_caches(self):
+ if not hasattr(self, "auxdb"):
+ # unhandled exception thrown from constructor
+ return
+ for x in self.auxdb:
+ self.auxdb[x].sync()
+ self.auxdb.clear()
+
+ def flush_cache(self):
+ for x in self.auxdb.values():
+ x.sync()
+
+ def findLicensePath(self, license_name):
+ for x in reversed(self.porttrees):
+ license_path = os.path.join(x, "licenses", license_name)
+ if os.access(license_path, os.R_OK):
+ return license_path
+ return None
+
+ def findname(self,mycpv, mytree = None, myrepo = None):
+ return self.findname2(mycpv, mytree, myrepo)[0]
+
+ def getRepositoryPath(self, repository_id):
+ """
+ This function is required for GLEP 42 compliance; given a valid repository ID
+ it must return a path to the repository
+ TreeMap = { id:path }
+ """
+ return self.treemap.get(repository_id)
+
+ def getRepositoryName(self, canonical_repo_path):
+ """
+ This is the inverse of getRepositoryPath().
+ @param canonical_repo_path: the canonical path of a repository, as
+ resolved by os.path.realpath()
+ @type canonical_repo_path: String
+ @returns: The repo_name for the corresponding repository, or None
+ if the path does not correspond to a known repository
+ @rtype: String or None
+ """
+ try:
+ return self.repositories.get_name_for_location(canonical_repo_path)
+ except KeyError:
+ return None
+
+ def getRepositories(self):
+ """
+ This function is required for GLEP 42 compliance; it will return a list of
+ repository IDs
+ TreeMap = {id: path}
+ """
+ return self._ordered_repo_name_list
+
+ def getMissingRepoNames(self):
+ """
+ Returns a list of repository paths that lack profiles/repo_name.
+ """
+ return self.settings.repositories.missing_repo_names
+
+ def getIgnoredRepos(self):
+ """
+ Returns a list of repository paths that have been ignored, because
+ another repo with the same name exists.
+ """
+ return self.settings.repositories.ignored_repos
+
+ def findname2(self, mycpv, mytree=None, myrepo = None):
+ """
+ Returns the location of the CPV, and what overlay it was in.
+ Searches overlays first, then PORTDIR; this allows us to return the first
+ matching file. If we started in PORTDIR and searched the overlays
+ second, we would have to exhaustively search the overlays anyway,
+ since a match in an overlay takes precedence.
+ If myrepo is not None it will find packages from this repository (overlay).
+ """
+ if not mycpv:
+ return (None, 0)
+
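+ # e.g. (hypothetical layout) "app-misc/foo-1.0" resolves to
+ # <overlay-or-PORTDIR>/app-misc/foo/foo-1.0.ebuild, with overlays
+ # consulted first when no specific mytree/myrepo is given.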
+ if myrepo is not None:
+ mytree = self.treemap.get(myrepo)
+ if mytree is None:
+ return (None, 0)
+
+ mysplit = mycpv.split("/")
+ psplit = pkgsplit(mysplit[1])
+ if psplit is None or len(mysplit) != 2:
+ raise InvalidPackageName(mycpv)
+
+ # For optimal performance in this hot spot, we do manual unicode
+ # handling here instead of using the wrapped os module.
+ encoding = _encodings['fs']
+ errors = 'strict'
+
+ if mytree:
+ mytrees = [mytree]
+ else:
+ mytrees = reversed(self.porttrees)
+
+ relative_path = mysplit[0] + _os.sep + psplit[0] + _os.sep + \
+ mysplit[1] + ".ebuild"
+
+ for x in mytrees:
+ filename = x + _os.sep + relative_path
+ if _os.access(_unicode_encode(filename,
+ encoding=encoding, errors=errors), _os.R_OK):
+ return (filename, x)
+ return (None, 0)
+
+ def _metadata_process(self, cpv, ebuild_path, repo_path):
+ """
+ Create an EbuildMetadataPhase instance to generate metadata for the
+ give ebuild.
+ @rtype: EbuildMetadataPhase
+ @returns: A new EbuildMetadataPhase instance, or None if the
+ metadata cache is already valid.
+ """
+ metadata, st, emtime = self._pull_valid_cache(cpv, ebuild_path, repo_path)
+ if metadata is not None:
+ return None
+
+ process = EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
+ ebuild_mtime=emtime, metadata_callback=self._metadata_callback,
+ portdb=self, repo_path=repo_path, settings=self.doebuild_settings)
+ return process
+
+ def _metadata_callback(self, cpv, ebuild_path, repo_path, metadata, mtime):
+
+ i = metadata
+ if hasattr(metadata, "items"):
+ i = iter(metadata.items())
+ metadata = dict(i)
+
+ if metadata.get("INHERITED", False):
+ metadata["_eclasses_"] = self._repo_info[repo_path
+ ].eclass_db.get_eclass_data(metadata["INHERITED"].split())
+ else:
+ metadata["_eclasses_"] = {}
+
+ metadata.pop("INHERITED", None)
+ metadata["_mtime_"] = mtime
+
+ eapi = metadata.get("EAPI")
+ if not eapi or not eapi.strip():
+ eapi = "0"
+ metadata["EAPI"] = eapi
+ if not eapi_is_supported(eapi):
+ for k in set(metadata).difference(("_mtime_", "_eclasses_")):
+ metadata[k] = ""
+ metadata["EAPI"] = "-" + eapi.lstrip("-")
+
+ try:
+ self.auxdb[repo_path][cpv] = metadata
+ except CacheError:
+ # Normally this shouldn't happen, so we'll show
+ # a traceback for debugging purposes.
+ traceback.print_exc()
+ return metadata
+
+ def _pull_valid_cache(self, cpv, ebuild_path, repo_path):
+ try:
+ # Don't use unicode-wrapped os module, for better performance.
+ st = _os.stat(_unicode_encode(ebuild_path,
+ encoding=_encodings['fs'], errors='strict'))
+ emtime = st[stat.ST_MTIME]
+ except OSError:
+ writemsg(_("!!! aux_get(): ebuild for " \
+ "'%s' does not exist at:\n") % (cpv,), noiselevel=-1)
+ writemsg("!!! %s\n" % ebuild_path, noiselevel=-1)
+ raise KeyError(cpv)
+
+ # Pull pre-generated metadata from the metadata/cache/
+ # directory if it exists and is valid, otherwise fall
+ # back to the normal writable cache.
+ auxdbs = []
+ pregen_auxdb = self._pregen_auxdb.get(repo_path)
+ if pregen_auxdb is not None:
+ auxdbs.append(pregen_auxdb)
+ auxdbs.append(self.auxdb[repo_path])
+ eclass_db = self._repo_info[repo_path].eclass_db
+
+ doregen = True
+ for auxdb in auxdbs:
+ try:
+ metadata = auxdb[cpv]
+ except KeyError:
+ pass
+ except CacheError:
+ if auxdb is not pregen_auxdb:
+ try:
+ del auxdb[cpv]
+ except KeyError:
+ pass
+ except CacheError:
+ pass
+ else:
+ eapi = metadata.get('EAPI', '').strip()
+ if not eapi:
+ eapi = '0'
+ if not (eapi[:1] == '-' and eapi_is_supported(eapi[1:])) and \
+ emtime == metadata['_mtime_'] and \
+ eclass_db.is_eclass_data_valid(metadata['_eclasses_']):
+ doregen = False
+
+ if not doregen:
+ break
+
+ if doregen:
+ metadata = None
+
+ return (metadata, st, emtime)
+
+ def aux_get(self, mycpv, mylist, mytree=None, myrepo=None):
+		"stub code for returning auxiliary db information, such as SLOT, DEPEND, etc."
+ 'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
+ 'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or raise KeyError if error'
+ cache_me = False
+ if myrepo is not None:
+ mytree = self.treemap.get(myrepo)
+ if mytree is None:
+ raise KeyError(myrepo)
+
+ if not mytree:
+ cache_me = True
+ if not mytree and not self._known_keys.intersection(
+ mylist).difference(self._aux_cache_keys):
+ aux_cache = self._aux_cache.get(mycpv)
+ if aux_cache is not None:
+ return [aux_cache.get(x, "") for x in mylist]
+ cache_me = True
+ global auxdbkeys, auxdbkeylen
+ try:
+ cat, pkg = mycpv.split("/", 1)
+ except ValueError:
+ # Missing slash. Can't find ebuild so raise KeyError.
+ raise KeyError(mycpv)
+
+ myebuild, mylocation = self.findname2(mycpv, mytree)
+
+ if not myebuild:
+ writemsg("!!! aux_get(): %s\n" % \
+ _("ebuild not found for '%s'") % mycpv, noiselevel=1)
+ raise KeyError(mycpv)
+
+ mydata, st, emtime = self._pull_valid_cache(mycpv, myebuild, mylocation)
+ doregen = mydata is None
+
+ if doregen:
+ if myebuild in self._broken_ebuilds:
+ raise KeyError(mycpv)
+
+ self.doebuild_settings.setcpv(mycpv)
+ eapi = None
+
+ if eapi is None and \
+ 'parse-eapi-ebuild-head' in self.doebuild_settings.features:
+ eapi = portage._parse_eapi_ebuild_head(io.open(
+ _unicode_encode(myebuild,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace'))
+
+ if eapi is not None:
+ self.doebuild_settings.configdict['pkg']['EAPI'] = eapi
+
+ if eapi is not None and not portage.eapi_is_supported(eapi):
+ mydata = self._metadata_callback(
+ mycpv, myebuild, mylocation, {'EAPI':eapi}, emtime)
+ else:
+ proc = EbuildMetadataPhase(cpv=mycpv, ebuild_path=myebuild,
+ ebuild_mtime=emtime,
+ metadata_callback=self._metadata_callback, portdb=self,
+ repo_path=mylocation,
+ scheduler=PollScheduler().sched_iface,
+ settings=self.doebuild_settings)
+
+ proc.start()
+ proc.wait()
+
+ if proc.returncode != os.EX_OK:
+ self._broken_ebuilds.add(myebuild)
+ raise KeyError(mycpv)
+
+ mydata = proc.metadata
+
+		# do we have an origin repository name for the current package
+ mydata["repository"] = self.repositories.get_name_for_location(mylocation)
+ mydata["INHERITED"] = ' '.join(mydata.get("_eclasses_", []))
+ mydata["_mtime_"] = st[stat.ST_MTIME]
+
+ eapi = mydata.get("EAPI")
+ if not eapi:
+ eapi = "0"
+ mydata["EAPI"] = eapi
+ if not eapi_is_supported(eapi):
+ for k in set(mydata).difference(("_mtime_", "_eclasses_")):
+ mydata[k] = ""
+ mydata["EAPI"] = "-" + eapi.lstrip("-")
+
+ #finally, we look at our internal cache entry and return the requested data.
+ returnme = [mydata.get(x, "") for x in mylist]
+
+ if cache_me:
+ aux_cache = {}
+ for x in self._aux_cache_keys:
+ aux_cache[x] = mydata.get(x, "")
+ self._aux_cache[mycpv] = aux_cache
+
+ return returnme
+
+ def getFetchMap(self, mypkg, useflags=None, mytree=None):
+ """
+ Get the SRC_URI metadata as a dict which maps each file name to a
+ set of alternative URIs.
+
+ @param mypkg: cpv for an ebuild
+ @type mypkg: String
+ @param useflags: a collection of enabled USE flags, for evaluation of
+ conditionals
+ @type useflags: set, or None to enable all conditionals
+ @param mytree: The canonical path of the tree in which the ebuild
+ is located, or None for automatic lookup
+		@type mytree: String
+ @returns: A dict which maps each file name to a set of alternative
+ URIs.
+ @rtype: dict
+ """
+
+ try:
+ eapi, myuris = self.aux_get(mypkg,
+ ["EAPI", "SRC_URI"], mytree=mytree)
+ except KeyError:
+ # Convert this to an InvalidDependString exception since callers
+ # already handle it.
+ raise portage.exception.InvalidDependString(
+ "getFetchMap(): aux_get() error reading "+mypkg+"; aborting.")
+
+ if not eapi_is_supported(eapi):
+ # Convert this to an InvalidDependString exception
+ # since callers already handle it.
+ raise portage.exception.InvalidDependString(
+ "getFetchMap(): '%s' has unsupported EAPI: '%s'" % \
+ (mypkg, eapi.lstrip("-")))
+
+ return _parse_uri_map(mypkg, {'EAPI':eapi,'SRC_URI':myuris},
+ use=useflags)
+
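+	# Illustrative usage sketch for getFetchMap(), assuming a configured
+	# portage environment (the cpv is hypothetical):
+	#
+	#     portdb = portage.db[portage.root]["porttree"].dbapi
+	#     uri_map = portdb.getFetchMap("sys-apps/foo-1.0")
+	#     for distfile, uris in uri_map.items():
+	#         print(distfile, sorted(uris))
+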
+ def getfetchsizes(self, mypkg, useflags=None, debug=0, myrepo=None):
+		# returns a filename:size dictionary of remaining downloads
+ myebuild, mytree = self.findname2(mypkg, myrepo=myrepo)
+ if myebuild is None:
+ raise AssertionError(_("ebuild not found for '%s'") % mypkg)
+ pkgdir = os.path.dirname(myebuild)
+ mf = Manifest(pkgdir, self.settings["DISTDIR"])
+ checksums = mf.getDigests()
+ if not checksums:
+ if debug:
+ writemsg(_("[empty/missing/bad digest]: %s\n") % (mypkg,))
+ return {}
+ filesdict={}
+ myfiles = self.getFetchMap(mypkg, useflags=useflags, mytree=mytree)
+ #XXX: maybe this should be improved: take partial downloads
+ # into account? check checksums?
+ for myfile in myfiles:
+ try:
+ fetch_size = int(checksums[myfile]["size"])
+ except (KeyError, ValueError):
+ if debug:
+ writemsg(_("[bad digest]: missing %(file)s for %(pkg)s\n") % {"file":myfile, "pkg":mypkg})
+ continue
+ file_path = os.path.join(self.settings["DISTDIR"], myfile)
+ mystat = None
+ try:
+ mystat = os.stat(file_path)
+ except OSError as e:
+ pass
+ if mystat is None:
+ existing_size = 0
+ ro_distdirs = self.settings.get("PORTAGE_RO_DISTDIRS")
+ if ro_distdirs is not None:
+ for x in shlex_split(ro_distdirs):
+ try:
+ mystat = os.stat(os.path.join(x, myfile))
+ except OSError:
+ pass
+ else:
+ if mystat.st_size == fetch_size:
+ existing_size = fetch_size
+ break
+ else:
+ existing_size = mystat.st_size
+ remaining_size = fetch_size - existing_size
+ if remaining_size > 0:
+ # Assume the download is resumable.
+ filesdict[myfile] = remaining_size
+ elif remaining_size < 0:
+ # The existing file is too large and therefore corrupt.
+ filesdict[myfile] = int(checksums[myfile]["size"])
+ return filesdict
+
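+	# Illustrative sketch: summing the bytes still to be fetched for a
+	# hypothetical package (assumes portdb as above):
+	#
+	#     remaining = portdb.getfetchsizes("sys-apps/foo-1.0")
+	#     total_bytes = sum(remaining.values())
+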
+ def fetch_check(self, mypkg, useflags=None, mysettings=None, all=False, myrepo=None):
+ """
+ TODO: account for PORTAGE_RO_DISTDIRS
+ """
+ if all:
+ useflags = None
+ elif useflags is None:
+ if mysettings:
+ useflags = mysettings["USE"].split()
+ if myrepo is not None:
+ mytree = self.treemap.get(myrepo)
+ if mytree is None:
+ return False
+ else:
+ mytree = None
+
+ myfiles = self.getFetchMap(mypkg, useflags=useflags, mytree=mytree)
+ myebuild = self.findname(mypkg, myrepo=myrepo)
+ if myebuild is None:
+ raise AssertionError(_("ebuild not found for '%s'") % mypkg)
+ pkgdir = os.path.dirname(myebuild)
+ mf = Manifest(pkgdir, self.settings["DISTDIR"])
+ mysums = mf.getDigests()
+
+ failures = {}
+ for x in myfiles:
+ if not mysums or x not in mysums:
+ ok = False
+ reason = _("digest missing")
+ else:
+ try:
+ ok, reason = portage.checksum.verify_all(
+ os.path.join(self.settings["DISTDIR"], x), mysums[x])
+ except FileNotFound as e:
+ ok = False
+ reason = _("File Not Found: '%s'") % (e,)
+ if not ok:
+ failures[x] = reason
+ if failures:
+ return False
+ return True
+
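+	# Illustrative sketch: verifying all distfiles for a hypothetical
+	# package, ignoring USE conditionals (assumes portdb as above):
+	#
+	#     if not portdb.fetch_check("sys-apps/foo-1.0", all=True):
+	#         pass  # a distfile is missing or fails digest verification
+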
+ def cpv_exists(self, mykey, myrepo=None):
+ "Tells us whether an actual ebuild exists on disk (no masking)"
+ cps2 = mykey.split("/")
+ cps = catpkgsplit(mykey, silent=0)
+ if not cps:
+ #invalid cat/pkg-v
+ return 0
+ if self.findname(cps[0] + "/" + cps2[1], myrepo=myrepo):
+ return 1
+ else:
+ return 0
+
+ def cp_all(self, categories=None, trees=None):
+ """
+ This returns a list of all keys in our tree or trees
+ @param categories: optional list of categories to search or
+ defaults to self.settings.categories
+ @param trees: optional list of trees to search the categories in or
+ defaults to self.porttrees
+ @rtype list of [cat/pkg,...]
+ """
+ d = {}
+ if categories is None:
+ categories = self.settings.categories
+ if trees is None:
+ trees = self.porttrees
+ for x in categories:
+ for oroot in trees:
+ for y in listdir(oroot+"/"+x, EmptyOnError=1, ignorecvs=1, dirsonly=1):
+ try:
+ atom = Atom("%s/%s" % (x, y))
+ except InvalidAtom:
+ continue
+ if atom != atom.cp:
+ continue
+ d[atom.cp] = None
+ l = list(d)
+ l.sort()
+ return l
+
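+	# Illustrative sketch: listing every cat/pkg key in a single category
+	# (assumes portdb as above; the category name is hypothetical):
+	#
+	#     for cp in portdb.cp_all(categories=["sys-apps"]):
+	#         print(cp)
+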
+ def cp_list(self, mycp, use_cache=1, mytree=None):
+ if self.frozen and mytree is None:
+ cachelist = self.xcache["cp-list"].get(mycp)
+ if cachelist is not None:
+ # Try to propagate this to the match-all cache here for
+				# repoman, since it uses separate match-all caches for each
+ # profile (due to old-style virtuals). Do not propagate
+ # old-style virtuals since cp_list() doesn't expand them.
+ if not (not cachelist and mycp.startswith("virtual/")):
+ self.xcache["match-all"][mycp] = cachelist
+ return cachelist[:]
+ mysplit = mycp.split("/")
+ invalid_category = mysplit[0] not in self._categories
+ d={}
+ if mytree is not None:
+ if isinstance(mytree, basestring):
+ mytrees = [mytree]
+ else:
+ # assume it's iterable
+ mytrees = mytree
+ else:
+ mytrees = self.porttrees
+ for oroot in mytrees:
+ try:
+ file_list = os.listdir(os.path.join(oroot, mycp))
+ except OSError:
+ continue
+ for x in file_list:
+ pf = None
+ if x[-7:] == '.ebuild':
+ pf = x[:-7]
+
+ if pf is not None:
+ ps = pkgsplit(pf)
+ if not ps:
+ writemsg(_("\nInvalid ebuild name: %s\n") % \
+ os.path.join(oroot, mycp, x), noiselevel=-1)
+ continue
+ if ps[0] != mysplit[1]:
+ writemsg(_("\nInvalid ebuild name: %s\n") % \
+ os.path.join(oroot, mycp, x), noiselevel=-1)
+ continue
+ ver_match = ver_regexp.match("-".join(ps[1:]))
+ if ver_match is None or not ver_match.groups():
+ writemsg(_("\nInvalid ebuild version: %s\n") % \
+ os.path.join(oroot, mycp, x), noiselevel=-1)
+ continue
+ d[mysplit[0]+"/"+pf] = None
+ if invalid_category and d:
+ writemsg(_("\n!!! '%s' has a category that is not listed in " \
+ "%setc/portage/categories\n") % \
+ (mycp, self.settings["PORTAGE_CONFIGROOT"]), noiselevel=-1)
+ mylist = []
+ else:
+ mylist = list(d)
+ # Always sort in ascending order here since it's handy
+ # and the result can be easily cached and reused.
+ self._cpv_sort_ascending(mylist)
+ if self.frozen and mytree is None:
+ cachelist = mylist[:]
+ self.xcache["cp-list"][mycp] = cachelist
+ # Do not propagate old-style virtuals since
+ # cp_list() doesn't expand them.
+ if not (not cachelist and mycp.startswith("virtual/")):
+ self.xcache["match-all"][mycp] = cachelist
+ return mylist
+
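+	# Illustrative sketch: cp_list() expands a cat/pkg key into the sorted
+	# list of available versions (hypothetical names):
+	#
+	#     portdb.cp_list("sys-apps/foo")
+	#     # => ["sys-apps/foo-1.0", "sys-apps/foo-1.1", ...]
+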
+ def freeze(self):
+ for x in "bestmatch-visible", "cp-list", "list-visible", "match-all", \
+ "match-all-cpv-only", "match-visible", "minimum-all", \
+ "minimum-visible":
+ self.xcache[x]={}
+ self.frozen=1
+
+ def melt(self):
+ self.xcache = {}
+ self.frozen = 0
+
+ def xmatch(self,level,origdep,mydep=None,mykey=None,mylist=None):
+		"caching match function; very tricky stuff"
+ #if no updates are being made to the tree, we can consult our xcache...
+ if self.frozen:
+ try:
+ return self.xcache[level][origdep][:]
+ except KeyError:
+ pass
+
+ if mydep is None:
+ #this stuff only runs on first call of xmatch()
+ #create mydep, mykey from origdep
+ mydep = dep_expand(origdep, mydb=self, settings=self.settings)
+ mykey = mydep.cp
+
+ myval = None
+ mytree = None
+ if mydep.repo is not None:
+ mytree = self.treemap.get(mydep.repo)
+ if mytree is None:
+ myval = []
+
+ if myval is not None:
+ # Unknown repo, empty result.
+ pass
+ elif level == "match-all-cpv-only":
+ # match *all* packages, only against the cpv, in order
+ # to bypass unnecessary cache access for things like IUSE
+ # and SLOT.
+ if mydep == mykey:
+ # Share cache with match-all/cp_list when the result is the
+ # same. Note that this requires that mydep.repo is None and
+ # thus mytree is also None.
+ level = "match-all"
+ myval = self.cp_list(mykey, mytree=mytree)
+ else:
+ myval = match_from_list(mydep,
+ self.cp_list(mykey, mytree=mytree))
+
+ elif level == "list-visible":
+ #a list of all visible packages, not called directly (just by xmatch())
+ #myval = self.visible(self.cp_list(mykey))
+
+ myval = self.gvisible(self.visible(
+ self.cp_list(mykey, mytree=mytree)))
+ elif level == "minimum-all":
+ # Find the minimum matching version. This is optimized to
+ # minimize the number of metadata accesses (improves performance
+ # especially in cases where metadata needs to be generated).
+ if mydep == mykey:
+ cpv_iter = iter(self.cp_list(mykey, mytree=mytree))
+ else:
+ cpv_iter = self._iter_match(mydep,
+ self.cp_list(mykey, mytree=mytree))
+ try:
+ myval = next(cpv_iter)
+ except StopIteration:
+ myval = ""
+
+ elif level in ("minimum-visible", "bestmatch-visible"):
+ # Find the minimum matching visible version. This is optimized to
+ # minimize the number of metadata accesses (improves performance
+ # especially in cases where metadata needs to be generated).
+ if mydep == mykey:
+ mylist = self.cp_list(mykey, mytree=mytree)
+ else:
+ mylist = match_from_list(mydep,
+ self.cp_list(mykey, mytree=mytree))
+ myval = ""
+ settings = self.settings
+ local_config = settings.local_config
+ aux_keys = list(self._aux_cache_keys)
+ if level == "minimum-visible":
+ iterfunc = iter
+ else:
+ iterfunc = reversed
+ for cpv in iterfunc(mylist):
+ try:
+ metadata = dict(zip(aux_keys,
+ self.aux_get(cpv, aux_keys)))
+ except KeyError:
+ # ebuild masked by corruption
+ continue
+ if not eapi_is_supported(metadata["EAPI"]):
+ continue
+ if mydep.slot and mydep.slot != metadata["SLOT"]:
+ continue
+ if settings._getMissingKeywords(cpv, metadata):
+ continue
+ if settings._getMaskAtom(cpv, metadata):
+ continue
+ if settings._getProfileMaskAtom(cpv, metadata):
+ continue
+ if local_config:
+ metadata["USE"] = ""
+ if "?" in metadata["LICENSE"] or "?" in metadata["PROPERTIES"]:
+ self.doebuild_settings.setcpv(cpv, mydb=metadata)
+ metadata["USE"] = self.doebuild_settings.get("USE", "")
+ try:
+ if settings._getMissingLicenses(cpv, metadata):
+ continue
+ if settings._getMissingProperties(cpv, metadata):
+ continue
+ except InvalidDependString:
+ continue
+ if mydep.use:
+ has_iuse = False
+ for has_iuse in self._iter_match_use(mydep, [cpv]):
+ break
+ if not has_iuse:
+ continue
+ myval = cpv
+ break
+ elif level == "bestmatch-list":
+ #dep match -- find best match but restrict search to sublist
+ #no point in calling xmatch again since we're not caching list deps
+
+ myval = best(list(self._iter_match(mydep, mylist)))
+ elif level == "match-list":
+ #dep match -- find all matches but restrict search to sublist (used in 2nd half of visible())
+
+ myval = list(self._iter_match(mydep, mylist))
+ elif level == "match-visible":
+ #dep match -- find all visible matches
+ #get all visible packages, then get the matching ones
+ myval = list(self._iter_match(mydep,
+ self.xmatch("list-visible", mykey, mydep=Atom(mykey), mykey=mykey)))
+ elif level == "match-all":
+ #match *all* visible *and* masked packages
+ if mydep == mykey:
+ myval = self.cp_list(mykey, mytree=mytree)
+ else:
+ myval = list(self._iter_match(mydep,
+ self.cp_list(mykey, mytree=mytree)))
+ else:
+ raise AssertionError(
+ "Invalid level argument: '%s'" % level)
+
+ if self.frozen and (level not in ["match-list", "bestmatch-list"]):
+ self.xcache[level][mydep] = myval
+ if origdep and origdep != mydep:
+ self.xcache[level][origdep] = myval
+ return myval[:]
+
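+	# Illustrative sketch of two common xmatch() levels (hypothetical
+	# atoms; assumes portdb as above):
+	#
+	#     best_cpv = portdb.xmatch("bestmatch-visible", ">=sys-apps/foo-1.0")
+	#     all_cpvs = portdb.xmatch("match-all", "sys-apps/foo")
+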
+ def match(self, mydep, use_cache=1):
+ return self.xmatch("match-visible", mydep)
+
+ def visible(self, mylist):
+		"""two functions in one. Accepts a list of cpv values and uses the package.mask *and*
+		packages file to remove invisible entries, returning the remaining items. This function
+		assumes that all entries in mylist have the same category and package name."""
+ if not mylist:
+ return []
+
+ db_keys = ["SLOT"]
+ visible = []
+ getMaskAtom = self.settings._getMaskAtom
+ getProfileMaskAtom = self.settings._getProfileMaskAtom
+ for cpv in mylist:
+ try:
+ metadata = dict(zip(db_keys, self.aux_get(cpv, db_keys)))
+ except KeyError:
+ # masked by corruption
+ continue
+ if not metadata["SLOT"]:
+ continue
+ if getMaskAtom(cpv, metadata):
+ continue
+ if getProfileMaskAtom(cpv, metadata):
+ continue
+ visible.append(cpv)
+ return visible
+
+ def gvisible(self,mylist):
+ "strip out group-masked (not in current group) entries"
+
+ if mylist is None:
+ return []
+ newlist=[]
+ aux_keys = list(self._aux_cache_keys)
+ metadata = {}
+ local_config = self.settings.local_config
+ chost = self.settings.get('CHOST', '')
+ accept_chost = self.settings._accept_chost
+ for mycpv in mylist:
+ metadata.clear()
+ try:
+ metadata.update(zip(aux_keys, self.aux_get(mycpv, aux_keys)))
+ except KeyError:
+ continue
+ except PortageException as e:
+ writemsg("!!! Error: aux_get('%s', %s)\n" % (mycpv, aux_keys),
+ noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
+ del e
+ continue
+ eapi = metadata["EAPI"]
+ if not eapi_is_supported(eapi):
+ continue
+ if _eapi_is_deprecated(eapi):
+ continue
+ if self.settings._getMissingKeywords(mycpv, metadata):
+ continue
+ if local_config:
+ metadata['CHOST'] = chost
+ if not accept_chost(mycpv, metadata):
+ continue
+ metadata["USE"] = ""
+ if "?" in metadata["LICENSE"] or "?" in metadata["PROPERTIES"]:
+ self.doebuild_settings.setcpv(mycpv, mydb=metadata)
+ metadata['USE'] = self.doebuild_settings['PORTAGE_USE']
+ try:
+ if self.settings._getMissingLicenses(mycpv, metadata):
+ continue
+ if self.settings._getMissingProperties(mycpv, metadata):
+ continue
+ except InvalidDependString:
+ continue
+ newlist.append(mycpv)
+ return newlist
+
+def close_portdbapi_caches():
+ for i in portdbapi.portdbapi_instances:
+ i.close_caches()
+
+portage.process.atexit_register(portage.portageexit)
+
+class portagetree(object):
+ def __init__(self, root=None, virtual=None, settings=None):
+ """
+ Constructor for a PortageTree
+
+ @param root: deprecated, defaults to settings['ROOT']
+ @type root: String/Path
+ @param virtual: UNUSED
+ @type virtual: No Idea
+ @param settings: Portage Configuration object (portage.settings)
+ @type settings: Instance of portage.config
+ """
+
+ if settings is None:
+ settings = portage.settings
+ self.settings = settings
+
+ if root is not None and root != settings['ROOT']:
+ warnings.warn("The root parameter of the " + \
+ "portage.dbapi.porttree.portagetree" + \
+ " constructor is now unused. Use " + \
+ "settings['ROOT'] instead.",
+ DeprecationWarning, stacklevel=2)
+
+ self.portroot = settings["PORTDIR"]
+ self.virtual = virtual
+ self.dbapi = portdbapi(mysettings=settings)
+
+ @property
+ def root(self):
+ warnings.warn("The root attribute of " + \
+ "portage.dbapi.porttree.portagetree" + \
+ " is deprecated. Use " + \
+ "settings['ROOT'] instead.",
+ DeprecationWarning, stacklevel=2)
+ return self.settings['ROOT']
+
+ def dep_bestmatch(self,mydep):
+ "compatibility method"
+ mymatch = self.dbapi.xmatch("bestmatch-visible",mydep)
+ if mymatch is None:
+ return ""
+ return mymatch
+
+ def dep_match(self,mydep):
+ "compatibility method"
+ mymatch = self.dbapi.xmatch("match-visible",mydep)
+ if mymatch is None:
+ return []
+ return mymatch
+
+ def exists_specific(self,cpv):
+ return self.dbapi.cpv_exists(cpv)
+
+ def getallnodes(self):
+		"""new behavior: these are all *unmasked* nodes. There may or may not be
+		masked packages available for the nodes in this list."""
+ return self.dbapi.cp_all()
+
+ def getname(self, pkgname):
+ "returns file location for this particular package (DEPRECATED)"
+ if not pkgname:
+ return ""
+ mysplit = pkgname.split("/")
+ psplit = pkgsplit(mysplit[1])
+ return "/".join([self.portroot, mysplit[0], psplit[0], mysplit[1]])+".ebuild"
+
+ def depcheck(self, mycheck, use="yes", myusesplit=None):
+ return dep_check(mycheck, self.dbapi, use=use, myuse=myusesplit)
+
+ def getslot(self,mycatpkg):
+ "Get a slot for a catpkg; assume it exists."
+ myslot = ""
+ try:
+ myslot = self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ pass
+ return myslot
+
+class FetchlistDict(Mapping):
+ """
+	This provides a mapping interface to retrieve fetch lists. It's used
+	to allow portage.manifest.Manifest to access fetch lists via a standard
+	mapping interface rather than using the dbapi directly.
+ """
+ def __init__(self, pkgdir, settings, mydbapi):
+		"""pkgdir is a directory containing ebuilds and settings is passed into
+		portdbapi.getFetchMap for __getitem__ calls."""
+ self.pkgdir = pkgdir
+ self.cp = os.sep.join(pkgdir.split(os.sep)[-2:])
+ self.settings = settings
+ self.mytree = os.path.realpath(os.path.dirname(os.path.dirname(pkgdir)))
+ self.portdb = mydbapi
+
+ def __getitem__(self, pkg_key):
+ """Returns the complete fetch list for a given package."""
+ return list(self.portdb.getFetchMap(pkg_key, mytree=self.mytree))
+
+ def __contains__(self, cpv):
+ return cpv in self.__iter__()
+
+ def has_key(self, pkg_key):
+ """Returns true if the given package exists within pkgdir."""
+ warnings.warn("portage.dbapi.porttree.FetchlistDict.has_key() is "
+ "deprecated, use the 'in' operator instead",
+ DeprecationWarning, stacklevel=2)
+ return pkg_key in self
+
+ def __iter__(self):
+ return iter(self.portdb.cp_list(self.cp, mytree=self.mytree))
+
+ def __len__(self):
+ """This needs to be implemented in order to avoid
+ infinite recursion in some cases."""
+ return len(self.portdb.cp_list(self.cp, mytree=self.mytree))
+
+ def keys(self):
+ """Returns keys for all packages within pkgdir"""
+ return self.portdb.cp_list(self.cp, mytree=self.mytree)
+
+ if sys.hexversion >= 0x3000000:
+ keys = __iter__
+
+def _parse_uri_map(cpv, metadata, use=None):
+
+ myuris = use_reduce(metadata.get('SRC_URI', ''),
+ uselist=use, matchall=(use is None),
+ is_src_uri=True,
+ eapi=metadata['EAPI'])
+
+ uri_map = OrderedDict()
+
+ myuris.reverse()
+ while myuris:
+ uri = myuris.pop()
+ if myuris and myuris[-1] == "->":
+ operator = myuris.pop()
+ distfile = myuris.pop()
+ else:
+ distfile = os.path.basename(uri)
+ if not distfile:
+ raise portage.exception.InvalidDependString(
+ ("getFetchMap(): '%s' SRC_URI has no file " + \
+ "name: '%s'") % (cpv, uri))
+
+ uri_set = uri_map.get(distfile)
+ if uri_set is None:
+ uri_set = set()
+ uri_map[distfile] = uri_set
+ uri_set.add(uri)
+ uri = None
+ operator = None
+
+ return uri_map
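+# Illustrative sketch of _parse_uri_map() on the SRC_URI "->" rename
+# syntax (hypothetical metadata; "->" requires EAPI 2 or later):
+#
+#     metadata = {"EAPI": "2", "SRC_URI":
+#         "http://example.org/a.tgz http://example.org/b -> c.tgz"}
+#     _parse_uri_map("cat/pkg-1.0", metadata)
+#     # => OrderedDict([("a.tgz", set(["http://example.org/a.tgz"])),
+#     #                 ("c.tgz", set(["http://example.org/b"]))])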
diff --git a/portage_with_autodep/pym/portage/dbapi/vartree.py b/portage_with_autodep/pym/portage/dbapi/vartree.py
new file mode 100644
index 0000000..7f7873b
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dbapi/vartree.py
@@ -0,0 +1,4527 @@
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = [
+ "vardbapi", "vartree", "dblink"] + \
+ ["write_contents", "tar_contents"]
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.checksum:_perform_md5_merge@perform_md5',
+ 'portage.data:portage_gid,portage_uid,secpass',
+ 'portage.dbapi.dep_expand:dep_expand',
+ 'portage.dbapi._MergeProcess:MergeProcess',
+ 'portage.dep:dep_getkey,isjustname,match_from_list,' + \
+ 'use_reduce,_slot_re',
+ 'portage.elog:collect_ebuild_messages,collect_messages,' + \
+ 'elog_process,_merge_logentries',
+ 'portage.locks:lockdir,unlockdir,lockfile,unlockfile',
+ 'portage.output:bold,colorize',
+ 'portage.package.ebuild.doebuild:doebuild_environment,' + \
+ '_spawn_phase',
+ 'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
+ 'portage.update:fixdbentries',
+ 'portage.util:apply_secpass_permissions,ConfigProtect,ensure_dirs,' + \
+ 'writemsg,writemsg_level,write_atomic,atomic_ofstream,writedict,' + \
+ 'grabdict,normalize_path,new_protect_filename',
+ 'portage.util.digraph:digraph',
+ 'portage.util.env_update:env_update',
+ 'portage.util.listdir:dircache,listdir',
+ 'portage.util._dyn_libs.PreservedLibsRegistry:PreservedLibsRegistry',
+ 'portage.util._dyn_libs.LinkageMapELF:LinkageMapELF@LinkageMap',
+ 'portage.versions:best,catpkgsplit,catsplit,cpv_getkey,pkgcmp,' + \
+ '_pkgsplit@pkgsplit',
+)
+
+from portage.const import CACHE_PATH, CONFIG_MEMORY_FILE, \
+ PORTAGE_PACKAGE_ATOM, PRIVATE_PATH, VDB_PATH
+from portage.const import _ENABLE_DYN_LINK_MAP, _ENABLE_PRESERVE_LIBS
+from portage.dbapi import dbapi
+from portage.exception import CommandNotFound, \
+ InvalidData, InvalidLocation, InvalidPackageName, \
+ FileNotFound, PermissionDenied, UnsupportedAPIException
+from portage.localization import _
+from portage.util.movefile import movefile
+
+from portage import abssymlink, _movefile, bsd_chflags
+
+# This is a special version of the os module, wrapped for unicode support.
+from portage import os
+from portage import _encodings
+from portage import _os_merge
+from portage import _selinux_merge
+from portage import _unicode_decode
+from portage import _unicode_encode
+
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.emergelog import emergelog
+from _emerge.PollScheduler import PollScheduler
+from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
+
+import errno
+import gc
+import io
+from itertools import chain
+import logging
+import os as _os
+import re
+import shutil
+import stat
+import sys
+import tempfile
+import textwrap
+import time
+import warnings
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+ long = int
+
+class vardbapi(dbapi):
+
+ _excluded_dirs = ["CVS", "lost+found"]
+ _excluded_dirs = [re.escape(x) for x in _excluded_dirs]
+ _excluded_dirs = re.compile(r'^(\..*|-MERGING-.*|' + \
+ "|".join(_excluded_dirs) + r')$')
+
+ _aux_cache_version = "1"
+ _owners_cache_version = "1"
+
+ # Number of uncached packages to trigger cache update, since
+ # it's wasteful to update it for every vdb change.
+ _aux_cache_threshold = 5
+
+ _aux_cache_keys_re = re.compile(r'^NEEDED\..*$')
+ _aux_multi_line_re = re.compile(r'^(CONTENTS|NEEDED\..*)$')
+
+ def __init__(self, _unused_param=None, categories=None, settings=None, vartree=None):
+ """
+ The categories parameter is unused since the dbapi class
+ now has a categories property that is generated from the
+ available packages.
+ """
+
+ # Used by emerge to check whether any packages
+ # have been added or removed.
+ self._pkgs_changed = False
+
+ # The _aux_cache_threshold doesn't work as designed
+ # if the cache is flushed from a subprocess, so we
+ # use this to avoid waste vdb cache updates.
+		# use this to avoid wasteful vdb cache updates.
+
+ #cache for category directory mtimes
+ self.mtdircache = {}
+
+ #cache for dependency checks
+ self.matchcache = {}
+
+ #cache for cp_list results
+ self.cpcache = {}
+
+ self.blockers = None
+ if settings is None:
+ settings = portage.settings
+ self.settings = settings
+ self.root = settings['ROOT']
+
+ if _unused_param is not None and _unused_param != self.root:
+ warnings.warn("The first parameter of the " + \
+ "portage.dbapi.vartree.vardbapi" + \
+ " constructor is now unused. Use " + \
+ "settings['ROOT'] instead.",
+ DeprecationWarning, stacklevel=2)
+
+ self._eroot = settings['EROOT']
+ self._dbroot = self._eroot + VDB_PATH
+ self._lock = None
+ self._lock_count = 0
+
+ self._conf_mem_file = self._eroot + CONFIG_MEMORY_FILE
+ self._fs_lock_obj = None
+ self._fs_lock_count = 0
+
+ if vartree is None:
+ vartree = portage.db[self.root]["vartree"]
+ self.vartree = vartree
+ self._aux_cache_keys = set(
+ ["BUILD_TIME", "CHOST", "COUNTER", "DEPEND", "DESCRIPTION",
+ "EAPI", "HOMEPAGE", "IUSE", "KEYWORDS",
+ "LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND",
+ "repository", "RESTRICT" , "SLOT", "USE", "DEFINED_PHASES",
+ "REQUIRED_USE"])
+ self._aux_cache_obj = None
+ self._aux_cache_filename = os.path.join(self._eroot,
+ CACHE_PATH, "vdb_metadata.pickle")
+ self._counter_path = os.path.join(self._eroot,
+ CACHE_PATH, "counter")
+
+ self._plib_registry = None
+ if _ENABLE_PRESERVE_LIBS:
+ self._plib_registry = PreservedLibsRegistry(self.root,
+ os.path.join(self._eroot, PRIVATE_PATH,
+ "preserved_libs_registry"))
+
+ self._linkmap = None
+ if _ENABLE_DYN_LINK_MAP:
+ self._linkmap = LinkageMap(self)
+ self._owners = self._owners_db(self)
+
+ self._cached_counter = None
+
+ def getpath(self, mykey, filename=None):
+ # This is an optimized hotspot, so don't use unicode-wrapped
+ # os module and don't use os.path.join().
+ rValue = self._eroot + VDB_PATH + _os.sep + mykey
+ if filename is not None:
+ # If filename is always relative, we can do just
+ # rValue += _os.sep + filename
+ rValue = _os.path.join(rValue, filename)
+ return rValue
+
+ def lock(self):
+ """
+ Acquire a reentrant lock, blocking, for cooperation with concurrent
+ processes. State is inherited by subprocesses, allowing subprocesses
+ to reenter a lock that was acquired by a parent process. However,
+ a lock can be released only by the same process that acquired it.
+ """
+ if self._lock_count:
+ self._lock_count += 1
+ else:
+ if self._lock is not None:
+ raise AssertionError("already locked")
+ # At least the parent needs to exist for the lock file.
+ ensure_dirs(self._dbroot)
+ self._lock = lockdir(self._dbroot)
+ self._lock_count += 1
+
+ def unlock(self):
+ """
+ Release a lock, decrementing the recursion level. Each unlock() call
+		must be matched with a prior lock() call; an AssertionError is
+		raised if unlock() is called while not locked.
+ """
+ if self._lock_count > 1:
+ self._lock_count -= 1
+ else:
+ if self._lock is None:
+ raise AssertionError("not locked")
+ self._lock_count = 0
+ unlockdir(self._lock)
+ self._lock = None
+
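+	# Illustrative sketch: lock() and unlock() calls must be balanced
+	# (assumes vardb is a vardbapi instance):
+	#
+	#     vardb.lock()
+	#     try:
+	#         pass  # operate on the vdb while holding the lock
+	#     finally:
+	#         vardb.unlock()
+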
+ def _fs_lock(self):
+ """
+ Acquire a reentrant lock, blocking, for cooperation with concurrent
+ processes.
+ """
+ if self._fs_lock_count < 1:
+ if self._fs_lock_obj is not None:
+ raise AssertionError("already locked")
+ try:
+ self._fs_lock_obj = lockfile(self._conf_mem_file)
+ except InvalidLocation:
+ self.settings._init_dirs()
+ self._fs_lock_obj = lockfile(self._conf_mem_file)
+ self._fs_lock_count += 1
+
+ def _fs_unlock(self):
+ """
+ Release a lock, decrementing the recursion level.
+ """
+ if self._fs_lock_count <= 1:
+ if self._fs_lock_obj is None:
+ raise AssertionError("not locked")
+ unlockfile(self._fs_lock_obj)
+ self._fs_lock_obj = None
+ self._fs_lock_count -= 1
+
+ def _bump_mtime(self, cpv):
+ """
+		This is called before and after any modifications, so that consumers
+ can use directory mtimes to validate caches. See bug #290428.
+ """
+ base = self._eroot + VDB_PATH
+ cat = catsplit(cpv)[0]
+ catdir = base + _os.sep + cat
+ t = time.time()
+ t = (t, t)
+ try:
+ for x in (catdir, base):
+ os.utime(x, t)
+ except OSError:
+ ensure_dirs(catdir)
+
+ def cpv_exists(self, mykey, myrepo=None):
+ "Tells us whether an actual ebuild exists on disk (no masking)"
+ return os.path.exists(self.getpath(mykey))
+
+ def cpv_counter(self, mycpv):
+ "This method will grab the COUNTER. Returns a counter value."
+ try:
+ return long(self.aux_get(mycpv, ["COUNTER"])[0])
+ except (KeyError, ValueError):
+ pass
+ writemsg_level(_("portage: COUNTER for %s was corrupted; " \
+ "resetting to value of 0\n") % (mycpv,),
+ level=logging.ERROR, noiselevel=-1)
+ return 0
+
+ def cpv_inject(self, mycpv):
+ "injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
+ ensure_dirs(self.getpath(mycpv))
+ counter = self.counter_tick(mycpv=mycpv)
+ # write local package counter so that emerge clean does the right thing
+ write_atomic(self.getpath(mycpv, filename="COUNTER"), str(counter))
+
+ def isInjected(self, mycpv):
+ if self.cpv_exists(mycpv):
+ if os.path.exists(self.getpath(mycpv, filename="INJECTED")):
+ return True
+ if not os.path.exists(self.getpath(mycpv, filename="CONTENTS")):
+ return True
+ return False
+
+ def move_ent(self, mylist, repo_match=None):
+ origcp = mylist[1]
+ newcp = mylist[2]
+
+ # sanity check
+ for atom in (origcp, newcp):
+ if not isjustname(atom):
+ raise InvalidPackageName(str(atom))
+ origmatches = self.match(origcp, use_cache=0)
+ moves = 0
+ if not origmatches:
+ return moves
+ for mycpv in origmatches:
+ mycpv_cp = cpv_getkey(mycpv)
+ if mycpv_cp != origcp:
+ # Ignore PROVIDE virtual match.
+ continue
+ if repo_match is not None \
+ and not repo_match(self.aux_get(mycpv, ['repository'])[0]):
+ continue
+ mynewcpv = mycpv.replace(mycpv_cp, str(newcp), 1)
+ mynewcat = catsplit(newcp)[0]
+ origpath = self.getpath(mycpv)
+ if not os.path.exists(origpath):
+ continue
+ moves += 1
+ if not os.path.exists(self.getpath(mynewcat)):
+ #create the directory
+ ensure_dirs(self.getpath(mynewcat))
+ newpath = self.getpath(mynewcpv)
+ if os.path.exists(newpath):
+ #dest already exists; keep this puppy where it is.
+ continue
+ _movefile(origpath, newpath, mysettings=self.settings)
+ self._clear_pkg_cache(self._dblink(mycpv))
+ self._clear_pkg_cache(self._dblink(mynewcpv))
+
+ # We need to rename the ebuild now.
+ old_pf = catsplit(mycpv)[1]
+ new_pf = catsplit(mynewcpv)[1]
+ if new_pf != old_pf:
+ try:
+ os.rename(os.path.join(newpath, old_pf + ".ebuild"),
+ os.path.join(newpath, new_pf + ".ebuild"))
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
+ write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
+ fixdbentries([mylist], newpath)
+ return moves
+
+ def cp_list(self, mycp, use_cache=1):
+ mysplit=catsplit(mycp)
+ if mysplit[0] == '*':
+ mysplit[0] = mysplit[0][1:]
+ try:
+ mystat = os.stat(self.getpath(mysplit[0])).st_mtime
+ except OSError:
+ mystat = 0
+ if use_cache and mycp in self.cpcache:
+ cpc = self.cpcache[mycp]
+ if cpc[0] == mystat:
+ return cpc[1][:]
+ cat_dir = self.getpath(mysplit[0])
+ try:
+ dir_list = os.listdir(cat_dir)
+ except EnvironmentError as e:
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied(cat_dir)
+ del e
+ dir_list = []
+
+ returnme = []
+ for x in dir_list:
+ if self._excluded_dirs.match(x) is not None:
+ continue
+ ps = pkgsplit(x)
+ if not ps:
+ self.invalidentry(os.path.join(self.getpath(mysplit[0]), x))
+ continue
+ if len(mysplit) > 1:
+ if ps[0] == mysplit[1]:
+ returnme.append(mysplit[0]+"/"+x)
+ self._cpv_sort_ascending(returnme)
+ if use_cache:
+ self.cpcache[mycp] = [mystat, returnme[:]]
+ elif mycp in self.cpcache:
+ del self.cpcache[mycp]
+ return returnme
+
+ def cpv_all(self, use_cache=1):
+ """
+ Set use_cache=0 to bypass the portage.cachedir() cache in cases
+ when the accuracy of mtime staleness checks should not be trusted
+ (generally this is only necessary in critical sections that
+ involve merge or unmerge of packages).
+ """
+ returnme = []
+ basepath = os.path.join(self._eroot, VDB_PATH) + os.path.sep
+
+ if use_cache:
+ from portage import listdir
+ else:
+ def listdir(p, **kwargs):
+ try:
+ return [x for x in os.listdir(p) \
+ if os.path.isdir(os.path.join(p, x))]
+ except EnvironmentError as e:
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied(p)
+ del e
+ return []
+
+ for x in listdir(basepath, EmptyOnError=1, ignorecvs=1, dirsonly=1):
+ if self._excluded_dirs.match(x) is not None:
+ continue
+ if not self._category_re.match(x):
+ continue
+ for y in listdir(basepath + x, EmptyOnError=1, dirsonly=1):
+ if self._excluded_dirs.match(y) is not None:
+ continue
+ subpath = x + "/" + y
+ # -MERGING- should never be a cpv, nor should files.
+ try:
+ if catpkgsplit(subpath) is None:
+ self.invalidentry(self.getpath(subpath))
+ continue
+ except InvalidData:
+ self.invalidentry(self.getpath(subpath))
+ continue
+ returnme.append(subpath)
+
+ return returnme
+
+ def cp_all(self, use_cache=1):
+ mylist = self.cpv_all(use_cache=use_cache)
+ d={}
+ for y in mylist:
+ if y[0] == '*':
+ y = y[1:]
+ try:
+ mysplit = catpkgsplit(y)
+ except InvalidData:
+ self.invalidentry(self.getpath(y))
+ continue
+ if not mysplit:
+ self.invalidentry(self.getpath(y))
+ continue
+ d[mysplit[0]+"/"+mysplit[1]] = None
+ return list(d)
+
+ def checkblockers(self, origdep):
+ pass
+
+ def _clear_cache(self):
+ self.mtdircache.clear()
+ self.matchcache.clear()
+ self.cpcache.clear()
+ self._aux_cache_obj = None
+
+ def _add(self, pkg_dblink):
+ self._pkgs_changed = True
+ self._clear_pkg_cache(pkg_dblink)
+
+ def _remove(self, pkg_dblink):
+ self._pkgs_changed = True
+ self._clear_pkg_cache(pkg_dblink)
+
+ def _clear_pkg_cache(self, pkg_dblink):
+ # Due to 1 second mtime granularity in <python-2.5, mtime checks
+ # are not always sufficient to invalidate vardbapi caches. Therefore,
+ # the caches need to be actively invalidated here.
+ self.mtdircache.pop(pkg_dblink.cat, None)
+ self.matchcache.pop(pkg_dblink.cat, None)
+ self.cpcache.pop(pkg_dblink.mysplit[0], None)
+ dircache.pop(pkg_dblink.dbcatdir, None)
+
+ def match(self, origdep, use_cache=1):
+ "caching match function"
+ mydep = dep_expand(
+ origdep, mydb=self, use_cache=use_cache, settings=self.settings)
+ mykey = dep_getkey(mydep)
+ mycat = catsplit(mykey)[0]
+ if not use_cache:
+ if mycat in self.matchcache:
+ del self.mtdircache[mycat]
+ del self.matchcache[mycat]
+ return list(self._iter_match(mydep,
+ self.cp_list(mydep.cp, use_cache=use_cache)))
+ try:
+ curmtime = os.stat(os.path.join(self._eroot, VDB_PATH, mycat)).st_mtime
+ except (IOError, OSError):
+ curmtime=0
+
+ if mycat not in self.matchcache or \
+ self.mtdircache[mycat] != curmtime:
+ # clear cache entry
+ self.mtdircache[mycat] = curmtime
+ self.matchcache[mycat] = {}
+ if mydep not in self.matchcache[mycat]:
+ mymatch = list(self._iter_match(mydep,
+ self.cp_list(mydep.cp, use_cache=use_cache)))
+ self.matchcache[mycat][mydep] = mymatch
+ return self.matchcache[mycat][mydep][:]
+
+ def findname(self, mycpv, myrepo=None):
+ return self.getpath(str(mycpv), filename=catsplit(mycpv)[1]+".ebuild")
+
+ def flush_cache(self):
+ """If the current user has permission and the internal aux_get cache has
+ been updated, save it to disk and mark it unmodified. This is called
+ by emerge after it has loaded the full vdb for use in dependency
+ calculations. Currently, the cache is only written if the user has
+ superuser privileges (since that's required to obtain a lock), but all
+ users have read access and benefit from faster metadata lookups (as
+ long as at least part of the cache is still valid)."""
+ if self._flush_cache_enabled and \
+ self._aux_cache is not None and \
+ len(self._aux_cache["modified"]) >= self._aux_cache_threshold and \
+ secpass >= 2:
+ self._owners.populate() # index any unindexed contents
+ valid_nodes = set(self.cpv_all())
+ for cpv in list(self._aux_cache["packages"]):
+ if cpv not in valid_nodes:
+ del self._aux_cache["packages"][cpv]
+ del self._aux_cache["modified"]
+ try:
+ f = atomic_ofstream(self._aux_cache_filename, 'wb')
+ pickle.dump(self._aux_cache, f, protocol=2)
+ f.close()
+ apply_secpass_permissions(
+ self._aux_cache_filename, gid=portage_gid, mode=0o644)
+ except (IOError, OSError) as e:
+ pass
+ self._aux_cache["modified"] = set()
+
+ @property
+ def _aux_cache(self):
+ if self._aux_cache_obj is None:
+ self._aux_cache_init()
+ return self._aux_cache_obj
+
+ def _aux_cache_init(self):
+ aux_cache = None
+ open_kwargs = {}
+ if sys.hexversion >= 0x3000000:
+ # Buffered io triggers extreme performance issues in
+ # Unpickler.load() (problem observed with python-3.0.1).
+ # Unfortunately, performance is still poor relative to
+ # python-2.x, but buffering makes it much worse.
+ open_kwargs["buffering"] = 0
+ try:
+ f = open(_unicode_encode(self._aux_cache_filename,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='rb', **open_kwargs)
+ mypickle = pickle.Unpickler(f)
+ try:
+ mypickle.find_global = None
+ except AttributeError:
+ # TODO: If py3k, override Unpickler.find_class().
+ pass
+ aux_cache = mypickle.load()
+ f.close()
+ del f
+ except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError) as e:
+ if isinstance(e, pickle.UnpicklingError):
+ writemsg(_unicode_decode(_("!!! Error loading '%s': %s\n")) % \
+ (self._aux_cache_filename, e), noiselevel=-1)
+ del e
+
+ if not aux_cache or \
+ not isinstance(aux_cache, dict) or \
+ aux_cache.get("version") != self._aux_cache_version or \
+ not aux_cache.get("packages"):
+ aux_cache = {"version": self._aux_cache_version}
+ aux_cache["packages"] = {}
+
+ owners = aux_cache.get("owners")
+ if owners is not None:
+ if not isinstance(owners, dict):
+ owners = None
+ elif "version" not in owners:
+ owners = None
+ elif owners["version"] != self._owners_cache_version:
+ owners = None
+ elif "base_names" not in owners:
+ owners = None
+ elif not isinstance(owners["base_names"], dict):
+ owners = None
+
+ if owners is None:
+ owners = {
+ "base_names" : {},
+ "version" : self._owners_cache_version
+ }
+ aux_cache["owners"] = owners
+
+ aux_cache["modified"] = set()
+ self._aux_cache_obj = aux_cache
+
+ def aux_get(self, mycpv, wants, myrepo = None):
+ """This automatically caches selected keys that are frequently needed
+ by emerge for dependency calculations. The cached metadata is
+ considered valid if the mtime of the package directory has not changed
+ since the data was cached. The cache is stored in a pickled dict
+ object with the following format:
+
+		{"version": "1", "packages": {cpv1: (mtime, {k1: v1, k2: v2, ...}), cpv2: ...}}
+
+ If an error occurs while loading the cache pickle or the version is
+		unrecognized, the cache will simply be recreated from scratch (it is
+ completely disposable).
+ """
+ cache_these_wants = self._aux_cache_keys.intersection(wants)
+ for x in wants:
+ if self._aux_cache_keys_re.match(x) is not None:
+ cache_these_wants.add(x)
+
+ if not cache_these_wants:
+ return self._aux_get(mycpv, wants)
+
+ cache_these = set(self._aux_cache_keys)
+ cache_these.update(cache_these_wants)
+
+ mydir = self.getpath(mycpv)
+ mydir_stat = None
+ try:
+ mydir_stat = os.stat(mydir)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ raise KeyError(mycpv)
+ mydir_mtime = mydir_stat[stat.ST_MTIME]
+ pkg_data = self._aux_cache["packages"].get(mycpv)
+ pull_me = cache_these.union(wants)
+ mydata = {"_mtime_" : mydir_mtime}
+ cache_valid = False
+ cache_incomplete = False
+ cache_mtime = None
+ metadata = None
+ if pkg_data is not None:
+ if not isinstance(pkg_data, tuple) or len(pkg_data) != 2:
+ pkg_data = None
+ else:
+ cache_mtime, metadata = pkg_data
+ if not isinstance(cache_mtime, (long, int)) or \
+ not isinstance(metadata, dict):
+ pkg_data = None
+
+ if pkg_data:
+ cache_mtime, metadata = pkg_data
+ cache_valid = cache_mtime == mydir_mtime
+ if cache_valid:
+ # Migrate old metadata to unicode.
+ for k, v in metadata.items():
+ metadata[k] = _unicode_decode(v,
+ encoding=_encodings['repo.content'], errors='replace')
+
+ mydata.update(metadata)
+ pull_me.difference_update(mydata)
+
+ if pull_me:
+ # pull any needed data and cache it
+ aux_keys = list(pull_me)
+ for k, v in zip(aux_keys,
+ self._aux_get(mycpv, aux_keys, st=mydir_stat)):
+ mydata[k] = v
+ if not cache_valid or cache_these.difference(metadata):
+ cache_data = {}
+ if cache_valid and metadata:
+ cache_data.update(metadata)
+ for aux_key in cache_these:
+ cache_data[aux_key] = mydata[aux_key]
+ self._aux_cache["packages"][mycpv] = (mydir_mtime, cache_data)
+ self._aux_cache["modified"].add(mycpv)
+
+ if _slot_re.match(mydata['SLOT']) is None:
+ # Empty or invalid slot triggers InvalidAtom exceptions when
+ # generating slot atoms for packages, so translate it to '0' here.
+ mydata['SLOT'] = _unicode_decode('0')
+
+ return [mydata[x] for x in wants]
+
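+	# Illustrative sketch: aux_get() against the installed-package db
+	# (hypothetical cpv; assumes vardb as above):
+	#
+	#     slot, counter = vardb.aux_get("sys-apps/foo-1.0", ["SLOT", "COUNTER"])
+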
+ def _aux_get(self, mycpv, wants, st=None):
+ mydir = self.getpath(mycpv)
+ if st is None:
+ try:
+ st = os.stat(mydir)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ raise KeyError(mycpv)
+ elif e.errno == PermissionDenied.errno:
+ raise PermissionDenied(mydir)
+ else:
+ raise
+ if not stat.S_ISDIR(st.st_mode):
+ raise KeyError(mycpv)
+ results = []
+ for x in wants:
+ if x == "_mtime_":
+ results.append(st[stat.ST_MTIME])
+ continue
+ try:
+ myf = io.open(
+ _unicode_encode(os.path.join(mydir, x),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace')
+ try:
+ myd = myf.read()
+ finally:
+ myf.close()
+ # Preserve \n for metadata that is known to
+ # contain multiple lines.
+ if self._aux_multi_line_re.match(x) is None:
+ myd = " ".join(myd.split())
+ except IOError:
+ myd = _unicode_decode('')
+ if x == "EAPI" and not myd:
+ results.append(_unicode_decode('0'))
+ else:
+ results.append(myd)
+ return results
+
+ def aux_update(self, cpv, values):
+ mylink = self._dblink(cpv)
+ if not mylink.exists():
+ raise KeyError(cpv)
+ self._bump_mtime(cpv)
+ self._clear_pkg_cache(mylink)
+ for k, v in values.items():
+ if v:
+ mylink.setfile(k, v)
+ else:
+ try:
+ os.unlink(os.path.join(self.getpath(cpv), k))
+ except EnvironmentError:
+ pass
+ self._bump_mtime(cpv)
+
+ def counter_tick(self, myroot=None, mycpv=None):
+ """
+ @param myroot: ignored, self._eroot is used instead
+ """
+ return self.counter_tick_core(incrementing=1, mycpv=mycpv)
+
+ def get_counter_tick_core(self, myroot=None, mycpv=None):
+ """
+ Use this method to retrieve the counter instead
+ of having to trust the value of a global counter
+ file that can lead to invalid COUNTER
+ generation. When cache is valid, the package COUNTER
+ files are not read and we rely on the timestamp of
+ the package directory to validate cache. The stat
+ calls should only take a short time, so performance
+ is sufficient without having to rely on a potentially
+ corrupt global counter file.
+
+ The global counter file located at
+ $CACHE_PATH/counter serves to record the
+ counter of the last installed package and
+ it also corresponds to the total number of
+ installation actions that have occurred in
+ the history of this package database.
+
+ @param myroot: ignored, self._eroot is used instead
+ """
+ myroot = None
+ new_vdb = False
+ counter = -1
+ try:
+ cfile = io.open(
+ _unicode_encode(self._counter_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace')
+ except EnvironmentError as e:
+ new_vdb = not bool(self.cpv_all())
+ if not new_vdb:
+ writemsg(_("!!! Unable to read COUNTER file: '%s'\n") % \
+ self._counter_path, noiselevel=-1)
+ writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ del e
+ else:
+ try:
+ try:
+ counter = long(cfile.readline().strip())
+ finally:
+ cfile.close()
+ except (OverflowError, ValueError) as e:
+ writemsg(_("!!! COUNTER file is corrupt: '%s'\n") % \
+ self._counter_path, noiselevel=-1)
+ writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ del e
+
+ if self._cached_counter == counter:
+ max_counter = counter
+ else:
+ # We must ensure that we return a counter
+ # value that is at least as large as the
+ # highest one from the installed packages,
+ # since having a corrupt value that is too low
+ # can trigger incorrect AUTOCLEAN behavior due
+ # to newly installed packages having lower
+ # COUNTERs than the previous version in the
+ # same slot.
+ max_counter = counter
+ for cpv in self.cpv_all():
+ try:
+ pkg_counter = int(self.aux_get(cpv, ["COUNTER"])[0])
+ except (KeyError, OverflowError, ValueError):
+ continue
+ if pkg_counter > max_counter:
+ max_counter = pkg_counter
+
+ if counter < 0 and not new_vdb:
+ writemsg(_("!!! Initializing COUNTER to " \
+ "value of %d\n") % max_counter, noiselevel=-1)
+
+ return max_counter + 1
+
+ def counter_tick_core(self, myroot=None, incrementing=1, mycpv=None):
+ """
+ This method will grab the next COUNTER value and record it back
+ to the global file. Note that every package install must have
+ a unique counter, since a slotmove update can move two packages
+ into the same SLOT and in that case it's important that both
+ packages have different COUNTER metadata.
+
+ @param myroot: ignored, self._eroot is used instead
+ @param mycpv: ignored
+ @rtype: int
+ @returns: new counter value
+ """
+ myroot = None
+ mycpv = None
+ self.lock()
+ try:
+ counter = self.get_counter_tick_core() - 1
+ if incrementing:
+ #increment counter
+ counter += 1
+ # update new global counter file
+ try:
+ write_atomic(self._counter_path, str(counter))
+ except InvalidLocation:
+ self.settings._init_dirs()
+ write_atomic(self._counter_path, str(counter))
+ self._cached_counter = counter
+
+ # Since we hold a lock, this is a good opportunity
+ # to flush the cache. Note that this will only
+ # flush the cache periodically in the main process
+ # when _aux_cache_threshold is exceeded.
+ self.flush_cache()
+ finally:
+ self.unlock()
+
+ return counter
+
+ def _dblink(self, cpv):
+ category, pf = catsplit(cpv)
+ return dblink(category, pf, settings=self.settings,
+ vartree=self.vartree, treetype="vartree")
+
+ def removeFromContents(self, pkg, paths, relative_paths=True):
+ """
+ @param pkg: cpv for an installed package
+ @type pkg: string
+ @param paths: paths of files to remove from contents
+ @type paths: iterable
+ """
+ if not hasattr(pkg, "getcontents"):
+ pkg = self._dblink(pkg)
+ root = self.settings['ROOT']
+ root_len = len(root) - 1
+ new_contents = pkg.getcontents().copy()
+ removed = 0
+
+ for filename in paths:
+ filename = _unicode_decode(filename,
+ encoding=_encodings['content'], errors='strict')
+ filename = normalize_path(filename)
+ if relative_paths:
+ relative_filename = filename
+ else:
+ relative_filename = filename[root_len:]
+ contents_key = pkg._match_contents(relative_filename)
+ if contents_key:
+ del new_contents[contents_key]
+ removed += 1
+
+ if removed:
+ self._bump_mtime(pkg.mycpv)
+ f = atomic_ofstream(os.path.join(pkg.dbdir, "CONTENTS"))
+ write_contents(new_contents, root, f)
+ f.close()
+ self._bump_mtime(pkg.mycpv)
+ pkg._clear_contents_cache()
+
+ class _owners_cache(object):
+ """
+		This class maintains a hash table that indexes package contents by
+		mapping the basename of each file to a list of possible
+ packages that own it. This is used to optimize owner lookups
+ by narrowing the search down to a smaller number of packages.
+ """
+ try:
+ from hashlib import md5 as _new_hash
+ except ImportError:
+ from md5 import new as _new_hash
+
+ _hash_bits = 16
+ _hex_chars = int(_hash_bits / 4)
+
+ def __init__(self, vardb):
+ self._vardb = vardb
+
+ def add(self, cpv):
+ eroot_len = len(self._vardb._eroot)
+ contents = self._vardb._dblink(cpv).getcontents()
+ pkg_hash = self._hash_pkg(cpv)
+ if not contents:
+ # Empty path is a code used to represent empty contents.
+ self._add_path("", pkg_hash)
+
+ for x in contents:
+ self._add_path(x[eroot_len:], pkg_hash)
+
+ self._vardb._aux_cache["modified"].add(cpv)
+
+ def _add_path(self, path, pkg_hash):
+ """
+ Empty path is a code that represents empty contents.
+ """
+ if path:
+ name = os.path.basename(path.rstrip(os.path.sep))
+ if not name:
+ return
+ else:
+ name = path
+ name_hash = self._hash_str(name)
+ base_names = self._vardb._aux_cache["owners"]["base_names"]
+ pkgs = base_names.get(name_hash)
+ if pkgs is None:
+ pkgs = {}
+ base_names[name_hash] = pkgs
+ pkgs[pkg_hash] = None
+
+ def _hash_str(self, s):
+ h = self._new_hash()
+ # Always use a constant utf_8 encoding here, since
+ # the "default" encoding can change.
+ h.update(_unicode_encode(s,
+ encoding=_encodings['repo.content'],
+ errors='backslashreplace'))
+ h = h.hexdigest()
+ h = h[-self._hex_chars:]
+ h = int(h, 16)
+ return h
+
+ def _hash_pkg(self, cpv):
+ counter, mtime = self._vardb.aux_get(
+ cpv, ["COUNTER", "_mtime_"])
+ try:
+ counter = int(counter)
+ except ValueError:
+ counter = 0
+ return (cpv, counter, mtime)
+
+ class _owners_db(object):
+
+ def __init__(self, vardb):
+ self._vardb = vardb
+
+ def populate(self):
+ self._populate()
+
+ def _populate(self):
+ owners_cache = vardbapi._owners_cache(self._vardb)
+ cached_hashes = set()
+ base_names = self._vardb._aux_cache["owners"]["base_names"]
+
+ # Take inventory of all cached package hashes.
+ for name, hash_values in list(base_names.items()):
+ if not isinstance(hash_values, dict):
+ del base_names[name]
+ continue
+ cached_hashes.update(hash_values)
+
+ # Create sets of valid package hashes and uncached packages.
+ uncached_pkgs = set()
+ hash_pkg = owners_cache._hash_pkg
+ valid_pkg_hashes = set()
+ for cpv in self._vardb.cpv_all():
+ hash_value = hash_pkg(cpv)
+ valid_pkg_hashes.add(hash_value)
+ if hash_value not in cached_hashes:
+ uncached_pkgs.add(cpv)
+
+ # Cache any missing packages.
+ for cpv in uncached_pkgs:
+ owners_cache.add(cpv)
+
+ # Delete any stale cache.
+ stale_hashes = cached_hashes.difference(valid_pkg_hashes)
+ if stale_hashes:
+ for base_name_hash, bucket in list(base_names.items()):
+ for hash_value in stale_hashes.intersection(bucket):
+ del bucket[hash_value]
+ if not bucket:
+ del base_names[base_name_hash]
+
+ return owners_cache
+
+ def get_owners(self, path_iter):
+ """
+ @return the owners as a dblink -> set(files) mapping.
+ """
+ owners = {}
+ for owner, f in self.iter_owners(path_iter):
+ owned_files = owners.get(owner)
+ if owned_files is None:
+ owned_files = set()
+ owners[owner] = owned_files
+ owned_files.add(f)
+ return owners
+
+ def getFileOwnerMap(self, path_iter):
+ owners = self.get_owners(path_iter)
+ file_owners = {}
+ for pkg_dblink, files in owners.items():
+ for f in files:
+ owner_set = file_owners.get(f)
+ if owner_set is None:
+ owner_set = set()
+ file_owners[f] = owner_set
+ owner_set.add(pkg_dblink)
+ return file_owners
+
+ def iter_owners(self, path_iter):
+ """
+ Iterate over tuples of (dblink, path). In order to avoid
+ consuming too many resources for too much time, resources
+ are only allocated for the duration of a given iter_owners()
+ call. Therefore, to maximize reuse of resources when searching
+ for multiple files, it's best to search for them all in a single
+ call.
+ """
+
+ if not isinstance(path_iter, list):
+ path_iter = list(path_iter)
+ owners_cache = self._populate()
+ vardb = self._vardb
+ root = vardb._eroot
+ hash_pkg = owners_cache._hash_pkg
+ hash_str = owners_cache._hash_str
+ base_names = self._vardb._aux_cache["owners"]["base_names"]
+
+ dblink_cache = {}
+
+ def dblink(cpv):
+ x = dblink_cache.get(cpv)
+ if x is None:
+ if len(dblink_cache) > 20:
+ # Ensure that we don't run out of memory.
+ raise StopIteration()
+ x = self._vardb._dblink(cpv)
+ dblink_cache[cpv] = x
+ return x
+
+ while path_iter:
+
+ path = path_iter.pop()
+ is_basename = os.sep != path[:1]
+ if is_basename:
+ name = path
+ else:
+ name = os.path.basename(path.rstrip(os.path.sep))
+
+ if not name:
+ continue
+
+ name_hash = hash_str(name)
+ pkgs = base_names.get(name_hash)
+ owners = []
+ if pkgs is not None:
+ try:
+ for hash_value in pkgs:
+ if not isinstance(hash_value, tuple) or \
+ len(hash_value) != 3:
+ continue
+ cpv, counter, mtime = hash_value
+ if not isinstance(cpv, basestring):
+ continue
+ try:
+ current_hash = hash_pkg(cpv)
+ except KeyError:
+ continue
+
+ if current_hash != hash_value:
+ continue
+
+ if is_basename:
+ for p in dblink(cpv).getcontents():
+ if os.path.basename(p) == name:
+ owners.append((cpv, p[len(root):]))
+ else:
+ if dblink(cpv).isowner(path):
+ owners.append((cpv, path))
+
+ except StopIteration:
+ path_iter.append(path)
+ del owners[:]
+ dblink_cache.clear()
+ gc.collect()
+ for x in self._iter_owners_low_mem(path_iter):
+ yield x
+ return
+ else:
+ for cpv, p in owners:
+ yield (dblink(cpv), p)
+
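+		# Illustrative sketch: finding which installed packages own a path
+		# (hypothetical path; assumes vardb as above):
+		#
+		#     for pkg_dblink, relpath in vardb._owners.iter_owners(["/bin/sh"]):
+		#         print(pkg_dblink.mycpv, relpath)
+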
+ def _iter_owners_low_mem(self, path_list):
+ """
+		This implementation creates a short-lived dblink instance (and
+		parses CONTENTS) for every single installed package. This is
+		slower, but uses less memory than the method which uses the
+ basename cache.
+ """
+
+ if not path_list:
+ return
+
+ path_info_list = []
+ for path in path_list:
+ is_basename = os.sep != path[:1]
+ if is_basename:
+ name = path
+ else:
+ name = os.path.basename(path.rstrip(os.path.sep))
+ path_info_list.append((path, name, is_basename))
+
+ root = self._vardb._eroot
+ for cpv in self._vardb.cpv_all():
+ dblnk = self._vardb._dblink(cpv)
+
+ for path, name, is_basename in path_info_list:
+ if is_basename:
+ for p in dblnk.getcontents():
+ if os.path.basename(p) == name:
+ yield dblnk, p[len(root):]
+ else:
+ if dblnk.isowner(path):
+ yield dblnk, path
+
+class vartree(object):
+ "this tree will scan a var/db/pkg database located at root (passed to init)"
+ def __init__(self, root=None, virtual=None, categories=None,
+ settings=None):
+
+ if settings is None:
+ settings = portage.settings
+ self.root = settings['ROOT']
+
+ if root is not None and root != self.root:
+ warnings.warn("The 'root' parameter of the " + \
+ "portage.dbapi.vartree.vartree" + \
+ " constructor is now unused. Use " + \
+ "settings['ROOT'] instead.",
+ DeprecationWarning, stacklevel=2)
+
+ self.settings = settings
+ self.dbapi = vardbapi(settings=settings, vartree=self)
+ self.populated = 1
+
+ def getpath(self, mykey, filename=None):
+ return self.dbapi.getpath(mykey, filename=filename)
+
+ def zap(self, mycpv):
+ return
+
+ def inject(self, mycpv):
+ return
+
+ def get_provide(self, mycpv):
+ myprovides = []
+ mylines = None
+ try:
+ mylines, myuse = self.dbapi.aux_get(mycpv, ["PROVIDE", "USE"])
+ if mylines:
+ myuse = myuse.split()
+ mylines = use_reduce(mylines, uselist=myuse, flat=True)
+ for myprovide in mylines:
+ mys = catpkgsplit(myprovide)
+ if not mys:
+ mys = myprovide.split("/")
+ myprovides += [mys[0] + "/" + mys[1]]
+ return myprovides
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ mydir = self.dbapi.getpath(mycpv)
+ writemsg(_("\nParse Error reading PROVIDE and USE in '%s'\n") % mydir,
+ noiselevel=-1)
+ if mylines:
+ writemsg(_("Possibly Invalid: '%s'\n") % str(mylines),
+ noiselevel=-1)
+ writemsg(_("Exception: %s\n\n") % str(e), noiselevel=-1)
+ return []
+
+ def get_all_provides(self):
+ myprovides = {}
+ for node in self.getallcpv():
+ for mykey in self.get_provide(node):
+ if mykey in myprovides:
+ myprovides[mykey] += [node]
+ else:
+ myprovides[mykey] = [node]
+ return myprovides
+
+ def dep_bestmatch(self, mydep, use_cache=1):
+ "compatibility method -- all matches, not just visible ones"
+ #mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
+ mymatch = best(self.dbapi.match(
+ dep_expand(mydep, mydb=self.dbapi, settings=self.settings),
+ use_cache=use_cache))
+ if mymatch is None:
+ return ""
+ else:
+ return mymatch
+
+ def dep_match(self, mydep, use_cache=1):
+ "compatibility method -- we want to see all matches, not just visible ones"
+ #mymatch = match(mydep,self.dbapi)
+ mymatch = self.dbapi.match(mydep, use_cache=use_cache)
+ if mymatch is None:
+ return []
+ else:
+ return mymatch
+
+ def exists_specific(self, cpv):
+ return self.dbapi.cpv_exists(cpv)
+
+ def getallcpv(self):
+ """temporary function, probably to be renamed --- Gets a list of all
+ category/package-versions installed on the system."""
+ return self.dbapi.cpv_all()
+
+ def getallnodes(self):
+		"""new behavior: these are all *unmasked* nodes. There may or may not be
+		masked packages available for the nodes in this list."""
+ return self.dbapi.cp_all()
+
+ def getebuildpath(self, fullpackage):
+ cat, package = catsplit(fullpackage)
+ return self.getpath(fullpackage, filename=package+".ebuild")
+
+ def getslot(self, mycatpkg):
+ "Get a slot for a catpkg; assume it exists."
+ try:
+ return self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
+ except KeyError:
+ return ""
+
+ def populate(self):
+ self.populated=1
+
+class dblink(object):
+ """
+	This class provides an interface to the installed package database.
+ At present this is implemented as a text backend in /var/db/pkg.
+ """
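+	# On disk this backend keeps one directory per installed package,
+	# holding CONTENTS, SLOT and other metadata files, e.g. for a
+	# hypothetical package:
+	#
+	#     /var/db/pkg/app-misc/foo-1.0/CONTENTS
+	#     /var/db/pkg/app-misc/foo-1.0/SLOT
+	#     /var/db/pkg/app-misc/foo-1.0/foo-1.0.ebuild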
+
+ import re
+ _normalize_needed = re.compile(r'//|^[^/]|./$|(^|/)\.\.?(/|$)')
+
+ _contents_re = re.compile(r'^(' + \
+ r'(?P<dir>(dev|dir|fif) (.+))|' + \
+ r'(?P<obj>(obj) (.+) (\S+) (\d+))|' + \
+ r'(?P<sym>(sym) (.+) -> (.+) ((\d+)|(?P<oldsym>(' + \
+ r'\(\d+, \d+L, \d+L, \d+, \d+, \d+, \d+L, \d+, (\d+), \d+\)))))' + \
+ r')$'
+ )
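+	# Examples of CONTENTS lines matched by the branches above
+	# (hypothetical paths, checksum and mtimes):
+	#
+	#     dir /usr/bin
+	#     obj /usr/bin/foo d41d8cd98f00b204e9800998ecf8427e 1313919490
+	#     sym /usr/lib/libfoo.so -> libfoo.so.1 1313919490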
+
+ def __init__(self, cat, pkg, myroot=None, settings=None, treetype=None,
+ vartree=None, blockers=None, scheduler=None, pipe=None):
+ """
+		Creates a dblink object for a given CPV.
+		The given CPV need not be present in the database already.
+
+ @param cat: Category
+ @type cat: String
+ @param pkg: Package (PV)
+ @type pkg: String
+ @param myroot: ignored, settings['ROOT'] is used instead
+ @type myroot: String (Path)
+ @param settings: Typically portage.settings
+ @type settings: portage.config
+ @param treetype: one of ['porttree','bintree','vartree']
+ @type treetype: String
+ @param vartree: an instance of vartree corresponding to myroot.
+ @type vartree: vartree
+ """
+
+ if settings is None:
+ raise TypeError("settings argument is required")
+
+ mysettings = settings
+ myroot = settings['ROOT']
+ self.cat = cat
+ self.pkg = pkg
+ self.mycpv = self.cat + "/" + self.pkg
+ self.mysplit = list(catpkgsplit(self.mycpv)[1:])
+ self.mysplit[0] = "%s/%s" % (self.cat, self.mysplit[0])
+ self.treetype = treetype
+ if vartree is None:
+ vartree = portage.db[myroot]["vartree"]
+ self.vartree = vartree
+ self._blockers = blockers
+ self._scheduler = scheduler
+
+ # WARNING: EROOT support is experimental and may be incomplete
+ # for cases in which EPREFIX is non-empty.
+ self._eroot = mysettings['EROOT']
+ self.dbroot = normalize_path(os.path.join(self._eroot, VDB_PATH))
+ self.dbcatdir = self.dbroot+"/"+cat
+ self.dbpkgdir = self.dbcatdir+"/"+pkg
+ self.dbtmpdir = self.dbcatdir+"/-MERGING-"+pkg
+ self.dbdir = self.dbpkgdir
+ self.settings = mysettings
+ self._verbose = self.settings.get("PORTAGE_VERBOSE") == "1"
+
+ self.myroot=myroot
+ self._installed_instance = None
+ self.contentscache = None
+ self._contents_inodes = None
+ self._contents_basenames = None
+ self._linkmap_broken = False
+ self._md5_merge_map = {}
+ self._hash_key = (self.myroot, self.mycpv)
+ self._protect_obj = None
+ self._pipe = pipe
+
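+	# A minimal construction sketch (hypothetical package; assumes an
+	# initialized portage.settings):
+	#
+	#     settings = portage.settings
+	#     dbl = dblink("app-misc", "foo-1.0", settings=settings,
+	#         treetype="vartree",
+	#         vartree=portage.db[settings["ROOT"]]["vartree"])
+	#     installed = dbl.exists()
+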
+ def __hash__(self):
+ return hash(self._hash_key)
+
+ def __eq__(self, other):
+ return isinstance(other, dblink) and \
+ self._hash_key == other._hash_key
+
+ def _get_protect_obj(self):
+
+ if self._protect_obj is None:
+ self._protect_obj = ConfigProtect(self._eroot,
+ portage.util.shlex_split(
+ self.settings.get("CONFIG_PROTECT", "")),
+ portage.util.shlex_split(
+ self.settings.get("CONFIG_PROTECT_MASK", "")))
+
+ return self._protect_obj
+
+ def isprotected(self, obj):
+ return self._get_protect_obj().isprotected(obj)
+
+ def updateprotect(self):
+ self._get_protect_obj().updateprotect()
+
+ def lockdb(self):
+ self.vartree.dbapi.lock()
+
+ def unlockdb(self):
+ self.vartree.dbapi.unlock()
+
+ def getpath(self):
+ "return path to location of db information (for >>> informational display)"
+ return self.dbdir
+
+ def exists(self):
+ "does the db entry exist? boolean."
+ return os.path.exists(self.dbdir)
+
+ def delete(self):
+ """
+ Remove this entry from the database
+ """
+ if not os.path.exists(self.dbdir):
+ return
+
+ # Check validity of self.dbdir before attempting to remove it.
+ if not self.dbdir.startswith(self.dbroot):
+ writemsg(_("portage.dblink.delete(): invalid dbdir: %s\n") % \
+ self.dbdir, noiselevel=-1)
+ return
+
+ shutil.rmtree(self.dbdir)
+ # If empty, remove parent category directory.
+ try:
+ os.rmdir(os.path.dirname(self.dbdir))
+ except OSError:
+ pass
+ self.vartree.dbapi._remove(self)
+
+ def clearcontents(self):
+ """
+ For a given db entry (self), erase the CONTENTS values.
+ """
+ self.lockdb()
+ try:
+ if os.path.exists(self.dbdir+"/CONTENTS"):
+ os.unlink(self.dbdir+"/CONTENTS")
+ finally:
+ self.unlockdb()
+
+ def _clear_contents_cache(self):
+ self.contentscache = None
+ self._contents_inodes = None
+ self._contents_basenames = None
+
+ def getcontents(self):
+ """
+ Get the installed files of a given package (aka what that package installed)
+ """
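+		# The returned dict maps absolute paths (CONTENTS entries already
+		# contain EPREFIX; ROOT is prepended below) to tuples of the form:
+		#     ("dir",) / ("dev",) / ("fif",)   for dirs, devices, fifos
+		#     ("obj", mtime, md5)              for regular files
+		#     ("sym", mtime, target)           for symlinks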
+ contents_file = os.path.join(self.dbdir, "CONTENTS")
+ if self.contentscache is not None:
+ return self.contentscache
+ pkgfiles = {}
+ try:
+ myc = io.open(_unicode_encode(contents_file,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace')
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ self.contentscache = pkgfiles
+ return pkgfiles
+ mylines = myc.readlines()
+ myc.close()
+ null_byte = "\0"
+ normalize_needed = self._normalize_needed
+ contents_re = self._contents_re
+ obj_index = contents_re.groupindex['obj']
+ dir_index = contents_re.groupindex['dir']
+ sym_index = contents_re.groupindex['sym']
+ # The old symlink format may exist on systems that have packages
+ # which were installed many years ago (see bug #351814).
+ oldsym_index = contents_re.groupindex['oldsym']
+ # CONTENTS files already contain EPREFIX
+ myroot = self.settings['ROOT']
+ if myroot == os.path.sep:
+ myroot = None
+ # used to generate parent dir entries
+ dir_entry = (_unicode_decode("dir"),)
+ eroot_split_len = len(self.settings["EROOT"].split(os.sep)) - 1
+ pos = 0
+ errors = []
+ for pos, line in enumerate(mylines):
+ if null_byte in line:
+ # Null bytes are a common indication of corruption.
+ errors.append((pos + 1, _("Null byte found in CONTENTS entry")))
+ continue
+ line = line.rstrip("\n")
+ m = contents_re.match(line)
+ if m is None:
+ errors.append((pos + 1, _("Unrecognized CONTENTS entry")))
+ continue
+
+ if m.group(obj_index) is not None:
+ base = obj_index
+ #format: type, mtime, md5sum
+ data = (m.group(base+1), m.group(base+4), m.group(base+3))
+ elif m.group(dir_index) is not None:
+ base = dir_index
+ #format: type
+ data = (m.group(base+1),)
+ elif m.group(sym_index) is not None:
+ base = sym_index
+ if m.group(oldsym_index) is None:
+ mtime = m.group(base+5)
+ else:
+ mtime = m.group(base+8)
+ #format: type, mtime, dest
+ data = (m.group(base+1), mtime, m.group(base+3))
+ else:
+				# This won't happen as long as the regular expression
+				# is written to only match valid entries.
+ raise AssertionError(_("required group not found " + \
+ "in CONTENTS entry: '%s'") % line)
+
+ path = m.group(base+2)
+ if normalize_needed.search(path) is not None:
+ path = normalize_path(path)
+ if not path.startswith(os.path.sep):
+ path = os.path.sep + path
+
+ if myroot is not None:
+ path = os.path.join(myroot, path.lstrip(os.path.sep))
+
+ # Implicitly add parent directories, since we can't necessarily
+ # assume that they are explicitly listed in CONTENTS, and it's
+ # useful for callers if they can rely on parent directory entries
+ # being generated here (crucial for things like dblink.isowner()).
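+			# For example, a CONTENTS entry for /usr/bin/foo (hypothetical)
+			# implies "dir" entries for /usr and /usr/bin.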
+ path_split = path.split(os.sep)
+ path_split.pop()
+ while len(path_split) > eroot_split_len:
+ parent = os.sep.join(path_split)
+ if parent in pkgfiles:
+ break
+ pkgfiles[parent] = dir_entry
+ path_split.pop()
+
+ pkgfiles[path] = data
+
+ if errors:
+ writemsg(_("!!! Parse error in '%s'\n") % contents_file, noiselevel=-1)
+ for pos, e in errors:
+ writemsg(_("!!! line %d: %s\n") % (pos, e), noiselevel=-1)
+ self.contentscache = pkgfiles
+ return pkgfiles
+
+ def _prune_plib_registry(self, unmerge=False,
+ needed=None, preserve_paths=None):
+ # remove preserved libraries that don't have any consumers left
+ if not (self._linkmap_broken or
+ self.vartree.dbapi._linkmap is None or
+ self.vartree.dbapi._plib_registry is None):
+ self.vartree.dbapi._fs_lock()
+ plib_registry = self.vartree.dbapi._plib_registry
+ plib_registry.lock()
+ try:
+ plib_registry.load()
+
+ unmerge_with_replacement = \
+ unmerge and preserve_paths is not None
+ if unmerge_with_replacement:
+ # If self.mycpv is about to be unmerged and we
+ # have a replacement package, we want to exclude
+ # the irrelevant NEEDED data that belongs to
+ # files which are being unmerged now.
+ exclude_pkgs = (self.mycpv,)
+ else:
+ exclude_pkgs = None
+
+ self._linkmap_rebuild(exclude_pkgs=exclude_pkgs,
+ include_file=needed, preserve_paths=preserve_paths)
+
+ if unmerge:
+ unmerge_preserve = None
+ if not unmerge_with_replacement:
+ unmerge_preserve = \
+ self._find_libs_to_preserve(unmerge=True)
+ counter = self.vartree.dbapi.cpv_counter(self.mycpv)
+ plib_registry.unregister(self.mycpv,
+ self.settings["SLOT"], counter)
+ if unmerge_preserve:
+ for path in sorted(unmerge_preserve):
+ contents_key = self._match_contents(path)
+ if not contents_key:
+ continue
+ obj_type = self.getcontents()[contents_key][0]
+ self._display_merge(_(">>> needed %s %s\n") % \
+ (obj_type, contents_key), noiselevel=-1)
+ plib_registry.register(self.mycpv,
+ self.settings["SLOT"], counter, unmerge_preserve)
+ # Remove the preserved files from our contents
+ # so that they won't be unmerged.
+ self.vartree.dbapi.removeFromContents(self,
+ unmerge_preserve)
+
+ unmerge_no_replacement = \
+ unmerge and not unmerge_with_replacement
+ cpv_lib_map = self._find_unused_preserved_libs(
+ unmerge_no_replacement)
+ if cpv_lib_map:
+ self._remove_preserved_libs(cpv_lib_map)
+ self.vartree.dbapi.lock()
+ try:
+ for cpv, removed in cpv_lib_map.items():
+ if not self.vartree.dbapi.cpv_exists(cpv):
+ continue
+ self.vartree.dbapi.removeFromContents(cpv, removed)
+ finally:
+ self.vartree.dbapi.unlock()
+
+ plib_registry.store()
+ finally:
+ plib_registry.unlock()
+ self.vartree.dbapi._fs_unlock()
+
+ def unmerge(self, pkgfiles=None, trimworld=None, cleanup=True,
+ ldpath_mtimes=None, others_in_slot=None, needed=None,
+ preserve_paths=None):
+ """
+		Calls prerm
+		Unmerges a given package (CPV)
+		Calls postrm
+		Calls cleanrm
+		Calls env_update
+
+ @param pkgfiles: files to unmerge (generally self.getcontents() )
+ @type pkgfiles: Dictionary
+ @param trimworld: Unused
+ @type trimworld: Boolean
+ @param cleanup: cleanup to pass to doebuild (see doebuild)
+ @type cleanup: Boolean
+ @param ldpath_mtimes: mtimes to pass to env_update (see env_update)
+ @type ldpath_mtimes: Dictionary
+ @param others_in_slot: all dblink instances in this slot, excluding self
+ @type others_in_slot: list
+ @param needed: Filename containing libraries needed after unmerge.
+ @type needed: String
+ @param preserve_paths: Libraries preserved by a package instance that
+ is currently being merged. They need to be explicitly passed to the
+ LinkageMap, since they are not registered in the
+ PreservedLibsRegistry yet.
+ @type preserve_paths: set
+ @rtype: Integer
+ @returns:
+ 1. os.EX_OK if everything went well.
+ 2. return code of the failed phase (for prerm, postrm, cleanrm)
+ """
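+		# A minimal call sketch (hypothetical dblink instance "dbl";
+		# callers conventionally hold the vardb lock around this):
+		#
+		#     dbl.lockdb()
+		#     try:
+		#         retval = dbl.unmerge(ldpath_mtimes=ldpath_mtimes)
+		#     finally:
+		#         dbl.unlockdb()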
+
+ if trimworld is not None:
+ warnings.warn("The trimworld parameter of the " + \
+ "portage.dbapi.vartree.dblink.unmerge()" + \
+ " method is now unused.",
+ DeprecationWarning, stacklevel=2)
+
+ background = False
+ log_path = self.settings.get("PORTAGE_LOG_FILE")
+ if self._scheduler is None:
+ # We create a scheduler instance and use it to
+ # log unmerge output separately from merge output.
+ self._scheduler = PollScheduler().sched_iface
+ if self.settings.get("PORTAGE_BACKGROUND") == "subprocess":
+ if self.settings.get("PORTAGE_BACKGROUND_UNMERGE") == "1":
+ self.settings["PORTAGE_BACKGROUND"] = "1"
+ self.settings.backup_changes("PORTAGE_BACKGROUND")
+ background = True
+ elif self.settings.get("PORTAGE_BACKGROUND_UNMERGE") == "0":
+ self.settings["PORTAGE_BACKGROUND"] = "0"
+ self.settings.backup_changes("PORTAGE_BACKGROUND")
+ elif self.settings.get("PORTAGE_BACKGROUND") == "1":
+ background = True
+
+ self.vartree.dbapi._bump_mtime(self.mycpv)
+ showMessage = self._display_merge
+ if self.vartree.dbapi._categories is not None:
+ self.vartree.dbapi._categories = None
+ # When others_in_slot is supplied, the security check has already been
+ # done for this slot, so it shouldn't be repeated until the next
+ # replacement or unmerge operation.
+ if others_in_slot is None:
+ slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
+ slot_matches = self.vartree.dbapi.match(
+ "%s:%s" % (portage.cpv_getkey(self.mycpv), slot))
+ others_in_slot = []
+ for cur_cpv in slot_matches:
+ if cur_cpv == self.mycpv:
+ continue
+ others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
+ settings=self.settings, vartree=self.vartree,
+ treetype="vartree", pipe=self._pipe))
+
+ retval = self._security_check([self] + others_in_slot)
+ if retval:
+ return retval
+
+ contents = self.getcontents()
+ # Now, don't assume that the name of the ebuild is the same as the
+ # name of the dir; the package may have been moved.
+ myebuildpath = os.path.join(self.dbdir, self.pkg + ".ebuild")
+ failures = 0
+ ebuild_phase = "prerm"
+ mystuff = os.listdir(self.dbdir)
+ for x in mystuff:
+ if x.endswith(".ebuild"):
+ if x[:-7] != self.pkg:
+ # Clean up after vardbapi.move_ent() breakage in
+ # portage versions before 2.1.2
+ os.rename(os.path.join(self.dbdir, x), myebuildpath)
+ write_atomic(os.path.join(self.dbdir, "PF"), self.pkg+"\n")
+ break
+
+ if self.mycpv != self.settings.mycpv or \
+ "EAPI" not in self.settings.configdict["pkg"]:
+ # We avoid a redundant setcpv call here when
+ # the caller has already taken care of it.
+ self.settings.setcpv(self.mycpv, mydb=self.vartree.dbapi)
+
+ eapi_unsupported = False
+ try:
+ doebuild_environment(myebuildpath, "prerm",
+ settings=self.settings, db=self.vartree.dbapi)
+ except UnsupportedAPIException as e:
+ eapi_unsupported = e
+
+ self._prune_plib_registry(unmerge=True, needed=needed,
+ preserve_paths=preserve_paths)
+
+ builddir_lock = None
+ scheduler = self._scheduler
+ retval = os.EX_OK
+ try:
+ # Only create builddir_lock if the caller
+ # has not already acquired the lock.
+ if "PORTAGE_BUILDIR_LOCKED" not in self.settings:
+ builddir_lock = EbuildBuildDir(
+ scheduler=scheduler,
+ settings=self.settings)
+ builddir_lock.lock()
+ prepare_build_dirs(settings=self.settings, cleanup=True)
+ log_path = self.settings.get("PORTAGE_LOG_FILE")
+
+ # Log the error after PORTAGE_LOG_FILE is initialized
+ # by prepare_build_dirs above.
+ if eapi_unsupported:
+ # Sometimes this happens due to corruption of the EAPI file.
+ failures += 1
+ showMessage(_("!!! FAILED prerm: %s\n") % \
+ os.path.join(self.dbdir, "EAPI"),
+ level=logging.ERROR, noiselevel=-1)
+ showMessage(_unicode_decode("%s\n") % (eapi_unsupported,),
+ level=logging.ERROR, noiselevel=-1)
+ elif os.path.isfile(myebuildpath):
+ phase = EbuildPhase(background=background,
+ phase=ebuild_phase, scheduler=scheduler,
+ settings=self.settings)
+ phase.start()
+ retval = phase.wait()
+
+ # XXX: Decide how to handle failures here.
+ if retval != os.EX_OK:
+ failures += 1
+ showMessage(_("!!! FAILED prerm: %s\n") % retval,
+ level=logging.ERROR, noiselevel=-1)
+
+ self.vartree.dbapi._fs_lock()
+ try:
+ self._unmerge_pkgfiles(pkgfiles, others_in_slot)
+ finally:
+ self.vartree.dbapi._fs_unlock()
+ self._clear_contents_cache()
+
+ if not eapi_unsupported and os.path.isfile(myebuildpath):
+ ebuild_phase = "postrm"
+ phase = EbuildPhase(background=background,
+ phase=ebuild_phase, scheduler=scheduler,
+ settings=self.settings)
+ phase.start()
+ retval = phase.wait()
+
+ # XXX: Decide how to handle failures here.
+ if retval != os.EX_OK:
+ failures += 1
+ showMessage(_("!!! FAILED postrm: %s\n") % retval,
+ level=logging.ERROR, noiselevel=-1)
+
+ finally:
+ self.vartree.dbapi._bump_mtime(self.mycpv)
+ try:
+ if not eapi_unsupported and os.path.isfile(myebuildpath):
+ if retval != os.EX_OK:
+ msg_lines = []
+ msg = _("The '%(ebuild_phase)s' "
+ "phase of the '%(cpv)s' package "
+ "has failed with exit value %(retval)s.") % \
+ {"ebuild_phase":ebuild_phase, "cpv":self.mycpv,
+ "retval":retval}
+ from textwrap import wrap
+ msg_lines.extend(wrap(msg, 72))
+ msg_lines.append("")
+
+ ebuild_name = os.path.basename(myebuildpath)
+ ebuild_dir = os.path.dirname(myebuildpath)
+ msg = _("The problem occurred while executing "
+ "the ebuild file named '%(ebuild_name)s' "
+ "located in the '%(ebuild_dir)s' directory. "
+ "If necessary, manually remove "
+ "the environment.bz2 file and/or the "
+ "ebuild file located in that directory.") % \
+ {"ebuild_name":ebuild_name, "ebuild_dir":ebuild_dir}
+ msg_lines.extend(wrap(msg, 72))
+ msg_lines.append("")
+
+ msg = _("Removal "
+ "of the environment.bz2 file is "
+ "preferred since it may allow the "
+ "removal phases to execute successfully. "
+ "The ebuild will be "
+ "sourced and the eclasses "
+ "from the current portage tree will be used "
+ "when necessary. Removal of "
+ "the ebuild file will cause the "
+ "pkg_prerm() and pkg_postrm() removal "
+ "phases to be skipped entirely.")
+ msg_lines.extend(wrap(msg, 72))
+
+ self._eerror(ebuild_phase, msg_lines)
+
+ self._elog_process(phasefilter=("prerm", "postrm"))
+
+ if retval == os.EX_OK:
+ try:
+ doebuild_environment(myebuildpath, "cleanrm",
+ settings=self.settings, db=self.vartree.dbapi)
+ except UnsupportedAPIException:
+ pass
+ phase = EbuildPhase(background=background,
+ phase="cleanrm", scheduler=scheduler,
+ settings=self.settings)
+ phase.start()
+ retval = phase.wait()
+ finally:
+ if builddir_lock is not None:
+ builddir_lock.unlock()
+
+ if log_path is not None:
+
+ if not failures and 'unmerge-logs' not in self.settings.features:
+ try:
+ os.unlink(log_path)
+ except OSError:
+ pass
+
+ try:
+ st = os.stat(log_path)
+ except OSError:
+ pass
+ else:
+ if st.st_size == 0:
+ try:
+ os.unlink(log_path)
+ except OSError:
+ pass
+
+ if log_path is not None and os.path.exists(log_path):
+ # Restore this since it gets lost somewhere above and it
+ # needs to be set for _display_merge() to be able to log.
+ # Note that the log isn't necessarily supposed to exist
+ # since if PORT_LOGDIR is unset then it's a temp file
+ # so it gets cleaned above.
+ self.settings["PORTAGE_LOG_FILE"] = log_path
+ else:
+ self.settings.pop("PORTAGE_LOG_FILE", None)
+
+ # Lock the config memory file to prevent symlink creation
+ # in merge_contents from overlapping with env-update.
+ self.vartree.dbapi._fs_lock()
+ try:
+ env_update(target_root=self.settings['ROOT'],
+ prev_mtimes=ldpath_mtimes,
+ contents=contents, env=self.settings.environ(),
+ writemsg_level=self._display_merge)
+ finally:
+ self.vartree.dbapi._fs_unlock()
+
+ return os.EX_OK
+
+ def _display_merge(self, msg, level=0, noiselevel=0):
+ if not self._verbose and noiselevel >= 0 and level < logging.WARN:
+ return
+ if self._scheduler is None:
+ writemsg_level(msg, level=level, noiselevel=noiselevel)
+ else:
+ log_path = None
+ if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+ log_path = self.settings.get("PORTAGE_LOG_FILE")
+ background = self.settings.get("PORTAGE_BACKGROUND") == "1"
+
+ if background and log_path is None:
+ if level >= logging.WARN:
+ writemsg_level(msg, level=level, noiselevel=noiselevel)
+ else:
+ self._scheduler.output(msg,
+ log_path=log_path, background=background,
+ level=level, noiselevel=noiselevel)
+
+ def _unmerge_pkgfiles(self, pkgfiles, others_in_slot):
+ """
+
+		Unmerges the contents of a package from the liveFS.
+		Removes the VDB entry for self.
+
+ @param pkgfiles: typically self.getcontents()
+		@type pkgfiles: Dictionary { filename: ('type', 'mtime', 'md5sum') }
+ @param others_in_slot: all dblink instances in this slot, excluding self
+ @type others_in_slot: list
+ @rtype: None
+ """
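+		# e.g. (hypothetical entries, in the getcontents() format):
+		#     {"/usr/bin/foo": ("obj", "1313919490", "d41d8cd98f00b204e9800998ecf8427e"),
+		#      "/usr/bin": ("dir",)}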
+
+ os = _os_merge
+ perf_md5 = perform_md5
+ showMessage = self._display_merge
+
+ if not pkgfiles:
+ showMessage(_("No package files given... Grabbing a set.\n"))
+ pkgfiles = self.getcontents()
+
+ if others_in_slot is None:
+ others_in_slot = []
+ slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
+ slot_matches = self.vartree.dbapi.match(
+ "%s:%s" % (portage.cpv_getkey(self.mycpv), slot))
+ for cur_cpv in slot_matches:
+ if cur_cpv == self.mycpv:
+ continue
+ others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
+ settings=self.settings,
+ vartree=self.vartree, treetype="vartree", pipe=self._pipe))
+
+ dest_root = self._eroot
+ dest_root_len = len(dest_root) - 1
+
+ cfgfiledict = grabdict(self.vartree.dbapi._conf_mem_file)
+ stale_confmem = []
+ protected_symlinks = {}
+
+ unmerge_orphans = "unmerge-orphans" in self.settings.features
+ calc_prelink = "prelink-checksums" in self.settings.features
+
+ if pkgfiles:
+ self.updateprotect()
+ mykeys = list(pkgfiles)
+ mykeys.sort()
+ mykeys.reverse()
+
+ #process symlinks second-to-last, directories last.
+ mydirs = set()
+ ignored_unlink_errnos = (
+ errno.EBUSY, errno.ENOENT,
+ errno.ENOTDIR, errno.EISDIR)
+ ignored_rmdir_errnos = (
+ errno.EEXIST, errno.ENOTEMPTY,
+ errno.EBUSY, errno.ENOENT,
+ errno.ENOTDIR, errno.EISDIR,
+ errno.EPERM)
+ modprotect = os.path.join(self._eroot, "lib/modules/")
+
+ def unlink(file_name, lstatobj):
+ if bsd_chflags:
+ if lstatobj.st_flags != 0:
+ bsd_chflags.lchflags(file_name, 0)
+ parent_name = os.path.dirname(file_name)
+ # Use normal stat/chflags for the parent since we want to
+ # follow any symlinks to the real parent directory.
+ pflags = os.stat(parent_name).st_flags
+ if pflags != 0:
+ bsd_chflags.chflags(parent_name, 0)
+ try:
+ if not stat.S_ISLNK(lstatobj.st_mode):
+ # Remove permissions to ensure that any hardlinks to
+ # suid/sgid files are rendered harmless.
+ os.chmod(file_name, 0)
+ os.unlink(file_name)
+ except OSError as ose:
+ # If the chmod or unlink fails, you are in trouble.
+ # With Prefix this can be because the file is owned
+ # by someone else (a screwup by root?), on a normal
+ # system maybe filesystem corruption. In any case,
+ # if we backtrace and die here, we leave the system
+ # in a totally undefined state, hence we just bleed
+ # like hell and continue to hopefully finish all our
+ # administrative and pkg_postinst stuff.
+ self._eerror("postrm",
+ ["Could not chmod or unlink '%s': %s" % \
+ (file_name, ose)])
+ finally:
+ if bsd_chflags and pflags != 0:
+ # Restore the parent flags we saved before unlinking
+ bsd_chflags.chflags(parent_name, pflags)
+
+ def show_unmerge(zing, desc, file_type, file_name):
+ showMessage("%s %s %s %s\n" % \
+ (zing, desc.ljust(8), file_type, file_name))
+
+ unmerge_desc = {}
+ unmerge_desc["cfgpro"] = _("cfgpro")
+ unmerge_desc["replaced"] = _("replaced")
+ unmerge_desc["!dir"] = _("!dir")
+ unmerge_desc["!empty"] = _("!empty")
+ unmerge_desc["!fif"] = _("!fif")
+ unmerge_desc["!found"] = _("!found")
+ unmerge_desc["!md5"] = _("!md5")
+ unmerge_desc["!mtime"] = _("!mtime")
+ unmerge_desc["!obj"] = _("!obj")
+ unmerge_desc["!sym"] = _("!sym")
+
+ real_root = self.settings['ROOT']
+ real_root_len = len(real_root) - 1
+ eroot_split_len = len(self.settings["EROOT"].split(os.sep)) - 1
+
+ # These files are generated by emerge, so we need to remove
+ # them when they are the only thing left in a directory.
+ infodir_cleanup = frozenset(["dir", "dir.old"])
+ infodirs = frozenset(infodir for infodir in chain(
+ self.settings.get("INFOPATH", "").split(":"),
+ self.settings.get("INFODIR", "").split(":")) if infodir)
+ infodirs_inodes = set()
+ for infodir in infodirs:
+ infodir = os.path.join(real_root, infodir.lstrip(os.sep))
+ try:
+ statobj = os.stat(infodir)
+ except OSError:
+ pass
+ else:
+ infodirs_inodes.add((statobj.st_dev, statobj.st_ino))
+
+ for i, objkey in enumerate(mykeys):
+
+ obj = normalize_path(objkey)
+ if os is _os_merge:
+ try:
+ _unicode_encode(obj,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeEncodeError:
+ # The package appears to have been merged with a
+ # different value of sys.getfilesystemencoding(),
+ # so fall back to utf_8 if appropriate.
+ try:
+ _unicode_encode(obj,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeEncodeError:
+ pass
+ else:
+ os = portage.os
+ perf_md5 = portage.checksum.perform_md5
+
+ file_data = pkgfiles[objkey]
+ file_type = file_data[0]
+ statobj = None
+ try:
+ statobj = os.stat(obj)
+ except OSError:
+ pass
+ lstatobj = None
+ try:
+ lstatobj = os.lstat(obj)
+ except (OSError, AttributeError):
+ pass
+ islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
+ if lstatobj is None:
+ show_unmerge("---", unmerge_desc["!found"], file_type, obj)
+ continue
+ # don't use EROOT, CONTENTS entries already contain EPREFIX
+ if obj.startswith(real_root):
+ relative_path = obj[real_root_len:]
+ is_owned = False
+ for dblnk in others_in_slot:
+ if dblnk.isowner(relative_path):
+ is_owned = True
+ break
+
+ if file_type == "sym" and is_owned and \
+ (islink and statobj and stat.S_ISDIR(statobj.st_mode)):
+ # A new instance of this package claims the file, so
+ # don't unmerge it. If the file is symlink to a
+ # directory and the unmerging package installed it as
+ # a symlink, but the new owner has it listed as a
+ # directory, then we'll produce a warning since the
+ # symlink is a sort of orphan in this case (see
+ # bug #326685).
+ symlink_orphan = False
+ for dblnk in others_in_slot:
+ parent_contents_key = \
+ dblnk._match_contents(relative_path)
+ if not parent_contents_key:
+ continue
+ if not parent_contents_key.startswith(
+ real_root):
+ continue
+ if dblnk.getcontents()[
+ parent_contents_key][0] == "dir":
+ symlink_orphan = True
+ break
+
+ if symlink_orphan:
+ protected_symlinks.setdefault(
+ (statobj.st_dev, statobj.st_ino),
+ []).append(relative_path)
+
+ if is_owned:
+ show_unmerge("---", unmerge_desc["replaced"], file_type, obj)
+ continue
+ elif relative_path in cfgfiledict:
+ stale_confmem.append(relative_path)
+ # next line includes a tweak to protect modules from being unmerged,
+ # but we don't protect modules from being overwritten if they are
+ # upgraded. We effectively only want one half of the config protection
+ # functionality for /lib/modules. For portage-ng both capabilities
+ # should be able to be independently specified.
+ # TODO: For rebuilds, re-parent previous modules to the new
+ # installed instance (so they are not orphans). For normal
+ # uninstall (not rebuild/reinstall), remove the modules along
+ # with all other files (leave no orphans).
+ if obj.startswith(modprotect):
+ show_unmerge("---", unmerge_desc["cfgpro"], file_type, obj)
+ continue
+
+ # Don't unlink symlinks to directories here since that can
+ # remove /lib and /usr/lib symlinks.
+ if unmerge_orphans and \
+ lstatobj and not stat.S_ISDIR(lstatobj.st_mode) and \
+ not (islink and statobj and stat.S_ISDIR(statobj.st_mode)) and \
+ not self.isprotected(obj):
+ try:
+ unlink(obj, lstatobj)
+ except EnvironmentError as e:
+ if e.errno not in ignored_unlink_errnos:
+ raise
+ del e
+ show_unmerge("<<<", "", file_type, obj)
+ continue
+
+ lmtime = str(lstatobj[stat.ST_MTIME])
+ if (pkgfiles[objkey][0] not in ("dir", "fif", "dev")) and (lmtime != pkgfiles[objkey][1]):
+ show_unmerge("---", unmerge_desc["!mtime"], file_type, obj)
+ continue
+
+ if pkgfiles[objkey][0] == "dir":
+ if lstatobj is None or not stat.S_ISDIR(lstatobj.st_mode):
+ show_unmerge("---", unmerge_desc["!dir"], file_type, obj)
+ continue
+ mydirs.add((obj, (lstatobj.st_dev, lstatobj.st_ino)))
+ elif pkgfiles[objkey][0] == "sym":
+ if not islink:
+ show_unmerge("---", unmerge_desc["!sym"], file_type, obj)
+ continue
+
+ # If this symlink points to a directory then we don't want
+ # to unmerge it if there are any other packages that
+ # installed files into the directory via this symlink
+ # (see bug #326685).
+ # TODO: Resolving a symlink to a directory will require
+ # simulation if $ROOT != / and the link is not relative.
+ if islink and statobj and stat.S_ISDIR(statobj.st_mode) \
+ and obj.startswith(real_root):
+
+ relative_path = obj[real_root_len:]
+ try:
+ target_dir_contents = os.listdir(obj)
+ except OSError:
+ pass
+ else:
+ if target_dir_contents:
+ # If all the children are regular files owned
+ # by this package, then the symlink should be
+ # safe to unmerge.
+ all_owned = True
+ for child in target_dir_contents:
+ child = os.path.join(relative_path, child)
+ if not self.isowner(child):
+ all_owned = False
+ break
+ try:
+ child_lstat = os.lstat(os.path.join(
+ real_root, child.lstrip(os.sep)))
+ except OSError:
+ continue
+
+ if not stat.S_ISREG(child_lstat.st_mode):
+ # Nested symlinks or directories make
+ # the issue very complex, so just
+ # preserve the symlink in order to be
+ # on the safe side.
+ all_owned = False
+ break
+
+ if not all_owned:
+ protected_symlinks.setdefault(
+ (statobj.st_dev, statobj.st_ino),
+ []).append(relative_path)
+ show_unmerge("---", unmerge_desc["!empty"],
+ file_type, obj)
+ continue
+
+ # Go ahead and unlink symlinks to directories here when
+ # they're actually recorded as symlinks in the contents.
+ # Normally, symlinks such as /lib -> lib64 are not recorded
+ # as symlinks in the contents of a package. If a package
+ # installs something into ${D}/lib/, it is recorded in the
+ # contents as a directory even if it happens to correspond
+ # to a symlink when it's merged to the live filesystem.
+ try:
+ unlink(obj, lstatobj)
+ show_unmerge("<<<", "", file_type, obj)
+ except (OSError, IOError) as e:
+ if e.errno not in ignored_unlink_errnos:
+ raise
+ del e
+ show_unmerge("!!!", "", file_type, obj)
+ elif pkgfiles[objkey][0] == "obj":
+ if statobj is None or not stat.S_ISREG(statobj.st_mode):
+ show_unmerge("---", unmerge_desc["!obj"], file_type, obj)
+ continue
+ mymd5 = None
+ try:
+ mymd5 = perf_md5(obj, calc_prelink=calc_prelink)
+ except FileNotFound as e:
+ # the file has disappeared between now and our stat call
+ show_unmerge("---", unmerge_desc["!obj"], file_type, obj)
+ continue
+
+					# Lowercasing is needed for backwards compatibility,
+					# since db entries used to store MD5 sums in upper-case.
+ if mymd5 != pkgfiles[objkey][2].lower():
+ show_unmerge("---", unmerge_desc["!md5"], file_type, obj)
+ continue
+ try:
+ unlink(obj, lstatobj)
+ except (OSError, IOError) as e:
+ if e.errno not in ignored_unlink_errnos:
+ raise
+ del e
+ show_unmerge("<<<", "", file_type, obj)
+ elif pkgfiles[objkey][0] == "fif":
+ if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
+ show_unmerge("---", unmerge_desc["!fif"], file_type, obj)
+ continue
+ show_unmerge("---", "", file_type, obj)
+ elif pkgfiles[objkey][0] == "dev":
+ show_unmerge("---", "", file_type, obj)
+
+ mydirs = sorted(mydirs)
+ mydirs.reverse()
+
+ for obj, inode_key in mydirs:
+ # Treat any directory named "info" as a candidate here,
+ # since it might have been in INFOPATH previously even
+ # though it may not be there now.
+ if inode_key in infodirs_inodes or \
+ os.path.basename(obj) == "info":
+ try:
+ remaining = os.listdir(obj)
+ except OSError:
+ pass
+ else:
+ cleanup_info_dir = ()
+ if remaining and \
+ len(remaining) <= len(infodir_cleanup):
+ if not set(remaining).difference(infodir_cleanup):
+ cleanup_info_dir = remaining
+
+ for child in cleanup_info_dir:
+ child = os.path.join(obj, child)
+ try:
+ lstatobj = os.lstat(child)
+ if stat.S_ISREG(lstatobj.st_mode):
+ unlink(child, lstatobj)
+ show_unmerge("<<<", "", "obj", child)
+ except EnvironmentError as e:
+ if e.errno not in ignored_unlink_errnos:
+ raise
+ del e
+ show_unmerge("!!!", "", "obj", child)
+ try:
+ if bsd_chflags:
+ lstatobj = os.lstat(obj)
+ if lstatobj.st_flags != 0:
+ bsd_chflags.lchflags(obj, 0)
+ parent_name = os.path.dirname(obj)
+ # Use normal stat/chflags for the parent since we want to
+ # follow any symlinks to the real parent directory.
+ pflags = os.stat(parent_name).st_flags
+ if pflags != 0:
+ bsd_chflags.chflags(parent_name, 0)
+ try:
+ os.rmdir(obj)
+ finally:
+ if bsd_chflags and pflags != 0:
+ # Restore the parent flags we saved before unlinking
+ bsd_chflags.chflags(parent_name, pflags)
+ show_unmerge("<<<", "", "dir", obj)
+ except EnvironmentError as e:
+ if e.errno not in ignored_rmdir_errnos:
+ raise
+ if e.errno != errno.ENOENT:
+ show_unmerge("---", unmerge_desc["!empty"], "dir", obj)
+ del e
+ else:
+ # When a directory is successfully removed, there's
+ # no need to protect symlinks that point to it.
+ unmerge_syms = protected_symlinks.pop(inode_key, None)
+ if unmerge_syms is not None:
+ for relative_path in unmerge_syms:
+ obj = os.path.join(real_root,
+ relative_path.lstrip(os.sep))
+ try:
+ unlink(obj, os.lstat(obj))
+ show_unmerge("<<<", "", "sym", obj)
+ except (OSError, IOError) as e:
+ if e.errno not in ignored_unlink_errnos:
+ raise
+ del e
+ show_unmerge("!!!", "", "sym", obj)
+
+ if protected_symlinks:
+ msg = "One or more symlinks to directories have been " + \
+ "preserved in order to ensure that files installed " + \
+ "via these symlinks remain accessible:"
+ lines = textwrap.wrap(msg, 72)
+ lines.append("")
+ flat_list = set()
+ flat_list.update(*protected_symlinks.values())
+ flat_list = sorted(flat_list)
+ for f in flat_list:
+ lines.append("\t%s" % (os.path.join(real_root,
+ f.lstrip(os.sep))))
+ lines.append("")
+ self._elog("eerror", "postrm", lines)
+
+ # Remove stale entries from config memory.
+ if stale_confmem:
+ for filename in stale_confmem:
+ del cfgfiledict[filename]
+ writedict(cfgfiledict, self.vartree.dbapi._conf_mem_file)
+
+ #remove self from vartree database so that our own virtual gets zapped if we're the last node
+ self.vartree.zap(self.mycpv)
+
+ def isowner(self, filename, destroot=None):
+ """
+ Check if a file belongs to this package. This may
+ result in a stat call for the parent directory of
+ every installed file, since the inode numbers are
+ used to work around the problem of ambiguous paths
+ caused by symlinked directories. The results of
+ stat calls are cached to optimize multiple calls
+ to this method.
+
+ @param filename:
+ @type filename:
+ @param destroot:
+ @type destroot:
+ @rtype: Boolean
+ @returns:
+ 1. True if this package owns the file.
+ 2. False if this package does not own the file.
+ """
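+		# e.g. (hypothetical path, relative to ROOT with a leading slash):
+		#
+		#     if dbl.isowner("/usr/bin/foo"):
+		#         ...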
+
+ if destroot is not None and destroot != self._eroot:
+ warnings.warn("The second parameter of the " + \
+ "portage.dbapi.vartree.dblink.isowner()" + \
+ " is now unused. Instead " + \
+ "self.settings['EROOT'] will be used.",
+ DeprecationWarning, stacklevel=2)
+
+ return bool(self._match_contents(filename))
+
+ def _match_contents(self, filename, destroot=None):
+ """
+ The matching contents entry is returned, which is useful
+ since the path may differ from the one given by the caller,
+ due to symlinks.
+
+ @rtype: String
+ @return: the contents entry corresponding to the given path, or False
+ if the file is not owned by this package.
+ """
+
+ filename = _unicode_decode(filename,
+ encoding=_encodings['content'], errors='strict')
+
+ if destroot is not None and destroot != self._eroot:
+ warnings.warn("The second parameter of the " + \
+ "portage.dbapi.vartree.dblink._match_contents()" + \
+ " is now unused. Instead " + \
+ "self.settings['ROOT'] will be used.",
+ DeprecationWarning, stacklevel=2)
+
+ # don't use EROOT here, image already contains EPREFIX
+ destroot = self.settings['ROOT']
+
+		# The given filename argument might have a different encoding than
+		# the filenames contained in the contents, so use separate wrapped os
+ # modules for each. The basename is more likely to contain non-ascii
+ # characters than the directory path, so use os_filename_arg for all
+ # operations involving the basename of the filename arg.
+ os_filename_arg = _os_merge
+ os = _os_merge
+
+ try:
+ _unicode_encode(filename,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeEncodeError:
+ # The package appears to have been merged with a
+ # different value of sys.getfilesystemencoding(),
+ # so fall back to utf_8 if appropriate.
+ try:
+ _unicode_encode(filename,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeEncodeError:
+ pass
+ else:
+ os_filename_arg = portage.os
+
+ destfile = normalize_path(
+ os_filename_arg.path.join(destroot,
+ filename.lstrip(os_filename_arg.path.sep)))
+
+ pkgfiles = self.getcontents()
+ if pkgfiles and destfile in pkgfiles:
+ return destfile
+ if pkgfiles:
+ basename = os_filename_arg.path.basename(destfile)
+ if self._contents_basenames is None:
+
+ try:
+ for x in pkgfiles:
+ _unicode_encode(x,
+ encoding=_encodings['merge'],
+ errors='strict')
+ except UnicodeEncodeError:
+ # The package appears to have been merged with a
+ # different value of sys.getfilesystemencoding(),
+ # so fall back to utf_8 if appropriate.
+ try:
+ for x in pkgfiles:
+ _unicode_encode(x,
+ encoding=_encodings['fs'],
+ errors='strict')
+ except UnicodeEncodeError:
+ pass
+ else:
+ os = portage.os
+
+ self._contents_basenames = set(
+ os.path.basename(x) for x in pkgfiles)
+ if basename not in self._contents_basenames:
+ # This is a shortcut that, in most cases, allows us to
+ # eliminate this package as an owner without the need
+ # to examine inode numbers of parent directories.
+ return False
+
+ # Use stat rather than lstat since we want to follow
+ # any symlinks to the real parent directory.
+ parent_path = os_filename_arg.path.dirname(destfile)
+ try:
+ parent_stat = os_filename_arg.stat(parent_path)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ return False
+ if self._contents_inodes is None:
+
+ if os is _os_merge:
+ try:
+ for x in pkgfiles:
+ _unicode_encode(x,
+ encoding=_encodings['merge'],
+ errors='strict')
+ except UnicodeEncodeError:
+ # The package appears to have been merged with a
+ # different value of sys.getfilesystemencoding(),
+ # so fall back to utf_8 if appropriate.
+ try:
+ for x in pkgfiles:
+ _unicode_encode(x,
+ encoding=_encodings['fs'],
+ errors='strict')
+ except UnicodeEncodeError:
+ pass
+ else:
+ os = portage.os
+
+ self._contents_inodes = {}
+ parent_paths = set()
+ for x in pkgfiles:
+ p_path = os.path.dirname(x)
+ if p_path in parent_paths:
+ continue
+ parent_paths.add(p_path)
+ try:
+ s = os.stat(p_path)
+ except OSError:
+ pass
+ else:
+ inode_key = (s.st_dev, s.st_ino)
+ # Use lists of paths in case multiple
+ # paths reference the same inode.
+ p_path_list = self._contents_inodes.get(inode_key)
+ if p_path_list is None:
+ p_path_list = []
+ self._contents_inodes[inode_key] = p_path_list
+ if p_path not in p_path_list:
+ p_path_list.append(p_path)
+
+ p_path_list = self._contents_inodes.get(
+ (parent_stat.st_dev, parent_stat.st_ino))
+ if p_path_list:
+ for p_path in p_path_list:
+ x = os_filename_arg.path.join(p_path, basename)
+ if x in pkgfiles:
+ return x
+
+ return False
+
+ def _linkmap_rebuild(self, **kwargs):
+ """
+		Rebuild self._linkmap if it's not broken due to a missing
+		scanelf binary. Also, return early if preserve-libs is disabled
+ and the preserve-libs registry is empty.
+ """
+ if self._linkmap_broken or \
+ self.vartree.dbapi._linkmap is None or \
+ self.vartree.dbapi._plib_registry is None or \
+ ("preserve-libs" not in self.settings.features and \
+ not self.vartree.dbapi._plib_registry.hasEntries()):
+ return
+ try:
+ self.vartree.dbapi._linkmap.rebuild(**kwargs)
+ except CommandNotFound as e:
+ self._linkmap_broken = True
+ self._display_merge(_("!!! Disabling preserve-libs " \
+ "due to error: Command Not Found: %s\n") % (e,),
+ level=logging.ERROR, noiselevel=-1)
+
+ def _find_libs_to_preserve(self, unmerge=False):
+ """
+ Get set of relative paths for libraries to be preserved. When
+ unmerge is False, file paths to preserve are selected from
+ self._installed_instance. Otherwise, paths are selected from
+ self.
+ """
+ if self._linkmap_broken or \
+ self.vartree.dbapi._linkmap is None or \
+ self.vartree.dbapi._plib_registry is None or \
+ (not unmerge and self._installed_instance is None) or \
+ "preserve-libs" not in self.settings.features:
+ return set()
+
+ os = _os_merge
+ linkmap = self.vartree.dbapi._linkmap
+ if unmerge:
+ installed_instance = self
+ else:
+ installed_instance = self._installed_instance
+ old_contents = installed_instance.getcontents()
+ root = self.settings['ROOT']
+ root_len = len(root) - 1
+ lib_graph = digraph()
+ path_node_map = {}
+
+ def path_to_node(path):
+ node = path_node_map.get(path)
+ if node is None:
+ node = LinkageMap._LibGraphNode(linkmap._obj_key(path))
+ alt_path_node = lib_graph.get(node)
+ if alt_path_node is not None:
+ node = alt_path_node
+ node.alt_paths.add(path)
+ path_node_map[path] = node
+ return node
+
+ consumer_map = {}
+ provider_nodes = set()
+ # Create provider nodes and add them to the graph.
+ for f_abs in old_contents:
+
+ if os is _os_merge:
+ try:
+ _unicode_encode(f_abs,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeEncodeError:
+ # The package appears to have been merged with a
+ # different value of sys.getfilesystemencoding(),
+ # so fall back to utf_8 if appropriate.
+ try:
+ _unicode_encode(f_abs,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeEncodeError:
+ pass
+ else:
+ os = portage.os
+
+ f = f_abs[root_len:]
+ if not unmerge and self.isowner(f):
+				# We have an identically named replacement file,
+ # so we don't try to preserve the old copy.
+ continue
+ try:
+ consumers = linkmap.findConsumers(f,
+ exclude_providers=(installed_instance.isowner,))
+ except KeyError:
+ continue
+ if not consumers:
+ continue
+ provider_node = path_to_node(f)
+ lib_graph.add(provider_node, None)
+ provider_nodes.add(provider_node)
+ consumer_map[provider_node] = consumers
+
+ # Create consumer nodes and add them to the graph.
+ # Note that consumers can also be providers.
+ for provider_node, consumers in consumer_map.items():
+ for c in consumers:
+ consumer_node = path_to_node(c)
+ if installed_instance.isowner(c) and \
+ consumer_node not in provider_nodes:
+ # This is not a provider, so it will be uninstalled.
+ continue
+ lib_graph.add(provider_node, consumer_node)
+
+ # Locate nodes which should be preserved. They consist of all
+ # providers that are reachable from consumers that are not
+ # providers themselves.
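+		# For example (hypothetical sonames): if an installed app that is
+		# not itself a provider links against libfoo.so.1, which in turn
+		# links against libbar.so.1, both libraries are reachable here and
+		# will be preserved.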
+ preserve_nodes = set()
+ for consumer_node in lib_graph.root_nodes():
+ if consumer_node in provider_nodes:
+ continue
+ # Preserve all providers that are reachable from this consumer.
+ node_stack = lib_graph.child_nodes(consumer_node)
+ while node_stack:
+ provider_node = node_stack.pop()
+ if provider_node in preserve_nodes:
+ continue
+ preserve_nodes.add(provider_node)
+ node_stack.extend(lib_graph.child_nodes(provider_node))
+
+ preserve_paths = set()
+ for preserve_node in preserve_nodes:
+ # Preserve the library itself, and also preserve the
+ # soname symlink which is the only symlink that is
+ # strictly required.
+ hardlinks = set()
+ soname_symlinks = set()
+ soname = linkmap.getSoname(next(iter(preserve_node.alt_paths)))
+ for f in preserve_node.alt_paths:
+ f_abs = os.path.join(root, f.lstrip(os.sep))
+ try:
+ if stat.S_ISREG(os.lstat(f_abs).st_mode):
+ hardlinks.add(f)
+ elif os.path.basename(f) == soname:
+ soname_symlinks.add(f)
+ except OSError:
+ pass
+
+ if hardlinks:
+ preserve_paths.update(hardlinks)
+ preserve_paths.update(soname_symlinks)
+
+ return preserve_paths
+
+ def _add_preserve_libs_to_contents(self, preserve_paths):
+ """
+ Preserve libs returned from _find_libs_to_preserve().
+ """
+
+ if not preserve_paths:
+ return
+
+ os = _os_merge
+ showMessage = self._display_merge
+ root = self.settings['ROOT']
+
+ # Copy contents entries from the old package to the new one.
+ new_contents = self.getcontents().copy()
+ old_contents = self._installed_instance.getcontents()
+ for f in sorted(preserve_paths):
+ f = _unicode_decode(f,
+ encoding=_encodings['content'], errors='strict')
+ f_abs = os.path.join(root, f.lstrip(os.sep))
+ contents_entry = old_contents.get(f_abs)
+ if contents_entry is None:
+ # This will probably never happen, but it might if one of the
+ # paths returned from findConsumers() refers to one of the libs
+ # that should be preserved yet the path is not listed in the
+ # contents. Such a path might belong to some other package, so
+ # it shouldn't be preserved here.
+ showMessage(_("!!! File '%s' will not be preserved "
+ "due to missing contents entry\n") % (f_abs,),
+ level=logging.ERROR, noiselevel=-1)
+ preserve_paths.remove(f)
+ continue
+ new_contents[f_abs] = contents_entry
+ obj_type = contents_entry[0]
+ showMessage(_(">>> needed %s %s\n") % (obj_type, f_abs),
+ noiselevel=-1)
+ # Add parent directories to contents if necessary.
+ parent_dir = os.path.dirname(f_abs)
+ while len(parent_dir) > len(root):
+ new_contents[parent_dir] = ["dir"]
+ prev = parent_dir
+ parent_dir = os.path.dirname(parent_dir)
+ if prev == parent_dir:
+ break
+ outfile = atomic_ofstream(os.path.join(self.dbtmpdir, "CONTENTS"))
+ write_contents(new_contents, root, outfile)
+ outfile.close()
+ self._clear_contents_cache()
+
+ def _find_unused_preserved_libs(self, unmerge_no_replacement):
+ """
+ Find preserved libraries that don't have any consumers left.
+ """
+
+ if self._linkmap_broken or \
+ self.vartree.dbapi._linkmap is None or \
+ self.vartree.dbapi._plib_registry is None or \
+ not self.vartree.dbapi._plib_registry.hasEntries():
+ return {}
+
+ # Since preserved libraries can be consumers of other preserved
+ # libraries, use a graph to track consumer relationships.
+ plib_dict = self.vartree.dbapi._plib_registry.getPreservedLibs()
+ linkmap = self.vartree.dbapi._linkmap
+ lib_graph = digraph()
+ preserved_nodes = set()
+ preserved_paths = set()
+ path_cpv_map = {}
+ path_node_map = {}
+ root = self.settings['ROOT']
+
+ def path_to_node(path):
+ node = path_node_map.get(path)
+ if node is None:
+ node = LinkageMap._LibGraphNode(linkmap._obj_key(path))
+ alt_path_node = lib_graph.get(node)
+ if alt_path_node is not None:
+ node = alt_path_node
+ node.alt_paths.add(path)
+ path_node_map[path] = node
+ return node
+
+ for cpv, plibs in plib_dict.items():
+ for f in plibs:
+ path_cpv_map[f] = cpv
+ preserved_node = path_to_node(f)
+ if not preserved_node.file_exists():
+ continue
+ lib_graph.add(preserved_node, None)
+ preserved_paths.add(f)
+ preserved_nodes.add(preserved_node)
+ for c in self.vartree.dbapi._linkmap.findConsumers(f):
+ consumer_node = path_to_node(c)
+ if not consumer_node.file_exists():
+ continue
+ # Note that consumers may also be providers.
+ lib_graph.add(preserved_node, consumer_node)
+
+ # Eliminate consumers having providers with the same soname as an
+ # installed library that is not preserved. This eliminates
+ # libraries that are erroneously preserved due to a move from one
+ # directory to another.
+ # Also eliminate consumers that are going to be unmerged if
+ # unmerge_no_replacement is True.
+ provider_cache = {}
+ for preserved_node in preserved_nodes:
+ soname = linkmap.getSoname(preserved_node)
+ for consumer_node in lib_graph.parent_nodes(preserved_node):
+ if consumer_node in preserved_nodes:
+ continue
+ if unmerge_no_replacement:
+ will_be_unmerged = True
+ for path in consumer_node.alt_paths:
+ if not self.isowner(path):
+ will_be_unmerged = False
+ break
+ if will_be_unmerged:
+ # This consumer is not preserved and it is
+ # being unmerged, so drop this edge.
+ lib_graph.remove_edge(preserved_node, consumer_node)
+ continue
+
+ providers = provider_cache.get(consumer_node)
+ if providers is None:
+ providers = linkmap.findProviders(consumer_node)
+ provider_cache[consumer_node] = providers
+ providers = providers.get(soname)
+ if providers is None:
+ continue
+ for provider in providers:
+ if provider in preserved_paths:
+ continue
+ provider_node = path_to_node(provider)
+ if not provider_node.file_exists():
+ continue
+ if provider_node in preserved_nodes:
+ continue
+ # An alternative provider seems to be
+ # installed, so drop this edge.
+ lib_graph.remove_edge(preserved_node, consumer_node)
+ break
+
+ cpv_lib_map = {}
+ while lib_graph:
+ root_nodes = preserved_nodes.intersection(lib_graph.root_nodes())
+ if not root_nodes:
+ break
+ lib_graph.difference_update(root_nodes)
+ unlink_list = set()
+ for node in root_nodes:
+ unlink_list.update(node.alt_paths)
+ unlink_list = sorted(unlink_list)
+ for obj in unlink_list:
+ cpv = path_cpv_map.get(obj)
+ if cpv is None:
+ # This means that a symlink is in the preserved libs
+ # registry, but the actual lib it points to is not.
+ self._display_merge(_("!!! symlink to lib is preserved, "
+ "but not the lib itself:\n!!! '%s'\n") % (obj,),
+ level=logging.ERROR, noiselevel=-1)
+ continue
+ removed = cpv_lib_map.get(cpv)
+ if removed is None:
+ removed = set()
+ cpv_lib_map[cpv] = removed
+ removed.add(obj)
+
+ return cpv_lib_map
+
+ def _remove_preserved_libs(self, cpv_lib_map):
+ """
+ Remove files returned from _find_unused_preserved_libs().
+ """
+
+ os = _os_merge
+
+ files_to_remove = set()
+ for files in cpv_lib_map.values():
+ files_to_remove.update(files)
+ files_to_remove = sorted(files_to_remove)
+ showMessage = self._display_merge
+ root = self.settings['ROOT']
+
+ parent_dirs = set()
+ for obj in files_to_remove:
+ obj = os.path.join(root, obj.lstrip(os.sep))
+ parent_dirs.add(os.path.dirname(obj))
+ if os.path.islink(obj):
+ obj_type = _("sym")
+ else:
+ obj_type = _("obj")
+ try:
+ os.unlink(obj)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ else:
+ showMessage(_("<<< !needed %s %s\n") % (obj_type, obj),
+ noiselevel=-1)
+
+ # Remove empty parent directories if possible.
+ while parent_dirs:
+ x = parent_dirs.pop()
+ while True:
+ try:
+ os.rmdir(x)
+ except OSError:
+ break
+ prev = x
+ x = os.path.dirname(x)
+ if x == prev:
+ break
+
+ self.vartree.dbapi._plib_registry.pruneNonExisting()
+
+ def _collision_protect(self, srcroot, destroot, mypkglist,
+ file_list, symlink_list):
+
+ os = _os_merge
+
+ collision_ignore = set([normalize_path(myignore) for myignore in \
+ portage.util.shlex_split(
+ self.settings.get("COLLISION_IGNORE", ""))])
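+		# COLLISION_IGNORE holds a space-separated list of paths whose
+		# collisions are tolerated, e.g. (hypothetical value):
+		#     COLLISION_IGNORE="/lib/modules /usr/share/foo"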
+
+ # For collisions with preserved libraries, the current package
+ # will assume ownership and the libraries will be unregistered.
+ if self.vartree.dbapi._plib_registry is None:
+ # preserve-libs is entirely disabled
+ plib_cpv_map = None
+ plib_paths = None
+ plib_inodes = {}
+ else:
+ plib_dict = self.vartree.dbapi._plib_registry.getPreservedLibs()
+ plib_cpv_map = {}
+ plib_paths = set()
+ for cpv, paths in plib_dict.items():
+ plib_paths.update(paths)
+ for f in paths:
+ plib_cpv_map[f] = cpv
+ plib_inodes = self._lstat_inode_map(plib_paths)
+
+ plib_collisions = {}
+
+ showMessage = self._display_merge
+ stopmerge = False
+ collisions = []
+ symlink_collisions = []
+ destroot = self.settings['ROOT']
+ showMessage(_(" %s checking %d files for package collisions\n") % \
+ (colorize("GOOD", "*"), len(file_list) + len(symlink_list)))
+ for i, (f, f_type) in enumerate(chain(
+ ((f, "reg") for f in file_list),
+ ((f, "sym") for f in symlink_list))):
+ if i % 1000 == 0 and i != 0:
+ showMessage(_("%d files checked ...\n") % i)
+
+ dest_path = normalize_path(
+ os.path.join(destroot, f.lstrip(os.path.sep)))
+ try:
+ dest_lstat = os.lstat(dest_path)
+ except EnvironmentError as e:
+ if e.errno == errno.ENOENT:
+ del e
+ continue
+ elif e.errno == errno.ENOTDIR:
+ del e
+ # A non-directory is in a location where this package
+ # expects to have a directory.
+ dest_lstat = None
+ parent_path = dest_path
+ while len(parent_path) > len(destroot):
+ parent_path = os.path.dirname(parent_path)
+ try:
+ dest_lstat = os.lstat(parent_path)
+ break
+ except EnvironmentError as e:
+ if e.errno != errno.ENOTDIR:
+ raise
+ del e
+ if not dest_lstat:
+ raise AssertionError(
+ "unable to find non-directory " + \
+ "parent for '%s'" % dest_path)
+ dest_path = parent_path
+ f = os.path.sep + dest_path[len(destroot):]
+ if f in collisions:
+ continue
+ else:
+ raise
+ if f[0] != "/":
+ f="/"+f
+
+ if stat.S_ISDIR(dest_lstat.st_mode):
+ if f_type == "sym":
+ # This case is explicitly banned
+ # by PMS (see bug #326685).
+ symlink_collisions.append(f)
+ collisions.append(f)
+ continue
+
+ plibs = plib_inodes.get((dest_lstat.st_dev, dest_lstat.st_ino))
+ if plibs:
+ for path in plibs:
+ cpv = plib_cpv_map[path]
+ paths = plib_collisions.get(cpv)
+ if paths is None:
+ paths = set()
+ plib_collisions[cpv] = paths
+ paths.add(path)
+ # The current package will assume ownership and the
+ # libraries will be unregistered, so exclude this
+ # path from the normal collisions.
+ continue
+
+ isowned = False
+ full_path = os.path.join(destroot, f.lstrip(os.path.sep))
+ for ver in mypkglist:
+ if ver.isowner(f):
+ isowned = True
+ break
+ if not isowned and self.isprotected(full_path):
+ isowned = True
+ if not isowned:
+ stopmerge = True
+ if collision_ignore:
+ if f in collision_ignore:
+ stopmerge = False
+ else:
+ for myignore in collision_ignore:
+ if f.startswith(myignore + os.path.sep):
+ stopmerge = False
+ break
+ if stopmerge:
+ collisions.append(f)
+ return collisions, symlink_collisions, plib_collisions
+
+ def _lstat_inode_map(self, path_iter):
+ """
+ Use lstat to create a map of the form:
+ {(st_dev, st_ino) : set([path1, path2, ...])}
+ Multiple paths may reference the same inode due to hardlinks.
+ All lstat() calls are relative to self.myroot.
+ """
+
+ os = _os_merge
+
+ root = self.settings['ROOT']
+ inode_map = {}
+ for f in path_iter:
+ path = os.path.join(root, f.lstrip(os.sep))
+ try:
+ st = os.lstat(path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ENOTDIR):
+ raise
+ del e
+ continue
+ key = (st.st_dev, st.st_ino)
+ paths = inode_map.get(key)
+ if paths is None:
+ paths = set()
+ inode_map[key] = paths
+ paths.add(f)
+ return inode_map
+
+ def _security_check(self, installed_instances):
+ if not installed_instances:
+ return 0
+
+ os = _os_merge
+
+ showMessage = self._display_merge
+
+ file_paths = set()
+ for dblnk in installed_instances:
+ file_paths.update(dblnk.getcontents())
+ inode_map = {}
+ real_paths = set()
+ for i, path in enumerate(file_paths):
+
+ if os is _os_merge:
+ try:
+ _unicode_encode(path,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeEncodeError:
+ # The package appears to have been merged with a
+ # different value of sys.getfilesystemencoding(),
+ # so fall back to utf_8 if appropriate.
+ try:
+ _unicode_encode(path,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeEncodeError:
+ pass
+ else:
+ os = portage.os
+
+ try:
+ s = os.lstat(path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ENOTDIR):
+ raise
+ del e
+ continue
+ if not stat.S_ISREG(s.st_mode):
+ continue
+ path = os.path.realpath(path)
+ if path in real_paths:
+ continue
+ real_paths.add(path)
+ if s.st_nlink > 1 and \
+ s.st_mode & (stat.S_ISUID | stat.S_ISGID):
+ k = (s.st_dev, s.st_ino)
+ inode_map.setdefault(k, []).append((path, s))
+ suspicious_hardlinks = []
+ for path_list in inode_map.values():
+ path, s = path_list[0]
+ if len(path_list) == s.st_nlink:
+ # All hardlinks seem to be owned by this package.
+ continue
+ suspicious_hardlinks.append(path_list)
+ if not suspicious_hardlinks:
+ return 0
+
+ msg = []
+ msg.append(_("suid/sgid file(s) "
+ "with suspicious hardlink(s):"))
+ msg.append("")
+ for path_list in suspicious_hardlinks:
+ for path, s in path_list:
+ msg.append("\t%s" % path)
+ msg.append("")
+ msg.append(_("See the Gentoo Security Handbook "
+ "guide for advice on how to proceed."))
+
+ self._eerror("preinst", msg)
+
+ return 1
+
+ def _eqawarn(self, phase, lines):
+ self._elog("eqawarn", phase, lines)
+
+ def _eerror(self, phase, lines):
+ self._elog("eerror", phase, lines)
+
+ def _elog(self, funcname, phase, lines):
+ func = getattr(portage.elog.messages, funcname)
+ if self._scheduler is None:
+ for l in lines:
+ func(l, phase=phase, key=self.mycpv)
+ else:
+ background = self.settings.get("PORTAGE_BACKGROUND") == "1"
+ log_path = None
+ if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+ log_path = self.settings.get("PORTAGE_LOG_FILE")
+ out = io.StringIO()
+ for line in lines:
+ func(line, phase=phase, key=self.mycpv, out=out)
+ msg = out.getvalue()
+ self._scheduler.output(msg,
+ background=background, log_path=log_path)
+
+ def _elog_process(self, phasefilter=None):
+ cpv = self.mycpv
+ if self._pipe is None:
+ elog_process(cpv, self.settings, phasefilter=phasefilter)
+ else:
+ logdir = os.path.join(self.settings["T"], "logging")
+ ebuild_logentries = collect_ebuild_messages(logdir)
+ py_logentries = collect_messages(key=cpv).get(cpv, {})
+ logentries = _merge_logentries(py_logentries, ebuild_logentries)
+ funcnames = {
+ "INFO": "einfo",
+ "LOG": "elog",
+ "WARN": "ewarn",
+ "QA": "eqawarn",
+ "ERROR": "eerror"
+ }
+ str_buffer = []
+ for phase, messages in logentries.items():
+ for key, lines in messages:
+ funcname = funcnames[key]
+ if isinstance(lines, basestring):
+ lines = [lines]
+ for line in lines:
+ fields = (funcname, phase, cpv, line.rstrip('\n'))
+ str_buffer.append(' '.join(fields))
+ str_buffer.append('\n')
+ if str_buffer:
+ os.write(self._pipe, _unicode_encode(''.join(str_buffer)))
+
+ def _emerge_log(self, msg):
+ emergelog(False, msg)
+
+ def treewalk(self, srcroot, destroot, inforoot, myebuild, cleanup=0,
+ mydbapi=None, prev_mtimes=None, counter=None):
+ """
+
+ This function does the following:
+
+ calls self._preserve_libs if FEATURES=preserve-libs
+ calls self._collision_protect if FEATURES=collision-protect
+ calls doebuild(mydo=pkg_preinst)
+ Merges the package to the livefs
+ unmerges old version (if required)
+ calls doebuild(mydo=pkg_postinst)
+ calls env_update
+
+ @param srcroot: Typically this is ${D}
+ @type srcroot: String (Path)
+ @param destroot: ignored, self.settings['ROOT'] is used instead
+ @type destroot: String (Path)
+ @param inforoot: root of the vardb entry (the directory containing the package's metadata files, e.g. SLOT)
+ @type inforoot: String (Path)
+ @param myebuild: path to the ebuild that we are processing
+ @type myebuild: String (Path)
+ @param mydbapi: dbapi which is handed to doebuild.
+ @type mydbapi: portdbapi instance
+ @param prev_mtimes: { Filename:mtime } mapping for env_update
+ @type prev_mtimes: Dictionary
+ @rtype: Boolean
+ @returns:
+ 1. 0 on success
+ 2. 1 on failure
+
+ secondhand is a list of symlinks that have been skipped due to their target
+ not existing; we will merge these symlinks at a later time.
+ """
+
+ os = _os_merge
+
+ srcroot = _unicode_decode(srcroot,
+ encoding=_encodings['content'], errors='strict')
+ destroot = self.settings['ROOT']
+ inforoot = _unicode_decode(inforoot,
+ encoding=_encodings['content'], errors='strict')
+ myebuild = _unicode_decode(myebuild,
+ encoding=_encodings['content'], errors='strict')
+
+ showMessage = self._display_merge
+ srcroot = normalize_path(srcroot).rstrip(os.path.sep) + os.path.sep
+
+ if not os.path.isdir(srcroot):
+ showMessage(_("!!! Directory Not Found: D='%s'\n") % srcroot,
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ slot = ''
+ for var_name in ('CHOST', 'SLOT'):
+ if var_name == 'CHOST' and self.cat == 'virtual':
+ try:
+ os.unlink(os.path.join(inforoot, var_name))
+ except OSError:
+ pass
+ continue
+
+ try:
+ val = io.open(_unicode_encode(
+ os.path.join(inforoot, var_name),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace').readline().strip()
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ val = ''
+
+ if var_name == 'SLOT':
+ slot = val
+
+ if not slot.strip():
+ slot = self.settings.get(var_name, '')
+ if not slot.strip():
+ showMessage(_("!!! SLOT is undefined\n"),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+ write_atomic(os.path.join(inforoot, var_name), slot + '\n')
+
+ if val != self.settings.get(var_name, ''):
+ self._eqawarn('preinst',
+ [_("QA Notice: Expected %(var_name)s='%(expected_value)s', got '%(actual_value)s'\n") % \
+ {"var_name":var_name, "expected_value":self.settings.get(var_name, ''), "actual_value":val}])
+
+ def eerror(lines):
+ self._eerror("preinst", lines)
+
+ if not os.path.exists(self.dbcatdir):
+ ensure_dirs(self.dbcatdir)
+
+ otherversions = []
+ for v in self.vartree.dbapi.cp_list(self.mysplit[0]):
+ otherversions.append(v.split("/")[1])
+
+ cp = self.mysplit[0]
+ slot_atom = "%s:%s" % (cp, slot)
+
+ # filter any old-style virtual matches
+ slot_matches = [cpv for cpv in self.vartree.dbapi.match(slot_atom) \
+ if cpv_getkey(cpv) == cp]
+
+ if self.mycpv not in slot_matches and \
+ self.vartree.dbapi.cpv_exists(self.mycpv):
+ # handle multislot or unapplied slotmove
+ slot_matches.append(self.mycpv)
+
+ others_in_slot = []
+ from portage import config
+ for cur_cpv in slot_matches:
+ # Clone the config in case one of these has to be unmerged since
+ # we need it to have private ${T} etc... for things like elog.
+ settings_clone = config(clone=self.settings)
+ settings_clone.pop("PORTAGE_BUILDIR_LOCKED", None)
+ settings_clone.reset()
+ others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
+ settings=settings_clone,
+ vartree=self.vartree, treetype="vartree",
+ scheduler=self._scheduler, pipe=self._pipe))
+
+ retval = self._security_check(others_in_slot)
+ if retval:
+ return retval
+
+ if slot_matches:
+ # Used by self.isprotected().
+ max_dblnk = None
+ max_counter = -1
+ for dblnk in others_in_slot:
+ cur_counter = self.vartree.dbapi.cpv_counter(dblnk.mycpv)
+ if cur_counter > max_counter:
+ max_counter = cur_counter
+ max_dblnk = dblnk
+ self._installed_instance = max_dblnk
+
+ # We check for unicode encoding issues after src_install. However,
+ # the check must be repeated here for binary packages (it's
+ # inexpensive since we call os.walk() here anyway).
+ unicode_errors = []
+
+ while True:
+
+ unicode_error = False
+
+ myfilelist = []
+ mylinklist = []
+ paths_with_newlines = []
+ srcroot_len = len(srcroot)
+ def onerror(e):
+ raise
+ for parent, dirs, files in os.walk(srcroot, onerror=onerror):
+ try:
+ parent = _unicode_decode(parent,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeDecodeError:
+ new_parent = _unicode_decode(parent,
+ encoding=_encodings['merge'], errors='replace')
+ new_parent = _unicode_encode(new_parent,
+ encoding=_encodings['merge'], errors='backslashreplace')
+ new_parent = _unicode_decode(new_parent,
+ encoding=_encodings['merge'], errors='replace')
+ os.rename(parent, new_parent)
+ unicode_error = True
+ unicode_errors.append(new_parent[srcroot_len:])
+ break
+
+ for fname in files:
+ try:
+ fname = _unicode_decode(fname,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeDecodeError:
+ fpath = portage._os.path.join(
+ parent.encode(_encodings['merge']), fname)
+ new_fname = _unicode_decode(fname,
+ encoding=_encodings['merge'], errors='replace')
+ new_fname = _unicode_encode(new_fname,
+ encoding=_encodings['merge'], errors='backslashreplace')
+ new_fname = _unicode_decode(new_fname,
+ encoding=_encodings['merge'], errors='replace')
+ new_fpath = os.path.join(parent, new_fname)
+ os.rename(fpath, new_fpath)
+ unicode_error = True
+ unicode_errors.append(new_fpath[srcroot_len:])
+ fname = new_fname
+ fpath = new_fpath
+ else:
+ fpath = os.path.join(parent, fname)
+
+ relative_path = fpath[srcroot_len:]
+
+ if "\n" in relative_path:
+ paths_with_newlines.append(relative_path)
+
+ file_mode = os.lstat(fpath).st_mode
+ if stat.S_ISREG(file_mode):
+ myfilelist.append(relative_path)
+ elif stat.S_ISLNK(file_mode):
+ # Note: os.walk puts symlinks to directories in the "dirs"
+ # list and it does not traverse them since that could lead
+ # to an infinite recursion loop.
+ mylinklist.append(relative_path)
+
+ if unicode_error:
+ break
+
+ if not unicode_error:
+ break
+
+ if unicode_errors:
+ eerror(portage._merge_unicode_error(unicode_errors))
+
+ if paths_with_newlines:
+ msg = []
+ msg.append(_("This package installs one or more files containing a newline (\\n) character:"))
+ msg.append("")
+ paths_with_newlines.sort()
+ for f in paths_with_newlines:
+ msg.append("\t/%s" % (f.replace("\n", "\\n")))
+ msg.append("")
+ msg.append(_("package %s NOT merged") % self.mycpv)
+ msg.append("")
+ eerror(msg)
+ return 1
+
+ # If there are no files to merge, and an installed package in the same
+ # slot has files, it probably means that something went wrong.
+ if self.settings.get("PORTAGE_PACKAGE_EMPTY_ABORT") == "1" and \
+ not myfilelist and not mylinklist and others_in_slot:
+ installed_files = None
+ for other_dblink in others_in_slot:
+ installed_files = other_dblink.getcontents()
+ if not installed_files:
+ continue
+ from textwrap import wrap
+ wrap_width = 72
+ msg = []
+ d = {
+ "new_cpv":self.mycpv,
+ "old_cpv":other_dblink.mycpv
+ }
+ msg.extend(wrap(_("The '%(new_cpv)s' package will not install "
+ "any files, but the currently installed '%(old_cpv)s'"
+ " package has the following files: ") % d, wrap_width))
+ msg.append("")
+ msg.extend(sorted(installed_files))
+ msg.append("")
+ msg.append(_("package %s NOT merged") % self.mycpv)
+ msg.append("")
+ msg.extend(wrap(
+ _("Manually run `emerge --unmerge =%s` if you "
+ "really want to remove the above files. Set "
+ "PORTAGE_PACKAGE_EMPTY_ABORT=\"0\" in "
+ "/etc/make.conf if you do not want to "
+ "abort in cases like this.") % other_dblink.mycpv,
+ wrap_width))
+ eerror(msg)
+ if installed_files:
+ return 1
+
+ # check for package collisions
+ blockers = self._blockers
+ if blockers is None:
+ blockers = []
+ collisions, symlink_collisions, plib_collisions = \
+ self._collision_protect(srcroot, destroot,
+ others_in_slot + blockers, myfilelist, mylinklist)
+
+ # Make sure the ebuild environment is initialized and that ${T}/elog
+ # exists for logging of collision-protect eerror messages.
+ if myebuild is None:
+ myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
+ doebuild_environment(myebuild, "preinst",
+ settings=self.settings, db=mydbapi)
+ self.settings["REPLACING_VERSIONS"] = " ".join(
+ [portage.versions.cpv_getversion(other.mycpv)
+ for other in others_in_slot])
+ prepare_build_dirs(settings=self.settings, cleanup=cleanup)
+
+ if collisions:
+ collision_protect = "collision-protect" in self.settings.features
+ protect_owned = "protect-owned" in self.settings.features
+ msg = _("This package will overwrite one or more files that"
+ " may belong to other packages (see list below).")
+ if not (collision_protect or protect_owned):
+ msg += _(" Add either \"collision-protect\" or"
+ " \"protect-owned\" to FEATURES in"
+ " make.conf if you would like the merge to abort"
+ " in cases like this. See the make.conf man page for"
+ " more information about these features.")
+ if self.settings.get("PORTAGE_QUIET") != "1":
+ msg += _(" You can use a command such as"
+ " `portageq owners / <filename>` to identify the"
+ " installed package that owns a file. If portageq"
+ " reports that only one package owns a file then do NOT"
+ " file a bug report. A bug report is only useful if it"
+ " identifies at least two or more packages that are known"
+ " to install the same file(s)."
+ " If a collision occurs and you"
+ " can not explain where the file came from then you"
+ " should simply ignore the collision since there is not"
+ " enough information to determine if a real problem"
+ " exists. Please do NOT file a bug report at"
+ " http://bugs.gentoo.org unless you report exactly which"
+ " two packages install the same file(s). Once again,"
+ " please do NOT file a bug report unless you have"
+ " completely understood the above message.")
+
+ self.settings["EBUILD_PHASE"] = "preinst"
+ from textwrap import wrap
+ msg = wrap(msg, 70)
+ if collision_protect:
+ msg.append("")
+ msg.append(_("package %s NOT merged") % self.settings.mycpv)
+ msg.append("")
+ msg.append(_("Detected file collision(s):"))
+ msg.append("")
+
+ for f in collisions:
+ msg.append("\t%s" % \
+ os.path.join(destroot, f.lstrip(os.path.sep)))
+
+ eerror(msg)
+
+ owners = None
+ if collision_protect or protect_owned or symlink_collisions:
+ msg = []
+ msg.append("")
+ msg.append(_("Searching all installed"
+ " packages for file collisions..."))
+ msg.append("")
+ msg.append(_("Press Ctrl-C to Stop"))
+ msg.append("")
+ eerror(msg)
+
+ if len(collisions) > 20:
+ # get_owners is slow for large numbers of files, so
+ # don't look them all up.
+ collisions = collisions[:20]
+ self.lockdb()
+ try:
+ owners = self.vartree.dbapi._owners.get_owners(collisions)
+ self.vartree.dbapi.flush_cache()
+ finally:
+ self.unlockdb()
+
+ for pkg, owned_files in owners.items():
+ cpv = pkg.mycpv
+ msg = []
+ msg.append("%s" % cpv)
+ for f in sorted(owned_files):
+ msg.append("\t%s" % os.path.join(destroot,
+ f.lstrip(os.path.sep)))
+ msg.append("")
+ eerror(msg)
+
+ if not owners:
+ eerror([_("None of the installed"
+ " packages claim the file(s)."), ""])
+
+ # The explanation about the collision and how to solve
+ # it may not be visible via a scrollback buffer, especially
+ # if the number of file collisions is large. Therefore,
+ # show a summary at the end.
+ abort = False
+ if collision_protect:
+ abort = True
+ msg = _("Package '%s' NOT merged due to file collisions.") % \
+ self.settings.mycpv
+ elif protect_owned and owners:
+ abort = True
+ msg = _("Package '%s' NOT merged due to file collisions.") % \
+ self.settings.mycpv
+ elif symlink_collisions:
+ abort = True
+ msg = _("Package '%s' NOT merged due to collision " + \
+ "between a symlink and a directory which is explicitly " + \
+ "forbidden by PMS (see bug #326685).") % \
+ (self.settings.mycpv,)
+ else:
+ msg = _("Package '%s' merged despite file collisions.") % \
+ self.settings.mycpv
+ msg += _(" If necessary, refer to your elog "
+ "messages for the whole content of the above message.")
+ eerror(wrap(msg, 70))
+
+ if abort:
+ return 1
+
+ # The merge process may move files out of the image directory,
+ # which causes invalidation of the .installed flag.
+ try:
+ os.unlink(os.path.join(
+ os.path.dirname(normalize_path(srcroot)), ".installed"))
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+
+ self.dbdir = self.dbtmpdir
+ self.delete()
+ ensure_dirs(self.dbtmpdir)
+
+ # run preinst script
+ showMessage(_(">>> Merging %(cpv)s to %(destroot)s\n") % \
+ {"cpv":self.mycpv, "destroot":destroot})
+ phase = EbuildPhase(background=False, phase="preinst",
+ scheduler=self._scheduler, settings=self.settings)
+ phase.start()
+ a = phase.wait()
+
+ # XXX: Decide how to handle failures here.
+ if a != os.EX_OK:
+ showMessage(_("!!! FAILED preinst: ")+str(a)+"\n",
+ level=logging.ERROR, noiselevel=-1)
+ return a
+
+ # copy "info" files (like SLOT, CFLAGS, etc.) into the database
+ for x in os.listdir(inforoot):
+ self.copyfile(inforoot+"/"+x)
+
+ # write local package counter for recording
+ if counter is None:
+ counter = self.vartree.dbapi.counter_tick(mycpv=self.mycpv)
+ io.open(_unicode_encode(os.path.join(self.dbtmpdir, 'COUNTER'),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='backslashreplace').write(_unicode_decode(str(counter)))
+
+ self.updateprotect()
+
+ #if we have a file containing previously-merged config file md5sums, grab it.
+ self.vartree.dbapi._fs_lock()
+ try:
+ cfgfiledict = grabdict(self.vartree.dbapi._conf_mem_file)
+ if "NOCONFMEM" in self.settings:
+ cfgfiledict["IGNORE"]=1
+ else:
+ cfgfiledict["IGNORE"]=0
+
+ # Always behave like --noconfmem is enabled for downgrades
+ # so that people who don't know about this option are less
+ # likely to get confused when doing upgrade/downgrade cycles.
+ pv_split = catpkgsplit(self.mycpv)[1:]
+ for other in others_in_slot:
+ if pkgcmp(pv_split, catpkgsplit(other.mycpv)[1:]) < 0:
+ cfgfiledict["IGNORE"] = 1
+ break
+
+ rval = self._merge_contents(srcroot, destroot, cfgfiledict)
+ if rval != os.EX_OK:
+ return rval
+ finally:
+ self.vartree.dbapi._fs_unlock()
+
+ # These caches are populated during collision-protect and the data
+ # they contain is now invalid. It's very important to invalidate
+ # the contents_inodes cache so that FEATURES=unmerge-orphans
+ # doesn't unmerge anything that belongs to this package that has
+ # just been merged.
+ for dblnk in others_in_slot:
+ dblnk._clear_contents_cache()
+ self._clear_contents_cache()
+
+ linkmap = self.vartree.dbapi._linkmap
+ plib_registry = self.vartree.dbapi._plib_registry
+ # We initialize preserve_paths to an empty set rather
+ # than None here because it plays an important role
+ # in prune_plib_registry logic by serving to indicate
+ # that we have a replacement for a package that's
+ # being unmerged.
+
+ preserve_paths = set()
+ needed = None
+ if not (self._linkmap_broken or linkmap is None or
+ plib_registry is None):
+ self.vartree.dbapi._fs_lock()
+ plib_registry.lock()
+ try:
+ plib_registry.load()
+ needed = os.path.join(inforoot, linkmap._needed_aux_key)
+ self._linkmap_rebuild(include_file=needed)
+
+ # Preserve old libs if they are still in use
+ # TODO: Handle cases where the previous instance
+ # has already been uninstalled but it still has some
+ # preserved libraries in the registry that we may
+ # want to preserve here.
+ preserve_paths = self._find_libs_to_preserve()
+ finally:
+ plib_registry.unlock()
+ self.vartree.dbapi._fs_unlock()
+
+ if preserve_paths:
+ self._add_preserve_libs_to_contents(preserve_paths)
+
+ # If portage is reinstalling itself, remove the old
+ # version now since we want to use the temporary
+ # PORTAGE_BIN_PATH that will be removed when we return.
+ reinstall_self = False
+ if self.myroot == "/" and \
+ match_from_list(PORTAGE_PACKAGE_ATOM, [self.mycpv]):
+ reinstall_self = True
+
+ emerge_log = self._emerge_log
+
+ # If we have any preserved libraries then autoclean
+ # is forced so that preserve-libs logic doesn't have
+ # to account for the additional complexity of the
+ # AUTOCLEAN=no mode.
+ autoclean = self.settings.get("AUTOCLEAN", "yes") == "yes" \
+ or preserve_paths
+
+ if autoclean:
+ emerge_log(_(" >>> AUTOCLEAN: %s") % (slot_atom,))
+
+ others_in_slot.append(self) # self has just been merged
+ for dblnk in list(others_in_slot):
+ if dblnk is self:
+ continue
+ if not (autoclean or dblnk.mycpv == self.mycpv or reinstall_self):
+ continue
+ showMessage(_(">>> Safely unmerging already-installed instance...\n"))
+ emerge_log(_(" === Unmerging... (%s)") % (dblnk.mycpv,))
+ others_in_slot.remove(dblnk) # dblnk will unmerge itself now
+ dblnk._linkmap_broken = self._linkmap_broken
+ dblnk.settings["REPLACED_BY_VERSION"] = portage.versions.cpv_getversion(self.mycpv)
+ dblnk.settings.backup_changes("REPLACED_BY_VERSION")
+ unmerge_rval = dblnk.unmerge(ldpath_mtimes=prev_mtimes,
+ others_in_slot=others_in_slot, needed=needed,
+ preserve_paths=preserve_paths)
+ dblnk.settings.pop("REPLACED_BY_VERSION", None)
+
+ if unmerge_rval == os.EX_OK:
+ emerge_log(_(" >>> unmerge success: %s") % (dblnk.mycpv,))
+ else:
+ emerge_log(_(" !!! unmerge FAILURE: %s") % (dblnk.mycpv,))
+
+ self.lockdb()
+ try:
+ # TODO: Check status and abort if necessary.
+ dblnk.delete()
+ finally:
+ self.unlockdb()
+ showMessage(_(">>> Original instance of package unmerged safely.\n"))
+
+ if len(others_in_slot) > 1:
+ showMessage(colorize("WARN", _("WARNING:"))
+ + _(" AUTOCLEAN is disabled. This can cause serious"
+ " problems due to overlapping packages.\n"),
+ level=logging.WARN, noiselevel=-1)
+
+ # We hold both directory locks.
+ self.dbdir = self.dbpkgdir
+ self.lockdb()
+ try:
+ self.delete()
+ _movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
+ finally:
+ self.unlockdb()
+
+ # Check for file collisions with blocking packages
+ # and remove any colliding files from their CONTENTS
+ # since they now belong to this package.
+ self._clear_contents_cache()
+ contents = self.getcontents()
+ destroot_len = len(destroot) - 1
+ self.lockdb()
+ try:
+ for blocker in blockers:
+ self.vartree.dbapi.removeFromContents(blocker, iter(contents),
+ relative_paths=False)
+ finally:
+ self.unlockdb()
+
+ plib_registry = self.vartree.dbapi._plib_registry
+ if plib_registry:
+ self.vartree.dbapi._fs_lock()
+ plib_registry.lock()
+ try:
+ plib_registry.load()
+
+ if preserve_paths:
+ # keep track of the libs we preserved
+ plib_registry.register(self.mycpv, slot, counter,
+ sorted(preserve_paths))
+
+ # Unregister any preserved libs that this package has overwritten
+ # and update the contents of the packages that owned them.
+ plib_dict = plib_registry.getPreservedLibs()
+ for cpv, paths in plib_collisions.items():
+ if cpv not in plib_dict:
+ continue
+ has_vdb_entry = False
+ if cpv != self.mycpv:
+ # If we've replaced another instance with the
+ # same cpv then the vdb entry no longer belongs
+ # to it, so we'll have to get the slot and counter
+ # from plib_registry._data instead.
+ self.vartree.dbapi.lock()
+ try:
+ try:
+ slot, counter = self.vartree.dbapi.aux_get(
+ cpv, ["SLOT", "COUNTER"])
+ except KeyError:
+ pass
+ else:
+ has_vdb_entry = True
+ self.vartree.dbapi.removeFromContents(
+ cpv, paths)
+ finally:
+ self.vartree.dbapi.unlock()
+
+ if not has_vdb_entry:
+ # It's possible for previously unmerged packages
+ # to have preserved libs in the registry, so try
+ # to retrieve the slot and counter from there.
+ has_registry_entry = False
+ for plib_cps, (plib_cpv, plib_counter, plib_paths) in \
+ plib_registry._data.items():
+ if plib_cpv != cpv:
+ continue
+ try:
+ cp, slot = plib_cps.split(":", 1)
+ except ValueError:
+ continue
+ counter = plib_counter
+ has_registry_entry = True
+ break
+
+ if not has_registry_entry:
+ continue
+
+ remaining = [f for f in plib_dict[cpv] if f not in paths]
+ plib_registry.register(cpv, slot, counter, remaining)
+
+ plib_registry.store()
+ finally:
+ plib_registry.unlock()
+ self.vartree.dbapi._fs_unlock()
+
+ self.vartree.dbapi._add(self)
+ contents = self.getcontents()
+
+ #do postinst script
+ self.settings["PORTAGE_UPDATE_ENV"] = \
+ os.path.join(self.dbpkgdir, "environment.bz2")
+ self.settings.backup_changes("PORTAGE_UPDATE_ENV")
+ try:
+ phase = EbuildPhase(background=False, phase="postinst",
+ scheduler=self._scheduler, settings=self.settings)
+ phase.start()
+ a = phase.wait()
+ if a == os.EX_OK:
+ showMessage(_(">>> %s merged.\n") % self.mycpv)
+ finally:
+ self.settings.pop("PORTAGE_UPDATE_ENV", None)
+
+ if a != os.EX_OK:
+ # It's stupid to bail out here, so keep going regardless of
+ # phase return code.
+ showMessage(_("!!! FAILED postinst: ")+str(a)+"\n",
+ level=logging.ERROR, noiselevel=-1)
+
+ downgrade = False
+ for v in otherversions:
+ if pkgcmp(catpkgsplit(self.pkg)[1:], catpkgsplit(v)[1:]) < 0:
+ downgrade = True
+
+ # Lock the config memory file to prevent symlink creation
+ # in merge_contents from overlapping with env-update.
+ self.vartree.dbapi._fs_lock()
+ try:
+ #update environment settings, library paths. DO NOT change symlinks.
+ env_update(makelinks=(not downgrade),
+ target_root=self.settings['ROOT'], prev_mtimes=prev_mtimes,
+ contents=contents, env=self.settings.environ(),
+ writemsg_level=self._display_merge)
+ finally:
+ self.vartree.dbapi._fs_unlock()
+
+ # For gcc upgrades, preserved libs have to be removed after the
+ # library path has been updated.
+ self._prune_plib_registry()
+
+ return os.EX_OK
+
+ def _new_backup_path(self, p):
+ """
+ This works for any type of path, such as a regular file, symlink,
+ or directory. The parent directory is assumed to exist.
+ The returned filename is of the form p + '.backup.' + x, where
+ x guarantees that the returned path does not exist yet.
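+ Example (illustrative): if '/etc/foo.backup.0000' already exists,
+ _new_backup_path('/etc/foo') returns '/etc/foo.backup.0001'.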
+ """
+ os = _os_merge
+
+ x = -1
+ while True:
+ x += 1
+ backup_p = p + '.backup.' + str(x).rjust(4, '0')
+ try:
+ os.lstat(backup_p)
+ except OSError:
+ break
+
+ return backup_p
+
+ def _merge_contents(self, srcroot, destroot, cfgfiledict):
+
+ cfgfiledict_orig = cfgfiledict.copy()
+
+ # open CONTENTS file (possibly overwriting old one) for recording
+ # Use atomic_ofstream for automatic coercion of raw bytes to
+ # unicode, in order to prevent TypeError when writing raw bytes
+ # to TextIOWrapper with python2.
+ outfile = atomic_ofstream(_unicode_encode(
+ os.path.join(self.dbtmpdir, 'CONTENTS'),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='backslashreplace')
+
+ # Don't bump mtimes on merge since some applications require
+ # preservation of timestamps.  This means that the unmerge phase must
+ # check to see if a file belongs to an installed instance in the same
+ # slot.
+ mymtime = None
+
+ # set umask to 0 for merging; back up umask, save old one in prevmask (since this is a global change)
+ prevmask = os.umask(0)
+ secondhand = []
+
+ # we do a first merge; this will recurse through all files in our srcroot but also build up a
+ # "second hand" of symlinks to merge later
+ if self.mergeme(srcroot, destroot, outfile, secondhand, "", cfgfiledict, mymtime):
+ return 1
+
+ # now it's time to deal with our second hand; we'll loop until we can't merge any more. The rest are
+ # broken symlinks; we'll merge them too.
+ lastlen = 0
+ while len(secondhand) and len(secondhand)!=lastlen:
+ # clear the thirdhand. Anything from our second hand that
+ # couldn't get merged will be added to thirdhand.
+
+ thirdhand = []
+ if self.mergeme(srcroot, destroot, outfile, thirdhand,
+ secondhand, cfgfiledict, mymtime):
+ return 1
+
+ #swap hands
+ lastlen = len(secondhand)
+
+ # our thirdhand now becomes our secondhand. It's ok to throw
+ # away secondhand since thirdhand contains all the stuff that
+ # couldn't be merged.
+ secondhand = thirdhand
+
+ if len(secondhand):
+ # force merge of remaining symlinks (broken or circular; oh well)
+ if self.mergeme(srcroot, destroot, outfile, None,
+ secondhand, cfgfiledict, mymtime):
+ return 1
+
+ #restore umask
+ os.umask(prevmask)
+
+ #if we opened it, close it
+ outfile.flush()
+ outfile.close()
+
+ # write out our collection of md5sums
+ if cfgfiledict != cfgfiledict_orig:
+ cfgfiledict.pop("IGNORE", None)
+ try:
+ writedict(cfgfiledict, self.vartree.dbapi._conf_mem_file)
+ except InvalidLocation:
+ self.settings._init_dirs()
+ writedict(cfgfiledict, self.vartree.dbapi._conf_mem_file)
+
+ return os.EX_OK
+
+ def mergeme(self, srcroot, destroot, outfile, secondhand, stufftomerge, cfgfiledict, thismtime):
+ """
+
+ This function handles actual merging of the package contents to the livefs.
+ It also handles config protection.
+
+ @param srcroot: Where are we copying files from (usually ${D})
+ @type srcroot: String (Path)
+ @param destroot: Typically ${ROOT}
+ @type destroot: String (Path)
+ @param outfile: File to log operations to
+ @type outfile: File Object
+ @param secondhand: A list of items to merge in pass two (usually
+ symlinks that point to non-existing files and may get merged later)
+ @type secondhand: List
+ @param stufftomerge: Either a directory to merge, or a list of items.
+ @type stufftomerge: String or List
+ @param cfgfiledict: { File:mtime } mapping for config_protected files
+ @type cfgfiledict: Dictionary
+ @param thismtime: The current time (typically long(time.time()))
+ @type thismtime: Long
+ @rtype: None or Boolean
+ @returns:
+ 1. True on failure
+ 2. None otherwise
+
+ """
+
+ showMessage = self._display_merge
+ writemsg = self._display_merge
+
+ os = _os_merge
+ sep = os.sep
+ join = os.path.join
+ srcroot = normalize_path(srcroot).rstrip(sep) + sep
+ destroot = normalize_path(destroot).rstrip(sep) + sep
+ calc_prelink = "prelink-checksums" in self.settings.features
+
+ # This merges a list of files. There are two forms of argument passing.
+ if isinstance(stufftomerge, basestring):
+ #A directory is specified. Figure out protection paths, listdir() it and process it.
+ mergelist = os.listdir(join(srcroot, stufftomerge))
+ offset = stufftomerge
+ else:
+ mergelist = stufftomerge
+ offset = ""
+
+ for i, x in enumerate(mergelist):
+
+ mysrc = join(srcroot, offset, x)
+ mydest = join(destroot, offset, x)
+ # myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
+ myrealdest = join(sep, offset, x)
+ # stat file once, test using S_* macros many times (faster that way)
+ mystat = os.lstat(mysrc)
+ mymode = mystat[stat.ST_MODE]
+ # handy variables; mydest is the target object on the live filesystems;
+ # mysrc is the source object in the temporary install dir
+ try:
+ mydstat = os.lstat(mydest)
+ mydmode = mydstat.st_mode
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ #dest file doesn't exist
+ mydstat = None
+ mydmode = None
+
+ if stat.S_ISLNK(mymode):
+ # we are merging a symbolic link
+ myabsto = abssymlink(mysrc)
+ if myabsto.startswith(srcroot):
+ myabsto = myabsto[len(srcroot):]
+ myabsto = myabsto.lstrip(sep)
+ myto = os.readlink(mysrc)
+ if self.settings and self.settings["D"]:
+ if myto.startswith(self.settings["D"]):
+ myto = myto[len(self.settings["D"]):]
+ # myrealto contains the path of the real file to which this symlink points.
+ # we can simply test for existence of this file to see if the target has been merged yet
+ myrealto = normalize_path(os.path.join(destroot, myabsto))
+ if mydmode!=None:
+ #destination exists
+ if stat.S_ISDIR(mydmode):
+ # we can't merge a symlink over a directory
+ newdest = self._new_backup_path(mydest)
+ msg = []
+ msg.append("")
+ msg.append(_("Installation of a symlink is blocked by a directory:"))
+ msg.append(" '%s'" % mydest)
+ msg.append(_("This symlink will be merged with a different name:"))
+ msg.append(" '%s'" % newdest)
+ msg.append("")
+ self._eerror("preinst", msg)
+ mydest = newdest
+
+ elif not stat.S_ISLNK(mydmode):
+ if os.path.exists(mysrc) and stat.S_ISDIR(os.stat(mysrc)[stat.ST_MODE]):
+ # Kill file blocking installation of symlink to dir #71787
+ pass
+ elif self.isprotected(mydest):
+ # Use md5 of the target in ${D} if it exists...
+ try:
+ newmd5 = perform_md5(join(srcroot, myabsto))
+ except FileNotFound:
+ # Maybe the target is merged already.
+ try:
+ newmd5 = perform_md5(myrealto)
+ except FileNotFound:
+ newmd5 = None
+ mydest = new_protect_filename(mydest, newmd5=newmd5)
+
+ # if secondhand is None it means we're operating in "force" mode and should not create a second hand.
+ if (secondhand != None) and (not os.path.exists(myrealto)):
+ # either the target directory doesn't exist yet or the target file doesn't exist -- or
+ # the target is a broken symlink. We will add this file to our "second hand" and merge
+ # it later.
+ secondhand.append(mysrc[len(srcroot):])
+ continue
+ # unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
+ mymtime = movefile(mysrc, mydest, newmtime=thismtime,
+ sstat=mystat, mysettings=self.settings,
+ encoding=_encodings['merge'])
+ if mymtime != None:
+ showMessage(">>> %s -> %s\n" % (mydest, myto))
+ outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
+ else:
+ showMessage(_("!!! Failed to move file.\n"),
+ level=logging.ERROR, noiselevel=-1)
+ showMessage("!!! %s -> %s\n" % (mydest, myto),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+ elif stat.S_ISDIR(mymode):
+ # we are merging a directory
+ if mydmode != None:
+ # destination exists
+
+ if bsd_chflags:
+ # Save then clear flags on dest.
+ dflags = mydstat.st_flags
+ if dflags != 0:
+ bsd_chflags.lchflags(mydest, 0)
+
+ if not os.access(mydest, os.W_OK):
+ pkgstuff = pkgsplit(self.pkg)
+ writemsg(_("\n!!! Cannot write to '%s'.\n") % mydest, noiselevel=-1)
+ writemsg(_("!!! Please check permissions and directories for broken symlinks.\n"))
+ writemsg(_("!!! You may start the merge process again by using ebuild:\n"))
+ writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
+ writemsg(_("!!! And finish by running this: env-update\n\n"))
+ return 1
+
+ if stat.S_ISDIR(mydmode) or \
+ (stat.S_ISLNK(mydmode) and os.path.isdir(mydest)):
+ # a symlink to an existing directory will work for us; keep it:
+ showMessage("--- %s/\n" % mydest)
+ if bsd_chflags:
+ bsd_chflags.lchflags(mydest, dflags)
+ else:
+ # a non-directory and non-symlink-to-directory. Won't work for us. Move out of the way.
+ backup_dest = self._new_backup_path(mydest)
+ msg = []
+ msg.append("")
+ msg.append(_("Installation of a directory is blocked by a file:"))
+ msg.append(" '%s'" % mydest)
+ msg.append(_("This file will be renamed to a different name:"))
+ msg.append(" '%s'" % backup_dest)
+ msg.append("")
+ self._eerror("preinst", msg)
+ if movefile(mydest, backup_dest,
+ mysettings=self.settings,
+ encoding=_encodings['merge']) is None:
+ return 1
+ showMessage(_("bak %s %s.backup\n") % (mydest, mydest),
+ level=logging.ERROR, noiselevel=-1)
+ #now create our directory
+ try:
+ if self.settings.selinux_enabled():
+ _selinux_merge.mkdir(mydest, mysrc)
+ else:
+ os.mkdir(mydest)
+ except OSError as e:
+ # Error handling should be equivalent to
+ # portage.util.ensure_dirs() for cases
+ # like bug #187518.
+ if e.errno in (errno.EEXIST,):
+ pass
+ elif os.path.isdir(mydest):
+ pass
+ else:
+ raise
+ del e
+
+ if bsd_chflags:
+ bsd_chflags.lchflags(mydest, dflags)
+ os.chmod(mydest, mystat[0])
+ os.chown(mydest, mystat[4], mystat[5])
+ showMessage(">>> %s/\n" % mydest)
+ else:
+ try:
+ #destination doesn't exist
+ if self.settings.selinux_enabled():
+ _selinux_merge.mkdir(mydest, mysrc)
+ else:
+ os.mkdir(mydest)
+ except OSError as e:
+ # Error handling should be equivalent to
+ # portage.util.ensure_dirs() for cases
+ # like bug #187518.
+ if e.errno in (errno.EEXIST,):
+ pass
+ elif os.path.isdir(mydest):
+ pass
+ else:
+ raise
+ del e
+ os.chmod(mydest, mystat[0])
+ os.chown(mydest, mystat[4], mystat[5])
+ showMessage(">>> %s/\n" % mydest)
+ outfile.write("dir "+myrealdest+"\n")
+ # recurse and merge this directory
+ if self.mergeme(srcroot, destroot, outfile, secondhand,
+ join(offset, x), cfgfiledict, thismtime):
+ return 1
+ elif stat.S_ISREG(mymode):
+ # we are merging a regular file
+ mymd5 = perform_md5(mysrc, calc_prelink=calc_prelink)
+ # calculate config file protection stuff
+ mydestdir = os.path.dirname(mydest)
+ moveme = 1
+ zing = "!!!"
+ mymtime = None
+ protected = self.isprotected(mydest)
+ if mydmode != None:
+ # destination file exists
+
+ if stat.S_ISDIR(mydmode):
+ # install of destination is blocked by an existing directory with the same name
+ newdest = self._new_backup_path(mydest)
+ msg = []
+ msg.append("")
+ msg.append(_("Installation of a regular file is blocked by a directory:"))
+ msg.append(" '%s'" % mydest)
+ msg.append(_("This file will be merged with a different name:"))
+ msg.append(" '%s'" % newdest)
+ msg.append("")
+ self._eerror("preinst", msg)
+ mydest = newdest
+
+ elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
+ # install of destination is blocked by an existing regular file,
+ # or by a symlink to an existing regular file;
+ # now, config file management may come into play.
+ # we only need to tweak mydest if cfg file management is in play.
+ if protected:
+ # we have a protection path; enable config file management.
+ cfgprot = 0
+ destmd5 = perform_md5(mydest, calc_prelink=calc_prelink)
+ if mymd5 == destmd5:
+ #file already in place; simply update mtimes of destination
+ moveme = 1
+ else:
+ if mymd5 == cfgfiledict.get(myrealdest, [None])[0]:
+ """ An identical update has previously been
+ merged. Skip it unless the user has chosen
+ --noconfmem."""
+ moveme = cfgfiledict["IGNORE"]
+ cfgprot = cfgfiledict["IGNORE"]
+ if not moveme:
+ zing = "---"
+ mymtime = mystat[stat.ST_MTIME]
+ else:
+ moveme = 1
+ cfgprot = 1
+ if moveme:
+ # Merging a new file, so update confmem.
+ cfgfiledict[myrealdest] = [mymd5]
+ elif destmd5 == cfgfiledict.get(myrealdest, [None])[0]:
+ """A previously remembered update has been
+ accepted, so it is removed from confmem."""
+ del cfgfiledict[myrealdest]
+
+ if cfgprot:
+ mydest = new_protect_filename(mydest, newmd5=mymd5)
+
+ # whether config protection or not, we merge the new file the
+ # same way. Unless moveme=0 (blocking directory)
+ if moveme:
+ # Create hardlinks only for source files that already exist
+ # as hardlinks (having identical st_dev and st_ino).
+ hardlink_key = (mystat.st_dev, mystat.st_ino)
+
+ hardlink_candidates = self._md5_merge_map.get(hardlink_key)
+ if hardlink_candidates is None:
+ hardlink_candidates = []
+ self._md5_merge_map[hardlink_key] = hardlink_candidates
+
+ mymtime = movefile(mysrc, mydest, newmtime=thismtime,
+ sstat=mystat, mysettings=self.settings,
+ hardlink_candidates=hardlink_candidates,
+ encoding=_encodings['merge'])
+ if mymtime is None:
+ return 1
+ if hardlink_candidates is not None:
+ hardlink_candidates.append(mydest)
+ zing = ">>>"
+
+ if mymtime != None:
+ outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
+ showMessage("%s %s\n" % (zing,mydest))
+ else:
+ # we are merging a fifo or device node
+ zing = "!!!"
+ if mydmode is None:
+ # destination doesn't exist
+ if movefile(mysrc, mydest, newmtime=thismtime,
+ sstat=mystat, mysettings=self.settings,
+ encoding=_encodings['merge']) is not None:
+ zing = ">>>"
+ else:
+ return 1
+ if stat.S_ISFIFO(mymode):
+ outfile.write("fif %s\n" % myrealdest)
+ else:
+ outfile.write("dev %s\n" % myrealdest)
+ showMessage(zing + " " + mydest + "\n")
+
+ def merge(self, mergeroot, inforoot, myroot=None, myebuild=None, cleanup=0,
+ mydbapi=None, prev_mtimes=None, counter=None):
+ """
+ @param myroot: ignored, self._eroot is used instead
+ """
+ myroot = None
+ retval = -1
+ parallel_install = "parallel-install" in self.settings.features
+ if not parallel_install:
+ self.lockdb()
+ self.vartree.dbapi._bump_mtime(self.mycpv)
+ if self._scheduler is None:
+ self._scheduler = PollScheduler().sched_iface
+ try:
+ retval = self.treewalk(mergeroot, myroot, inforoot, myebuild,
+ cleanup=cleanup, mydbapi=mydbapi, prev_mtimes=prev_mtimes,
+ counter=counter)
+
+ # If PORTAGE_BUILDDIR doesn't exist, then it probably means
+ # fail-clean is enabled, and the success/die hooks have
+ # already been called by EbuildPhase.
+ if os.path.isdir(self.settings['PORTAGE_BUILDDIR']):
+
+ if retval == os.EX_OK:
+ phase = 'success_hooks'
+ else:
+ phase = 'die_hooks'
+
+ ebuild_phase = MiscFunctionsProcess(
+ background=False, commands=[phase],
+ scheduler=self._scheduler, settings=self.settings)
+ ebuild_phase.start()
+ ebuild_phase.wait()
+ self._elog_process()
+
+ if 'noclean' not in self.settings.features and \
+ (retval == os.EX_OK or \
+ 'fail-clean' in self.settings.features):
+ if myebuild is None:
+ myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
+
+ doebuild_environment(myebuild, "clean",
+ settings=self.settings, db=mydbapi)
+ phase = EbuildPhase(background=False, phase="clean",
+ scheduler=self._scheduler, settings=self.settings)
+ phase.start()
+ phase.wait()
+ finally:
+ self.settings.pop('REPLACING_VERSIONS', None)
+ if self.vartree.dbapi._linkmap is None:
+ # preserve-libs is entirely disabled
+ pass
+ else:
+ self.vartree.dbapi._linkmap._clear_cache()
+ self.vartree.dbapi._bump_mtime(self.mycpv)
+ if not parallel_install:
+ self.unlockdb()
+ return retval
+
+ def getstring(self,name):
+ "returns contents of a file with whitespace converted to spaces"
+ if not os.path.exists(self.dbdir+"/"+name):
+ return ""
+ mydata = io.open(
+ _unicode_encode(os.path.join(self.dbdir, name),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'], errors='replace'
+ ).read().split()
+ return " ".join(mydata)
+
+ def copyfile(self,fname):
+ shutil.copyfile(fname,self.dbdir+"/"+os.path.basename(fname))
+
+ def getfile(self,fname):
+ if not os.path.exists(self.dbdir+"/"+fname):
+ return ""
+ return io.open(_unicode_encode(os.path.join(self.dbdir, fname),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'], errors='replace'
+ ).read()
+
+ def setfile(self,fname,data):
+ kwargs = {}
+ if fname == 'environment.bz2' or not isinstance(data, basestring):
+ kwargs['mode'] = 'wb'
+ else:
+ kwargs['mode'] = 'w'
+ kwargs['encoding'] = _encodings['repo.content']
+ write_atomic(os.path.join(self.dbdir, fname), data, **kwargs)
+
+ def getelements(self,ename):
+ if not os.path.exists(self.dbdir+"/"+ename):
+ return []
+ mylines = io.open(_unicode_encode(
+ os.path.join(self.dbdir, ename),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'], errors='replace'
+ ).readlines()
+ myreturn = []
+ for x in mylines:
+ for y in x[:-1].split():
+ myreturn.append(y)
+ return myreturn
+
+ def setelements(self,mylist,ename):
+ myelement = io.open(_unicode_encode(
+ os.path.join(self.dbdir, ename),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='backslashreplace')
+ for x in mylist:
+ myelement.write(_unicode_decode(x+"\n"))
+ myelement.close()
+
+ def isregular(self):
+ "Is this a regular package (does it have a CATEGORY file? A dblink can be virtual *and* regular)"
+ return os.path.exists(os.path.join(self.dbdir, "CATEGORY"))
+
+def merge(mycat, mypkg, pkgloc, infloc,
+ myroot=None, settings=None, myebuild=None,
+ mytree=None, mydbapi=None, vartree=None, prev_mtimes=None, blockers=None,
+ scheduler=None):
+ """
+ @param myroot: ignored, settings['EROOT'] is used instead
+ """
+ myroot = None
+ if settings is None:
+ raise TypeError("settings argument is required")
+ if not os.access(settings['EROOT'], os.W_OK):
+ writemsg(_("Permission denied: access('%s', W_OK)\n") % settings['EROOT'],
+ noiselevel=-1)
+ return errno.EACCES
+ background = (settings.get('PORTAGE_BACKGROUND') == '1')
+ merge_task = MergeProcess(
+ mycat=mycat, mypkg=mypkg, settings=settings,
+ treetype=mytree, vartree=vartree,
+ scheduler=(scheduler or PollScheduler().sched_iface),
+ background=background, blockers=blockers, pkgloc=pkgloc,
+ infloc=infloc, myebuild=myebuild, mydbapi=mydbapi,
+ prev_mtimes=prev_mtimes, logfile=settings.get('PORTAGE_LOG_FILE'))
+ merge_task.start()
+ retcode = merge_task.wait()
+ return retcode
+
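+# A minimal call sketch for merge() (illustrative; the image and
+# build-info paths are hypothetical, and the settings/vartree objects
+# are assumed to come from an initialized portage API):
+#
+# vartree = portage.db[portage.settings['EROOT']]['vartree']
+# rc = merge("app-misc", "foo-1.0",
+#     "/var/tmp/portage/app-misc/foo-1.0/image",
+#     "/var/tmp/portage/app-misc/foo-1.0/build-info",
+#     settings=portage.settings, mytree="vartree",
+#     mydbapi=vartree.dbapi, vartree=vartree)
+# # rc is os.EX_OK (0) on success
+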
+def unmerge(cat, pkg, myroot=None, settings=None,
+ mytrimworld=None, vartree=None,
+ ldpath_mtimes=None, scheduler=None):
+ """
+ @param myroot: ignored, settings['EROOT'] is used instead
+ @param mytrimworld: ignored
+ """
+ myroot = None
+ if settings is None:
+ raise TypeError("settings argument is required")
+ mylink = dblink(cat, pkg, settings=settings, treetype="vartree",
+ vartree=vartree, scheduler=scheduler)
+ vartree = mylink.vartree
+ parallel_install = "parallel-install" in settings.features
+ if not parallel_install:
+ mylink.lockdb()
+ try:
+ if mylink.exists():
+ retval = mylink.unmerge(ldpath_mtimes=ldpath_mtimes)
+ if retval == os.EX_OK:
+ mylink.lockdb()
+ try:
+ mylink.delete()
+ finally:
+ mylink.unlockdb()
+ return retval
+ return os.EX_OK
+ finally:
+ if vartree.dbapi._linkmap is None:
+ # preserve-libs is entirely disabled
+ pass
+ else:
+ vartree.dbapi._linkmap._clear_cache()
+ if not parallel_install:
+ mylink.unlockdb()
+
+def write_contents(contents, root, f):
+ """
+ Write contents to any file-like object. The file will be left open.
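+ Example output (illustrative checksum and mtime values):
+ dir /usr/bin
+ obj /usr/bin/foo d41d8cd98f00b204e9800998ecf8427e 1313945822
+ sym /usr/bin/foo-1 -> foo 1313945822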
+ """
+ root_len = len(root) - 1
+ for filename in sorted(contents):
+ entry_data = contents[filename]
+ entry_type = entry_data[0]
+ relative_filename = filename[root_len:]
+ if entry_type == "obj":
+ entry_type, mtime, md5sum = entry_data
+ line = "%s %s %s %s\n" % \
+ (entry_type, relative_filename, md5sum, mtime)
+ elif entry_type == "sym":
+ entry_type, mtime, link = entry_data
+ line = "%s %s -> %s %s\n" % \
+ (entry_type, relative_filename, link, mtime)
+ else: # dir, dev, fif
+ line = "%s %s\n" % (entry_type, relative_filename)
+ f.write(line)
+
+def tar_contents(contents, root, tar, protect=None, onProgress=None):
+ os = _os_merge
+
+ try:
+ for x in contents:
+ _unicode_encode(x,
+ encoding=_encodings['merge'],
+ errors='strict')
+ except UnicodeEncodeError:
+ # The package appears to have been merged with a
+ # different value of sys.getfilesystemencoding(),
+ # so fall back to utf_8 if appropriate.
+ try:
+ for x in contents:
+ _unicode_encode(x,
+ encoding=_encodings['fs'],
+ errors='strict')
+ except UnicodeEncodeError:
+ pass
+ else:
+ os = portage.os
+
+ root = normalize_path(root).rstrip(os.path.sep) + os.path.sep
+ id_strings = {}
+ maxval = len(contents)
+ curval = 0
+ if onProgress:
+ onProgress(maxval, 0)
+ paths = list(contents)
+ paths.sort()
+ for path in paths:
+ curval += 1
+ try:
+ lst = os.lstat(path)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ if onProgress:
+ onProgress(maxval, curval)
+ continue
+ contents_type = contents[path][0]
+ if path.startswith(root):
+ arcname = path[len(root):]
+ else:
+ raise ValueError("invalid root argument: '%s'" % root)
+ live_path = path
+ if 'dir' == contents_type and \
+ not stat.S_ISDIR(lst.st_mode) and \
+ os.path.isdir(live_path):
+ # Even though this was a directory in the original ${D}, it exists
+ # as a symlink to a directory in the live filesystem. It must be
+ # recorded as a real directory in the tar file to ensure that tar
+ # can properly extract its children.
+ live_path = os.path.realpath(live_path)
+ tarinfo = tar.gettarinfo(live_path, arcname)
+
+ if stat.S_ISREG(lst.st_mode):
+ if protect and protect(path):
+ # Create an empty file as a place holder in order to avoid
+ # potential collision-protect issues.
+ f = tempfile.TemporaryFile()
+ f.write(_unicode_encode(
+ "# empty file because --include-config=n " + \
+ "when `quickpkg` was used\n"))
+ f.flush()
+ f.seek(0)
+ tarinfo.size = os.fstat(f.fileno()).st_size
+ tar.addfile(tarinfo, f)
+ f.close()
+ else:
+ f = open(_unicode_encode(path,
+ encoding=object.__getattribute__(os, '_encoding'),
+ errors='strict'), 'rb')
+ try:
+ tar.addfile(tarinfo, f)
+ finally:
+ f.close()
+ else:
+ tar.addfile(tarinfo)
+ if onProgress:
+ onProgress(maxval, curval)
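+
+# A minimal usage sketch for tar_contents() (illustrative; assumes an
+# installed app-misc/foo-1.0 and an initialized portage API; _dblink is
+# an internal vardbapi helper):
+#
+# import tarfile
+# vardb = portage.db[portage.settings['EROOT']]['vartree'].dbapi
+# contents = vardb._dblink('app-misc/foo-1.0').getcontents()
+# tar = tarfile.open('foo-1.0.tar.bz2', 'w:bz2')
+# try:
+#     tar_contents(contents, portage.settings['ROOT'], tar)
+# finally:
+#     tar.close()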
diff --git a/portage_with_autodep/pym/portage/dbapi/virtual.py b/portage_with_autodep/pym/portage/dbapi/virtual.py
new file mode 100644
index 0000000..ec97ffe
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dbapi/virtual.py
@@ -0,0 +1,131 @@
+# Copyright 1998-2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+
+from portage.dbapi import dbapi
+from portage import cpv_getkey
+
+class fakedbapi(dbapi):
+ """A fake dbapi that allows consumers to inject/remove packages to/from it
+ portage.settings is required to maintain the dbAPI.
+ """
+ def __init__(self, settings=None, exclusive_slots=True):
+ """
+ @param exclusive_slots: When True, injecting a package with SLOT
+ metadata causes an existing package in the same slot to be
+ automatically removed (default is True).
+ @type exclusive_slots: Boolean
+ """
+ self._exclusive_slots = exclusive_slots
+ self.cpvdict = {}
+ self.cpdict = {}
+ if settings is None:
+ from portage import settings
+ self.settings = settings
+ self._match_cache = {}
+
+ def _clear_cache(self):
+ if self._categories is not None:
+ self._categories = None
+ if self._match_cache:
+ self._match_cache = {}
+
+ def match(self, origdep, use_cache=1):
+ result = self._match_cache.get(origdep, None)
+ if result is not None:
+ return result[:]
+ result = dbapi.match(self, origdep, use_cache=use_cache)
+ self._match_cache[origdep] = result
+ return result[:]
+
+ def cpv_exists(self, mycpv, myrepo=None):
+ return mycpv in self.cpvdict
+
+ def cp_list(self, mycp, use_cache=1, myrepo=None):
+ cachelist = self._match_cache.get(mycp)
+ # cp_list() doesn't expand old-style virtuals
+ if cachelist and cachelist[0].startswith(mycp):
+ return cachelist[:]
+ cpv_list = self.cpdict.get(mycp)
+ if cpv_list is None:
+ cpv_list = []
+ self._cpv_sort_ascending(cpv_list)
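+ # Cache the result, except when an old-style virtual has no
+ # direct matches (an empty list for a virtual/* cp is left
+ # uncached).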
+ if not (not cpv_list and mycp.startswith("virtual/")):
+ self._match_cache[mycp] = cpv_list
+ return cpv_list[:]
+
+ def cp_all(self):
+ return list(self.cpdict)
+
+ def cpv_all(self):
+ return list(self.cpvdict)
+
+ def cpv_inject(self, mycpv, metadata=None):
+ """Adds a cpv to the list of available packages. See the
+ exclusive_slots constructor parameter for behavior with
+ respect to SLOT metadata.
+ @param mycpv: cpv for the package to inject
+ @type mycpv: str
+ @param metadata: dictionary of raw metadata for aux_get() calls
+ @type metadata: dict
+ """
+ self._clear_cache()
+ mycp = cpv_getkey(mycpv)
+ self.cpvdict[mycpv] = metadata
+ myslot = None
+ if self._exclusive_slots and metadata:
+ myslot = metadata.get("SLOT", None)
+ if myslot and mycp in self.cpdict:
+ # If necessary, remove another package in the same SLOT.
+ for cpv in self.cpdict[mycp]:
+ if mycpv != cpv:
+ other_metadata = self.cpvdict[cpv]
+ if other_metadata:
+ if myslot == other_metadata.get("SLOT", None):
+ self.cpv_remove(cpv)
+ break
+ if mycp not in self.cpdict:
+ self.cpdict[mycp] = []
+ if mycpv not in self.cpdict[mycp]:
+ self.cpdict[mycp].append(mycpv)
+
+ def cpv_remove(self,mycpv):
+ """Removes a cpv from the list of available packages."""
+ self._clear_cache()
+ mycp = cpv_getkey(mycpv)
+ if mycpv in self.cpvdict:
+ del self.cpvdict[mycpv]
+ if mycp not in self.cpdict:
+ return
+ while mycpv in self.cpdict[mycp]:
+ self.cpdict[mycp].remove(mycpv)
+ if not len(self.cpdict[mycp]):
+ del self.cpdict[mycp]
+
+ def aux_get(self, mycpv, wants, myrepo=None):
+ if not self.cpv_exists(mycpv):
+ raise KeyError(mycpv)
+ metadata = self.cpvdict[mycpv]
+ if not metadata:
+ return ["" for x in wants]
+ return [metadata.get(x, "") for x in wants]
+
+ def aux_update(self, cpv, values):
+ self._clear_cache()
+ self.cpvdict[cpv].update(values)
+
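+# A minimal usage sketch for fakedbapi (illustrative; the package names
+# and metadata below are hypothetical):
+#
+# fake = fakedbapi(settings=portage.settings)
+# fake.cpv_inject("app-misc/foo-1.0", metadata={"SLOT": "0"})
+# fake.cpv_inject("app-misc/foo-2.0", metadata={"SLOT": "0"})
+# # With exclusive_slots=True (the default), foo-1.0 was removed when
+# # foo-2.0 was injected into the same slot:
+# fake.cp_list("app-misc/foo")  # -> ["app-misc/foo-2.0"]
+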
+class testdbapi(object):
+ """A dbapi instance with completely fake functions to get by hitting disk
+ TODO(antarus):
+ This class really needs to be rewritten to have better stubs; but these work for now.
+ The dbapi classes themselves need unit tests...and that will be a lot of work.
+ """
+
+ def __init__(self):
+ self.cpvs = {}
+ def f(*args, **kwargs):
+ return True
+ fake_api = dir(dbapi)
+ for call in fake_api:
+ if not hasattr(self, call):
+ setattr(self, call, f)
diff --git a/portage_with_autodep/pym/portage/debug.py b/portage_with_autodep/pym/portage/debug.py
new file mode 100644
index 0000000..ce642fe
--- /dev/null
+++ b/portage_with_autodep/pym/portage/debug.py
@@ -0,0 +1,120 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+import sys
+
+try:
+ import threading
+except ImportError:
+ import dummy_threading as threading
+
+import portage.const
+from portage.util import writemsg
+
+def set_trace(on=True):
+ if on:
+ t = trace_handler()
+ threading.settrace(t.event_handler)
+ sys.settrace(t.event_handler)
+ else:
+ sys.settrace(None)
+ threading.settrace(None)
+
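+# Usage sketch (illustrative; suspect_call is a hypothetical function):
+#
+# from portage.debug import set_trace
+# set_trace(True)
+# suspect_call()
+# set_trace(False)
+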
+class trace_handler(object):
+
+ def __init__(self):
+ python_system_paths = []
+ for x in sys.path:
+ if os.path.basename(x).startswith("python2."):
+ python_system_paths.append(x)
+
+ self.ignore_prefixes = []
+ for x in python_system_paths:
+ self.ignore_prefixes.append(x + os.sep)
+
+ self.trim_filename = prefix_trimmer(os.path.join(portage.const.PORTAGE_BASE_PATH, "pym") + os.sep).trim
+ self.show_local_lines = False
+ self.max_repr_length = 200
+
+ def event_handler(self, *args):
+ frame, event, arg = args
+ if "line" == event:
+ if self.show_local_lines:
+ self.trace_line(*args)
+ else:
+ if not self.ignore_filename(frame.f_code.co_filename):
+ self.trace_event(*args)
+ return self.event_handler
+
+ def trace_event(self, frame, event, arg):
+ writemsg("%s line=%d name=%s event=%s %slocals=%s\n" % \
+ (self.trim_filename(frame.f_code.co_filename),
+ frame.f_lineno,
+ frame.f_code.co_name,
+ event,
+ self.arg_repr(frame, event, arg),
+ self.locals_repr(frame, event, arg)))
+
+ def arg_repr(self, frame, event, arg):
+ my_repr = None
+ if "return" == event:
+ my_repr = repr(arg)
+ if len(my_repr) > self.max_repr_length:
+ my_repr = "'omitted'"
+ return "value=%s " % my_repr
+ elif "exception" == event:
+ my_repr = repr(arg[1])
+ if len(my_repr) > self.max_repr_length:
+ my_repr = "'omitted'"
+ return "type=%s value=%s " % (arg[0], my_repr)
+
+ return ""
+
+ def trace_line(self, frame, event, arg):
+ writemsg("%s line=%d\n" % (self.trim_filename(frame.f_code.co_filename), frame.f_lineno))
+
+ def ignore_filename(self, filename):
+ if filename:
+ for x in self.ignore_prefixes:
+ if filename.startswith(x):
+ return True
+ return False
+
+ def locals_repr(self, frame, event, arg):
+ """Create a representation of the locals dict that is suitable for
+ tracing output."""
+
+ my_locals = frame.f_locals.copy()
+
+ # prevent unsafe __repr__ call on self when __init__ is called
+ # (method calls aren't safe until after __init__ has completed).
+ if frame.f_code.co_name == "__init__" and "self" in my_locals:
+ my_locals["self"] = "omitted"
+
+ # We omit items that will lead to unreasonable bloat of the trace
+ # output (and resulting log file).
+ for k, v in my_locals.items():
+ my_repr = repr(v)
+ if len(my_repr) > self.max_repr_length:
+ my_locals[k] = "omitted"
+ return my_locals
+
+class prefix_trimmer(object):
+ def __init__(self, prefix):
+ self.prefix = prefix
+ self.cut_index = len(prefix)
+ self.previous = None
+ self.previous_trimmed = None
+
+ def trim(self, s):
+ """Remove a prefix from the string and return the result.
+ The previous result is automatically cached."""
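+ # Example (illustrative):
+ # prefix_trimmer("/usr/").trim("/usr/lib/portage") -> "lib/portage"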
+ if s == self.previous:
+ return self.previous_trimmed
+ else:
+ if s.startswith(self.prefix):
+ self.previous_trimmed = s[self.cut_index:]
+ else:
+ self.previous_trimmed = s
+ return self.previous_trimmed
diff --git a/portage_with_autodep/pym/portage/dep/__init__.py b/portage_with_autodep/pym/portage/dep/__init__.py
new file mode 100644
index 0000000..fd5ad30
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dep/__init__.py
@@ -0,0 +1,2432 @@
+# deps.py -- Portage dependency resolution functions
+# Copyright 2003-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = [
+ 'Atom', 'best_match_to_list', 'cpvequal',
+ 'dep_getcpv', 'dep_getkey', 'dep_getslot',
+ 'dep_getusedeps', 'dep_opconvert', 'flatten',
+ 'get_operator', 'isjustname', 'isspecific',
+ 'isvalidatom', 'match_from_list', 'match_to_list',
+ 'paren_enclose', 'paren_normalize', 'paren_reduce',
+ 'remove_slot', 'strip_empty', 'use_reduce',
+ '_repo_separator', '_slot_separator',
+]
+
+# DEPEND SYNTAX:
+#
+# 'use?' only affects the immediately following word!
+# Nesting is the only legal way to form multiple '[!]use?' requirements.
+#
+# Where: 'a' and 'b' are use flags, and 'z' is a depend atom.
+#
+# "a? z" -- If 'a' in [use], then b is valid.
+# "a? ( z )" -- Syntax with parenthesis.
+# "a? b? z" -- Deprecated.
+# "a? ( b? z )" -- Valid
+# "a? ( b? ( z ) ) -- Valid
+#
+
+import re, sys
+import warnings
+from itertools import chain
+from portage import _unicode_decode
+from portage.eapi import eapi_has_slot_deps, eapi_has_src_uri_arrows, \
+ eapi_has_use_deps, eapi_has_strong_blocks, eapi_has_use_dep_defaults
+from portage.exception import InvalidAtom, InvalidData, InvalidDependString
+from portage.localization import _
+from portage.versions import catpkgsplit, catsplit, \
+ pkgcmp, ververify, _cp, _cpv
+import portage.cache.mappings
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+# API consumers included in portage should set this to True.
+# Once the relevant API changes are in a portage release with
+# stable keywords, make these warnings unconditional.
+_internal_warnings = False
+
+def cpvequal(cpv1, cpv2):
+ """
+
+ @param cpv1: CategoryPackageVersion (no operators) Example: "sys-apps/portage-2.1"
+ @type cpv1: String
+ @param cpv2: CategoryPackageVersion (no operators) Example: "sys-apps/portage-2.1"
+ @type cpv2: String
+ @rtype: Boolean
+	@return:
+	1. True if cpv1 = cpv2
+	2. False otherwise
+	3. Raises PortageException if cpv1 or cpv2 is not a CPV
+
+	Example usage:
+	>>> from portage.dep import cpvequal
+	>>> cpvequal("sys-apps/portage-2.1","sys-apps/portage-2.1")
+	True
+
+ """
+
+ split1 = catpkgsplit(cpv1)
+ split2 = catpkgsplit(cpv2)
+
+ if not split1 or not split2:
+ raise portage.exception.PortageException(_("Invalid data '%s, %s', parameter was not a CPV") % (cpv1, cpv2))
+
+ if split1[0] != split2[0]:
+ return False
+
+ return (pkgcmp(split1[1:], split2[1:]) == 0)
+
+def strip_empty(myarr):
+ """
+ Strip all empty elements from an array
+
+ @param myarr: The list of elements
+ @type myarr: List
+ @rtype: Array
+ @return: The array with empty elements removed
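+
+	Example usage (illustrative; also emits a DeprecationWarning):
+	>>> strip_empty(['a', '', None, 'b'])
+	['a', 'b']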
+ """
+ warnings.warn(_("%s is deprecated and will be removed without replacement.") % \
+ ('portage.dep.strip_empty',), DeprecationWarning, stacklevel=2)
+ return [x for x in myarr if x]
+
+def paren_reduce(mystr):
+ """
+ Take a string and convert all paren enclosed entities into sublists and
+ split the list elements by spaces. All redundant brackets are removed.
+
+ Example usage:
+ >>> paren_reduce('foobar foo? ( bar baz )')
+ ['foobar', 'foo?', ['bar', 'baz']]
+
+ @param mystr: The string to reduce
+ @type mystr: String
+ @rtype: Array
+ @return: The reduced string in an array
+ """
+ if _internal_warnings:
+ warnings.warn(_("%s is deprecated and will be removed without replacement.") % \
+ ('portage.dep.paren_reduce',), DeprecationWarning, stacklevel=2)
+ mysplit = mystr.split()
+ level = 0
+ stack = [[]]
+ need_bracket = False
+
+ for token in mysplit:
+ if token == "(":
+ need_bracket = False
+ stack.append([])
+ level += 1
+ elif token == ")":
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+ if level > 0:
+ level -= 1
+ l = stack.pop()
+ is_single = (len(l) == 1 or (len(l)==2 and (l[0] == "||" or l[0][-1] == "?")))
+
+ def ends_in_any_of_dep(k):
+ return k>=0 and stack[k] and stack[k][-1] == "||"
+
+ def ends_in_operator(k):
+ return k>=0 and stack[k] and (stack[k][-1] == "||" or stack[k][-1][-1] == "?")
+
+ def special_append():
+ """
+ Use extend instead of append if possible. This kills all redundant brackets.
+ """
+ if is_single and (not stack[level] or not stack[level][-1][-1] == "?"):
+ if len(l) == 1 and isinstance(l[0], list):
+ # l = [[...]]
+ stack[level].extend(l[0])
+ else:
+ stack[level].extend(l)
+ else:
+ stack[level].append(l)
+
+ if l:
+ if not ends_in_any_of_dep(level-1) and not ends_in_operator(level):
+ #Optimize: ( ( ... ) ) -> ( ... ). Make sure there is no '||' hanging around.
+ stack[level].extend(l)
+ elif not stack[level]:
+						#An '||' in the level above forces us to keep the brackets.
+ special_append()
+ elif len(l) == 1 and ends_in_any_of_dep(level):
+ #Optimize: || ( A ) -> A
+ stack[level].pop()
+ special_append()
+ elif len(l) == 2 and (l[0] == "||" or l[0][-1] == "?") and stack[level][-1] in (l[0], "||"):
+ #Optimize: || ( || ( ... ) ) -> || ( ... )
+ # foo? ( foo? ( ... ) ) -> foo? ( ... )
+ # || ( foo? ( ... ) ) -> foo? ( ... )
+ stack[level].pop()
+ special_append()
+ else:
+ special_append()
+ else:
+ if stack[level] and (stack[level][-1] == "||" or stack[level][-1][-1] == "?"):
+ stack[level].pop()
+ else:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+ elif token == "||":
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+ need_bracket = True
+ stack[level].append(token)
+ else:
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+
+ if token[-1] == "?":
+ need_bracket = True
+
+ stack[level].append(token)
+
+ if level != 0 or need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+
+ return stack[0]
+
+class paren_normalize(list):
+ """Take a dependency structure as returned by paren_reduce or use_reduce
+ and generate an equivalent structure that has no redundant lists."""
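+	# Illustrative sketch: normalizing a nested any-of structure,
+	#   paren_normalize(['||', [['a'], ['b']]]) -> ['||', ['a', 'b']]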
+ def __init__(self, src):
+ if _internal_warnings:
+ warnings.warn(_("%s is deprecated and will be removed without replacement.") % \
+ ('portage.dep.paren_normalize',), DeprecationWarning, stacklevel=2)
+ list.__init__(self)
+ self._zap_parens(src, self)
+
+ def _zap_parens(self, src, dest, disjunction=False):
+ if not src:
+ return dest
+ i = iter(src)
+ for x in i:
+ if isinstance(x, basestring):
+ if x in ('||', '^^'):
+ y = self._zap_parens(next(i), [], disjunction=True)
+ if len(y) == 1:
+ dest.append(y[0])
+ else:
+ dest.append(x)
+ dest.append(y)
+ elif x.endswith("?"):
+ dest.append(x)
+ dest.append(self._zap_parens(next(i), []))
+ else:
+ dest.append(x)
+ else:
+ if disjunction:
+ x = self._zap_parens(x, [])
+ if len(x) == 1:
+ dest.append(x[0])
+ else:
+ dest.append(x)
+ else:
+ self._zap_parens(x, dest)
+ return dest
+
+def paren_enclose(mylist, unevaluated_atom=False):
+ """
+ Convert a list to a string with sublists enclosed with parens.
+
+ Example usage:
+ >>> test = ['foobar','foo',['bar','baz']]
+ >>> paren_enclose(test)
+ 'foobar foo ( bar baz )'
+
+ @param mylist: The list
+ @type mylist: List
+ @rtype: String
+ @return: The paren enclosed string
+ """
+ mystrparts = []
+ for x in mylist:
+ if isinstance(x, list):
+ mystrparts.append("( "+paren_enclose(x)+" )")
+ else:
+ if unevaluated_atom:
+ x = getattr(x, 'unevaluated_atom', x)
+ mystrparts.append(x)
+ return " ".join(mystrparts)
+
+def use_reduce(depstr, uselist=[], masklist=[], matchall=False, excludeall=[], is_src_uri=False, \
+ eapi=None, opconvert=False, flat=False, is_valid_flag=None, token_class=None, matchnone=False):
+ """
+ Takes a dep string and reduces the use? conditionals out, leaving an array
+ with subarrays. All redundant brackets are removed.
+
+	@param depstr: The dependency string to reduce
+	@type depstr: String
+ @param uselist: List of use enabled flags
+ @type uselist: List
+ @param masklist: List of masked flags (always treated as disabled)
+ @type masklist: List
+ @param matchall: Treat all conditionals as active. Used by repoman.
+ @type matchall: Bool
+ @param excludeall: List of flags for which negated conditionals are always treated as inactive.
+ @type excludeall: List
+ @param is_src_uri: Indicates if depstr represents a SRC_URI
+ @type is_src_uri: Bool
+ @param eapi: Indicates the EAPI the dep string has to comply to
+ @type eapi: String
+	@param opconvert: Put every operator as first element into its argument list
+ @type opconvert: Bool
+ @param flat: Create a flat list of all tokens
+ @type flat: Bool
+ @param is_valid_flag: Function that decides if a given use flag might be used in use conditionals
+ @type is_valid_flag: Function
+ @param token_class: Convert all non operator tokens into this class
+ @type token_class: Class
+ @param matchnone: Treat all conditionals as inactive. Used by digestgen().
+ @type matchnone: Bool
+ @rtype: List
+ @return: The use reduced depend array
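+
+	Example usage (illustrative; plain string tokens, token_class unset):
+	>>> use_reduce('a? ( x ) !a? ( y )', uselist=['a'])
+	['x']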
+ """
+ if isinstance(depstr, list):
+ if _internal_warnings:
+ warnings.warn(_("Passing paren_reduced dep arrays to %s is deprecated. " + \
+ "Pass the original dep string instead.") % \
+ ('portage.dep.use_reduce',), DeprecationWarning, stacklevel=2)
+ depstr = paren_enclose(depstr)
+
+ if opconvert and flat:
+ raise ValueError("portage.dep.use_reduce: 'opconvert' and 'flat' are mutually exclusive")
+
+ if matchall and matchnone:
+ raise ValueError("portage.dep.use_reduce: 'matchall' and 'matchnone' are mutually exclusive")
+
+ useflag_re = _get_useflag_re(eapi)
+
+ def is_active(conditional):
+ """
+ Decides if a given use conditional is active.
+ """
+ if conditional.startswith("!"):
+ flag = conditional[1:-1]
+ is_negated = True
+ else:
+ flag = conditional[:-1]
+ is_negated = False
+
+ if is_valid_flag:
+ if not is_valid_flag(flag):
+ msg = _("USE flag '%s' referenced in " + \
+ "conditional '%s' is not in IUSE") \
+ % (flag, conditional)
+ e = InvalidData(msg, category='IUSE.missing')
+ raise InvalidDependString(msg, errors=(e,))
+ else:
+ if useflag_re.match(flag) is None:
+ raise InvalidDependString(
+ _("invalid use flag '%s' in conditional '%s'") % (flag, conditional))
+
+ if is_negated and flag in excludeall:
+ return False
+
+ if flag in masklist:
+ return is_negated
+
+ if matchall:
+ return True
+
+ if matchnone:
+ return False
+
+ return (flag in uselist and not is_negated) or \
+ (flag not in uselist and is_negated)
+
+ def missing_white_space_check(token, pos):
+ """
+ Used to generate good error messages for invalid tokens.
+ """
+ for x in (")", "(", "||"):
+ if token.startswith(x) or token.endswith(x):
+ raise InvalidDependString(
+ _("missing whitespace around '%s' at '%s', token %s") % (x, token, pos+1))
+
+ mysplit = depstr.split()
+ #Count the bracket level.
+ level = 0
+ #We parse into a stack. Every time we hit a '(', a new empty list is appended to the stack.
+ #When we hit a ')', the last list in the stack is merged with list one level up.
+ stack = [[]]
+ #Set need_bracket to True after use conditionals or ||. Other tokens need to ensure
+ #that need_bracket is not True.
+ need_bracket = False
+ #Set need_simple_token to True after a SRC_URI arrow. Other tokens need to ensure
+ #that need_simple_token is not True.
+ need_simple_token = False
+
+ for pos, token in enumerate(mysplit):
+ if token == "(":
+ if need_simple_token:
+ raise InvalidDependString(
+ _("expected: file name, got: '%s', token %s") % (token, pos+1))
+ if len(mysplit) >= pos+2 and mysplit[pos+1] == ")":
+ raise InvalidDependString(
+ _("expected: dependency string, got: ')', token %s") % (pos+1,))
+ need_bracket = False
+ stack.append([])
+ level += 1
+ elif token == ")":
+ if need_bracket:
+ raise InvalidDependString(
+ _("expected: '(', got: '%s', token %s") % (token, pos+1))
+ if need_simple_token:
+ raise InvalidDependString(
+ _("expected: file name, got: '%s', token %s") % (token, pos+1))
+ if level > 0:
+ level -= 1
+ l = stack.pop()
+
+ is_single = len(l) == 1 or \
+ (opconvert and l and l[0] == "||") or \
+ (not opconvert and len(l)==2 and l[0] == "||")
+ ignore = False
+
+ if flat:
+ #In 'flat' mode, we simply merge all lists into a single large one.
+ if stack[level] and stack[level][-1][-1] == "?":
+ #The last token before the '(' that matches the current ')'
+ #was a use conditional. The conditional is removed in any case.
+ #Merge the current list if needed.
+ if is_active(stack[level][-1]):
+ stack[level].pop()
+ stack[level].extend(l)
+ else:
+ stack[level].pop()
+ else:
+ stack[level].extend(l)
+ continue
+
+ if stack[level]:
+ if stack[level][-1] == "||" and not l:
+ #Optimize: || ( ) -> .
+ stack[level].pop()
+ elif stack[level][-1][-1] == "?":
+ #The last token before the '(' that matches the current ')'
+ #was a use conditional, remove it and decide if we
+ #have to keep the current list.
+ if not is_active(stack[level][-1]):
+ ignore = True
+ stack[level].pop()
+
+ def ends_in_any_of_dep(k):
+ return k>=0 and stack[k] and stack[k][-1] == "||"
+
+ def starts_with_any_of_dep(k):
+ #'ends_in_any_of_dep' for opconvert
+ return k>=0 and stack[k] and stack[k][0] == "||"
+
+ def last_any_of_operator_level(k):
+					#Returns the level of the last || operator, if it is in effect for
+					#the current level. It is not in effect if there is a level that
+					#ends in a non-operator. This is almost equivalent to
+					#stack[level][-1]=="||", except that it skips empty levels.
+ while k>=0:
+ if stack[k]:
+ if stack[k][-1] == "||":
+ return k
+ elif stack[k][-1][-1] != "?":
+ return -1
+ k -= 1
+ return -1
+
+ def special_append():
+ """
+ Use extend instead of append if possible. This kills all redundant brackets.
+ """
+ if is_single:
+ #Either [A], [[...]] or [|| [...]]
+ if l[0] == "||" and ends_in_any_of_dep(level-1):
+ if opconvert:
+ stack[level].extend(l[1:])
+ else:
+ stack[level].extend(l[1])
+ elif len(l) == 1 and isinstance(l[0], list):
+ # l = [[...]]
+ last = last_any_of_operator_level(level-1)
+ if last == -1:
+ if opconvert and isinstance(l[0], list) \
+ and l[0] and l[0][0] == '||':
+ stack[level].append(l[0])
+ else:
+ stack[level].extend(l[0])
+ else:
+ if opconvert and l[0] and l[0][0] == "||":
+ stack[level].extend(l[0][1:])
+ else:
+ stack[level].append(l[0])
+ else:
+ stack[level].extend(l)
+ else:
+ if opconvert and stack[level] and stack[level][-1] == '||':
+ stack[level][-1] = ['||'] + l
+ else:
+ stack[level].append(l)
+
+ if l and not ignore:
+ #The current list is not empty and we don't want to ignore it because
+ #of an inactive use conditional.
+ if not ends_in_any_of_dep(level-1) and not ends_in_any_of_dep(level):
+ #Optimize: ( ( ... ) ) -> ( ... ). Make sure there is no '||' hanging around.
+ stack[level].extend(l)
+ elif not stack[level]:
+						#An '||' in the level above forces us to keep the brackets.
+ special_append()
+ elif is_single and ends_in_any_of_dep(level):
+ #Optimize: || ( A ) -> A, || ( || ( ... ) ) -> || ( ... )
+ stack[level].pop()
+ special_append()
+ elif ends_in_any_of_dep(level) and ends_in_any_of_dep(level-1):
+ #Optimize: || ( A || ( B C ) ) -> || ( A B C )
+ stack[level].pop()
+ stack[level].extend(l)
+ else:
+ if opconvert and ends_in_any_of_dep(level):
+ #In opconvert mode, we have to move the operator from the level
+ #above into the current list.
+ stack[level].pop()
+ stack[level].append(["||"] + l)
+ else:
+ special_append()
+
+ else:
+ raise InvalidDependString(
+ _("no matching '%s' for '%s', token %s") % ("(", ")", pos+1))
+ elif token == "||":
+ if is_src_uri:
+ raise InvalidDependString(
+ _("any-of dependencies are not allowed in SRC_URI: token %s") % (pos+1,))
+ if need_bracket:
+ raise InvalidDependString(
+ _("expected: '(', got: '%s', token %s") % (token, pos+1))
+ need_bracket = True
+ stack[level].append(token)
+ elif token == "->":
+ if need_simple_token:
+ raise InvalidDependString(
+ _("expected: file name, got: '%s', token %s") % (token, pos+1))
+ if not is_src_uri:
+ raise InvalidDependString(
+					_("SRC_URI arrows are only allowed in SRC_URI: token %s") % (pos+1,))
+ if eapi is None or not eapi_has_src_uri_arrows(eapi):
+ raise InvalidDependString(
+ _("SRC_URI arrow not allowed in EAPI %s: token %s") % (eapi, pos+1))
+ need_simple_token = True
+ stack[level].append(token)
+ else:
+ missing_white_space_check(token, pos)
+
+ if need_bracket:
+ raise InvalidDependString(
+ _("expected: '(', got: '%s', token %s") % (token, pos+1))
+
+ if need_simple_token and "/" in token:
+ #The last token was a SRC_URI arrow, make sure we have a simple file name.
+ raise InvalidDependString(
+ _("expected: file name, got: '%s', token %s") % (token, pos+1))
+
+ if token[-1] == "?":
+ need_bracket = True
+ else:
+ need_simple_token = False
+ if token_class and not is_src_uri:
+ #Add a hack for SRC_URI here, to avoid conditional code at the consumer level
+ try:
+ token = token_class(token, eapi=eapi,
+ is_valid_flag=is_valid_flag)
+ except InvalidAtom as e:
+ raise InvalidDependString(
+ _("Invalid atom (%s), token %s") \
+ % (e, pos+1), errors=(e,))
+ except SystemExit:
+ raise
+ except Exception as e:
+ raise InvalidDependString(
+ _("Invalid token '%s', token %s") % (token, pos+1))
+
+ if not matchall and \
+ hasattr(token, 'evaluate_conditionals'):
+ token = token.evaluate_conditionals(uselist)
+
+ stack[level].append(token)
+
+ if level != 0:
+ raise InvalidDependString(
+ _("Missing '%s' at end of string") % (")",))
+
+ if need_bracket:
+ raise InvalidDependString(
+ _("Missing '%s' at end of string") % ("(",))
+
+ if need_simple_token:
+ raise InvalidDependString(
+ _("Missing file name at end of string"))
+
+ return stack[0]
+
+def dep_opconvert(deplist):
+ """
+	Iterate recursively through a list of deps; if the dep is a '||' or
+	'&&' operator, combine it with the list of deps that follows.
+
+ Example usage:
+ >>> test = ["blah", "||", ["foo", "bar", "baz"]]
+ >>> dep_opconvert(test)
+ ['blah', ['||', 'foo', 'bar', 'baz']]
+
+ @param deplist: A list of deps to format
+	@type deplist: List
+ @rtype: List
+ @return:
+ The new list with the new ordering
+ """
+ if _internal_warnings:
+ warnings.warn(_("%s is deprecated. Use %s with the opconvert parameter set to True instead.") % \
+ ('portage.dep.dep_opconvert', 'portage.dep.use_reduce'), DeprecationWarning, stacklevel=2)
+
+ retlist = []
+ x = 0
+ while x != len(deplist):
+ if isinstance(deplist[x], list):
+ retlist.append(dep_opconvert(deplist[x]))
+ elif deplist[x] == "||":
+ retlist.append([deplist[x]] + dep_opconvert(deplist[x+1]))
+ x += 1
+ else:
+ retlist.append(deplist[x])
+ x += 1
+ return retlist
+
+def flatten(mylist):
+ """
+ Recursively traverse nested lists and return a single list containing
+ all non-list elements that are found.
+
+ Example usage:
+ >>> flatten([1, [2, 3, [4]]])
+ [1, 2, 3, 4]
+
+ @param mylist: A list containing nested lists and non-list elements.
+ @type mylist: List
+ @rtype: List
+ @return: A single list containing only non-list elements.
+ """
+ if _internal_warnings:
+ warnings.warn(_("%s is deprecated and will be removed without replacement.") % \
+ ('portage.dep.flatten',), DeprecationWarning, stacklevel=2)
+
+ newlist = []
+ for x in mylist:
+ if isinstance(x, list):
+ newlist.extend(flatten(x))
+ else:
+ newlist.append(x)
+ return newlist
+
+
+_usedep_re = {
+ "0": re.compile("^(?P<prefix>[!-]?)(?P<flag>[A-Za-z0-9][A-Za-z0-9+_@-]*)(?P<default>(\(\+\)|\(\-\))?)(?P<suffix>[?=]?)$"),
+ "4-python": re.compile("^(?P<prefix>[!-]?)(?P<flag>[A-Za-z0-9][A-Za-z0-9+_@.-]*)(?P<default>(\(\+\)|\(\-\))?)(?P<suffix>[?=]?)$"),
+}
+
+def _get_usedep_re(eapi):
+ """
+	When eapi is None, validation is not as strict, since we want the same
+	regular expression to work for multiple EAPIs that may have slightly
+	different rules.
+ @param eapi: The EAPI
+ @type eapi: String or None
+ @rtype: regular expression object
+ @return: A regular expression object that matches valid USE deps for the
+ given eapi.
+ """
+ if eapi in (None, "4-python",):
+ return _usedep_re["4-python"]
+ else:
+ return _usedep_re["0"]
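+
+# Illustrative sketch: the returned pattern splits a use dep token into its
+# parts, e.g. for the hypothetical token '!foo(+)?':
+#   m = _get_usedep_re("0").match("!foo(+)?")
+#   (m.group("prefix"), m.group("flag"), m.group("default"), m.group("suffix"))
+#   == ('!', 'foo', '(+)', '?')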
+
+class _use_dep(object):
+
+ __slots__ = ("__weakref__", "eapi", "conditional", "missing_enabled", "missing_disabled",
+ "disabled", "enabled", "tokens", "required")
+
+ class _conditionals_class(object):
+ __slots__ = ("enabled", "disabled", "equal", "not_equal")
+
+ def items(self):
+ for k in self.__slots__:
+ v = getattr(self, k, None)
+ if v:
+ yield (k, v)
+
+ def values(self):
+ for k in self.__slots__:
+ v = getattr(self, k, None)
+ if v:
+ yield v
+
+ # used in InvalidAtom messages
+ _conditional_strings = {
+ 'enabled' : '%s?',
+ 'disabled': '!%s?',
+ 'equal': '%s=',
+ 'not_equal': '!%s=',
+ }
+
+ def __init__(self, use, eapi, enabled_flags=None, disabled_flags=None, missing_enabled=None, \
+ missing_disabled=None, conditional=None, required=None):
+
+ self.eapi = eapi
+
+ if enabled_flags is not None:
+			#A shortcut for the class's own methods.
+ self.tokens = use
+ if not isinstance(self.tokens, tuple):
+ self.tokens = tuple(self.tokens)
+
+ self.required = frozenset(required)
+ self.enabled = frozenset(enabled_flags)
+ self.disabled = frozenset(disabled_flags)
+ self.missing_enabled = frozenset(missing_enabled)
+ self.missing_disabled = frozenset(missing_disabled)
+ self.conditional = None
+
+ if conditional:
+ self.conditional = self._conditionals_class()
+ for k in "enabled", "disabled", "equal", "not_equal":
+ setattr(self.conditional, k, frozenset(conditional.get(k, [])))
+
+ return
+
+ enabled_flags = set()
+ disabled_flags = set()
+ missing_enabled = set()
+ missing_disabled = set()
+ no_default = set()
+
+ conditional = {}
+ usedep_re = _get_usedep_re(self.eapi)
+
+ for x in use:
+ m = usedep_re.match(x)
+ if m is None:
+ raise InvalidAtom(_("Invalid use dep: '%s'") % (x,))
+
+ operator = m.group("prefix") + m.group("suffix")
+ flag = m.group("flag")
+ default = m.group("default")
+
+ if not operator:
+ enabled_flags.add(flag)
+ elif operator == "-":
+ disabled_flags.add(flag)
+ elif operator == "?":
+ conditional.setdefault("enabled", set()).add(flag)
+ elif operator == "=":
+ conditional.setdefault("equal", set()).add(flag)
+ elif operator == "!=":
+ conditional.setdefault("not_equal", set()).add(flag)
+ elif operator == "!?":
+ conditional.setdefault("disabled", set()).add(flag)
+ else:
+ raise InvalidAtom(_("Invalid use dep: '%s'") % (x,))
+
+ if default:
+ if default == "(+)":
+ if flag in missing_disabled or flag in no_default:
+ raise InvalidAtom(_("Invalid use dep: '%s'") % (x,))
+ missing_enabled.add(flag)
+ else:
+ if flag in missing_enabled or flag in no_default:
+ raise InvalidAtom(_("Invalid use dep: '%s'") % (x,))
+ missing_disabled.add(flag)
+ else:
+ if flag in missing_enabled or flag in missing_disabled:
+ raise InvalidAtom(_("Invalid use dep: '%s'") % (x,))
+ no_default.add(flag)
+
+ self.tokens = use
+ if not isinstance(self.tokens, tuple):
+ self.tokens = tuple(self.tokens)
+
+ self.required = frozenset(no_default)
+
+ self.enabled = frozenset(enabled_flags)
+ self.disabled = frozenset(disabled_flags)
+ self.missing_enabled = frozenset(missing_enabled)
+ self.missing_disabled = frozenset(missing_disabled)
+ self.conditional = None
+
+ if conditional:
+ self.conditional = self._conditionals_class()
+ for k in "enabled", "disabled", "equal", "not_equal":
+ setattr(self.conditional, k, frozenset(conditional.get(k, [])))
+
+ def __bool__(self):
+ return bool(self.tokens)
+
+ if sys.hexversion < 0x3000000:
+ __nonzero__ = __bool__
+
+ def __str__(self):
+ if not self.tokens:
+ return ""
+ return "[%s]" % (",".join(self.tokens),)
+
+ def __repr__(self):
+ return "portage.dep._use_dep(%s)" % repr(self.tokens)
+
+ def evaluate_conditionals(self, use):
+ """
+ Create a new instance with conditionals evaluated.
+
+ Conditional evaluation behavior:
+
+ parent state conditional result
+
+ x x? x
+ -x x?
+ x !x?
+ -x !x? -x
+
+ x x= x
+ -x x= -x
+ x !x= -x
+ -x !x= x
+
+ Conditional syntax examples:
+
+ Compact Form Equivalent Expanded Form
+
+ foo[bar?] bar? ( foo[bar] ) !bar? ( foo )
+ foo[!bar?] bar? ( foo ) !bar? ( foo[-bar] )
+ foo[bar=] bar? ( foo[bar] ) !bar? ( foo[-bar] )
+ foo[!bar=] bar? ( foo[-bar] ) !bar? ( foo[bar] )
+
+ """
+ enabled_flags = set(self.enabled)
+ disabled_flags = set(self.disabled)
+
+ tokens = []
+ usedep_re = _get_usedep_re(self.eapi)
+
+ for x in self.tokens:
+ m = usedep_re.match(x)
+
+ operator = m.group("prefix") + m.group("suffix")
+ flag = m.group("flag")
+ default = m.group("default")
+ if default is None:
+ default = ""
+
+ if operator == "?":
+ if flag in use:
+ enabled_flags.add(flag)
+ tokens.append(flag+default)
+ elif operator == "=":
+ if flag in use:
+ enabled_flags.add(flag)
+ tokens.append(flag+default)
+ else:
+ disabled_flags.add(flag)
+ tokens.append("-"+flag+default)
+ elif operator == "!=":
+ if flag in use:
+ disabled_flags.add(flag)
+ tokens.append("-"+flag+default)
+ else:
+ enabled_flags.add(flag)
+ tokens.append(flag+default)
+ elif operator == "!?":
+ if flag not in use:
+ disabled_flags.add(flag)
+ tokens.append("-"+flag+default)
+ else:
+ tokens.append(x)
+
+ return _use_dep(tokens, self.eapi, enabled_flags=enabled_flags, disabled_flags=disabled_flags, \
+ missing_enabled=self.missing_enabled, missing_disabled=self.missing_disabled, required=self.required)
+
+ def violated_conditionals(self, other_use, is_valid_flag, parent_use=None):
+ """
+ Create a new instance with satisfied use deps removed.
+ """
+ if parent_use is None and self.conditional:
+ raise InvalidAtom("violated_conditionals needs 'parent_use'" + \
+ " parameter for conditional flags.")
+
+ enabled_flags = set()
+ disabled_flags = set()
+
+ conditional = {}
+ tokens = []
+
+ all_defaults = frozenset(chain(self.missing_enabled, self.missing_disabled))
+
+ def validate_flag(flag):
+ return is_valid_flag(flag) or flag in all_defaults
+
+ usedep_re = _get_usedep_re(self.eapi)
+
+ for x in self.tokens:
+ m = usedep_re.match(x)
+
+ operator = m.group("prefix") + m.group("suffix")
+ flag = m.group("flag")
+
+ if not validate_flag(flag):
+ tokens.append(x)
+ if not operator:
+ enabled_flags.add(flag)
+ elif operator == "-":
+ disabled_flags.add(flag)
+ elif operator == "?":
+ conditional.setdefault("enabled", set()).add(flag)
+ elif operator == "=":
+ conditional.setdefault("equal", set()).add(flag)
+ elif operator == "!=":
+ conditional.setdefault("not_equal", set()).add(flag)
+ elif operator == "!?":
+ conditional.setdefault("disabled", set()).add(flag)
+
+ continue
+
+ if not operator:
+ if flag not in other_use:
+ if is_valid_flag(flag) or flag in self.missing_disabled:
+ tokens.append(x)
+ enabled_flags.add(flag)
+ elif operator == "-":
+ if flag not in other_use:
+ if not is_valid_flag(flag):
+ if flag in self.missing_enabled:
+ tokens.append(x)
+ disabled_flags.add(flag)
+ else:
+ tokens.append(x)
+ disabled_flags.add(flag)
+ elif operator == "?":
+ if flag not in parent_use or flag in other_use:
+ continue
+
+ if is_valid_flag(flag) or flag in self.missing_disabled:
+ tokens.append(x)
+ conditional.setdefault("enabled", set()).add(flag)
+ elif operator == "=":
+ if flag in parent_use and flag not in other_use:
+ if is_valid_flag(flag):
+ tokens.append(x)
+ conditional.setdefault("equal", set()).add(flag)
+ else:
+ if flag in self.missing_disabled:
+ tokens.append(x)
+ conditional.setdefault("equal", set()).add(flag)
+ elif flag not in parent_use:
+ if flag not in other_use:
+ if not is_valid_flag(flag):
+ if flag in self.missing_enabled:
+ tokens.append(x)
+ conditional.setdefault("equal", set()).add(flag)
+ else:
+ tokens.append(x)
+ conditional.setdefault("equal", set()).add(flag)
+ elif operator == "!=":
+ if flag not in parent_use and flag not in other_use:
+ if is_valid_flag(flag):
+ tokens.append(x)
+ conditional.setdefault("not_equal", set()).add(flag)
+ else:
+ if flag in self.missing_disabled:
+ tokens.append(x)
+ conditional.setdefault("not_equal", set()).add(flag)
+ elif flag in parent_use:
+ if flag not in other_use:
+ if not is_valid_flag(flag):
+ if flag in self.missing_enabled:
+ tokens.append(x)
+ conditional.setdefault("not_equal", set()).add(flag)
+ else:
+ tokens.append(x)
+ conditional.setdefault("not_equal", set()).add(flag)
+ elif operator == "!?":
+ if flag not in parent_use:
+ if flag not in other_use:
+ if not is_valid_flag(flag) and flag in self.missing_enabled:
+ tokens.append(x)
+ conditional.setdefault("disabled", set()).add(flag)
+ else:
+ tokens.append(x)
+ conditional.setdefault("disabled", set()).add(flag)
+
+ return _use_dep(tokens, self.eapi, enabled_flags=enabled_flags, disabled_flags=disabled_flags, \
+ missing_enabled=self.missing_enabled, missing_disabled=self.missing_disabled, \
+ conditional=conditional, required=self.required)
+
+ def _eval_qa_conditionals(self, use_mask, use_force):
+ """
+ For repoman, evaluate all possible combinations within the constraints
+ of the given use.force and use.mask settings. The result may seem
+ ambiguous in the sense that the same flag can be in both the enabled
+		and disabled sets, but this is useful within the context of how it is
+		intended to be used by repoman. It is assumed that the caller has
+ already ensured that there is no intersection between the given
+ use_mask and use_force sets when necessary.
+ """
+ enabled_flags = set(self.enabled)
+ disabled_flags = set(self.disabled)
+ missing_enabled = self.missing_enabled
+ missing_disabled = self.missing_disabled
+
+ tokens = []
+ usedep_re = _get_usedep_re(self.eapi)
+
+ for x in self.tokens:
+ m = usedep_re.match(x)
+
+ operator = m.group("prefix") + m.group("suffix")
+ flag = m.group("flag")
+ default = m.group("default")
+ if default is None:
+ default = ""
+
+ if operator == "?":
+ if flag not in use_mask:
+ enabled_flags.add(flag)
+ tokens.append(flag+default)
+ elif operator == "=":
+ if flag not in use_mask:
+ enabled_flags.add(flag)
+ tokens.append(flag+default)
+ if flag not in use_force:
+ disabled_flags.add(flag)
+ tokens.append("-"+flag+default)
+ elif operator == "!=":
+ if flag not in use_force:
+ enabled_flags.add(flag)
+ tokens.append(flag+default)
+ if flag not in use_mask:
+ disabled_flags.add(flag)
+ tokens.append("-"+flag+default)
+ elif operator == "!?":
+ if flag not in use_force:
+ disabled_flags.add(flag)
+ tokens.append("-"+flag+default)
+ else:
+ tokens.append(x)
+
+ return _use_dep(tokens, self.eapi, enabled_flags=enabled_flags, disabled_flags=disabled_flags, \
+ missing_enabled=missing_enabled, missing_disabled=missing_disabled, required=self.required)
+
+if sys.hexversion < 0x3000000:
+ _atom_base = unicode
+else:
+ _atom_base = str
+
+class Atom(_atom_base):
+
+ """
+ For compatibility with existing atom string manipulation code, this
+ class emulates most of the str methods that are useful with atoms.
+ """
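+
+	# Illustrative sketch (hypothetical atom):
+	#   a = Atom('>=dev-lang/python-2.7:2.7[ssl]', eapi='2')
+	#   (a.cp, a.operator, a.slot) == ('dev-lang/python', '>=', '2.7')
+	#   str(a.use) == '[ssl]'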
+
+ class _blocker(object):
+ __slots__ = ("overlap",)
+
+ class _overlap(object):
+ __slots__ = ("forbid",)
+
+ def __init__(self, forbid=False):
+ self.forbid = forbid
+
+ def __init__(self, forbid_overlap=False):
+ self.overlap = self._overlap(forbid=forbid_overlap)
+
+ def __new__(cls, s, unevaluated_atom=None, allow_wildcard=False, allow_repo=False,
+ _use=None, eapi=None, is_valid_flag=None):
+ return _atom_base.__new__(cls, s)
+
+ def __init__(self, s, unevaluated_atom=None, allow_wildcard=False, allow_repo=False,
+ _use=None, eapi=None, is_valid_flag=None):
+ if isinstance(s, Atom):
+ # This is an efficiency assertion, to ensure that the Atom
+ # constructor is not called redundantly.
+ raise TypeError(_("Expected %s, got %s") % \
+ (_atom_base, type(s)))
+
+ if not isinstance(s, _atom_base):
+ # Avoid TypeError from _atom_base.__init__ with PyPy.
+ s = _unicode_decode(s)
+
+ _atom_base.__init__(s)
+
+ if "!" == s[:1]:
+ blocker = self._blocker(forbid_overlap=("!" == s[1:2]))
+ if blocker.overlap.forbid:
+ s = s[2:]
+ else:
+ s = s[1:]
+ else:
+ blocker = False
+ self.__dict__['blocker'] = blocker
+ m = _atom_re.match(s)
+ extended_syntax = False
+ if m is None:
+ if allow_wildcard:
+ m = _atom_wildcard_re.match(s)
+ if m is None:
+ raise InvalidAtom(self)
+ op = None
+ gdict = m.groupdict()
+ cpv = cp = gdict['simple']
+ if cpv.find("**") != -1:
+ raise InvalidAtom(self)
+ slot = gdict['slot']
+ repo = gdict['repo']
+ use_str = None
+ extended_syntax = True
+ else:
+ raise InvalidAtom(self)
+ elif m.group('op') is not None:
+ base = _atom_re.groupindex['op']
+ op = m.group(base + 1)
+ cpv = m.group(base + 2)
+ cp = m.group(base + 3)
+ slot = m.group(_atom_re.groups - 2)
+ repo = m.group(_atom_re.groups - 1)
+ use_str = m.group(_atom_re.groups)
+ if m.group(base + 4) is not None:
+ raise InvalidAtom(self)
+ elif m.group('star') is not None:
+ base = _atom_re.groupindex['star']
+ op = '=*'
+ cpv = m.group(base + 1)
+ cp = m.group(base + 2)
+ slot = m.group(_atom_re.groups - 2)
+ repo = m.group(_atom_re.groups - 1)
+ use_str = m.group(_atom_re.groups)
+ if m.group(base + 3) is not None:
+ raise InvalidAtom(self)
+ elif m.group('simple') is not None:
+ op = None
+ cpv = cp = m.group(_atom_re.groupindex['simple'] + 1)
+ slot = m.group(_atom_re.groups - 2)
+ repo = m.group(_atom_re.groups - 1)
+ use_str = m.group(_atom_re.groups)
+ if m.group(_atom_re.groupindex['simple'] + 2) is not None:
+ raise InvalidAtom(self)
+
+ else:
+ raise AssertionError(_("required group not found in atom: '%s'") % self)
+ self.__dict__['cp'] = cp
+ self.__dict__['cpv'] = cpv
+ self.__dict__['repo'] = repo
+ self.__dict__['slot'] = slot
+ self.__dict__['operator'] = op
+ self.__dict__['extended_syntax'] = extended_syntax
+
+ if not (repo is None or allow_repo):
+ raise InvalidAtom(self)
+
+ if use_str is not None:
+ if _use is not None:
+ use = _use
+ else:
+ use = _use_dep(use_str[1:-1].split(","), eapi)
+ without_use = Atom(m.group('without_use'), allow_repo=allow_repo)
+ else:
+ use = None
+ if unevaluated_atom is not None and \
+ unevaluated_atom.use is not None:
+ # unevaluated_atom.use is used for IUSE checks when matching
+ # packages, so it must not propagate to without_use
+ without_use = Atom(s, allow_wildcard=allow_wildcard,
+ allow_repo=allow_repo)
+ else:
+ without_use = self
+
+ self.__dict__['use'] = use
+ self.__dict__['without_use'] = without_use
+
+ if unevaluated_atom:
+ self.__dict__['unevaluated_atom'] = unevaluated_atom
+ else:
+ self.__dict__['unevaluated_atom'] = self
+
+ if eapi is not None:
+ if not isinstance(eapi, basestring):
+ raise TypeError('expected eapi argument of ' + \
+ '%s, got %s: %s' % (basestring, type(eapi), eapi,))
+ if self.slot and not eapi_has_slot_deps(eapi):
+ raise InvalidAtom(
+ _("Slot deps are not allowed in EAPI %s: '%s'") \
+ % (eapi, self), category='EAPI.incompatible')
+ if self.use:
+ if not eapi_has_use_deps(eapi):
+ raise InvalidAtom(
+ _("Use deps are not allowed in EAPI %s: '%s'") \
+ % (eapi, self), category='EAPI.incompatible')
+ elif not eapi_has_use_dep_defaults(eapi) and \
+ (self.use.missing_enabled or self.use.missing_disabled):
+ raise InvalidAtom(
+ _("Use dep defaults are not allowed in EAPI %s: '%s'") \
+ % (eapi, self), category='EAPI.incompatible')
+ if is_valid_flag is not None and self.use.conditional:
+ invalid_flag = None
+ try:
+ for conditional_type, flags in \
+ self.use.conditional.items():
+ for flag in flags:
+ if not is_valid_flag(flag):
+ invalid_flag = (conditional_type, flag)
+ raise StopIteration()
+ except StopIteration:
+ pass
+ if invalid_flag is not None:
+ conditional_type, flag = invalid_flag
+ conditional_str = _use_dep._conditional_strings[conditional_type]
+ msg = _("USE flag '%s' referenced in " + \
+ "conditional '%s' in atom '%s' is not in IUSE") \
+ % (flag, conditional_str % flag, self)
+ raise InvalidAtom(msg, category='IUSE.missing')
+ if self.blocker and self.blocker.overlap.forbid and not eapi_has_strong_blocks(eapi):
+ raise InvalidAtom(
+ _("Strong blocks are not allowed in EAPI %s: '%s'") \
+ % (eapi, self), category='EAPI.incompatible')
+
+ @property
+ def without_repo(self):
+ if self.repo is None:
+ return self
+ return Atom(self.replace(_repo_separator + self.repo, '', 1),
+ allow_wildcard=True)
+
+ @property
+ def without_slot(self):
+ if self.slot is None:
+ return self
+ return Atom(self.replace(_slot_separator + self.slot, '', 1),
+ allow_repo=True, allow_wildcard=True)
+
+ def __setattr__(self, name, value):
+ raise AttributeError("Atom instances are immutable",
+ self.__class__, name, value)
+
+ def intersects(self, other):
+ """
+ Atoms with different cpv, operator or use attributes cause this method
+ to return False even though there may actually be some intersection.
+ TODO: Detect more forms of intersection.
+ @param other: The package atom to match
+ @type other: Atom
+ @rtype: Boolean
+ @return: True if this atom and the other atom intersect,
+ False otherwise.
+ """
+ if not isinstance(other, Atom):
+ raise TypeError("expected %s, got %s" % \
+ (Atom, type(other)))
+
+ if self == other:
+ return True
+
+ if self.cp != other.cp or \
+ self.use != other.use or \
+ self.operator != other.operator or \
+ self.cpv != other.cpv:
+ return False
+
+ if self.slot is None or \
+ other.slot is None or \
+ self.slot == other.slot:
+ return True
+
+ return False
+
+ def evaluate_conditionals(self, use):
+ """
+ Create an atom instance with any USE conditionals evaluated.
+ @param use: The set of enabled USE flags
+ @type use: set
+ @rtype: Atom
+ @return: an atom instance with any USE conditionals evaluated
+ """
+ if not (self.use and self.use.conditional):
+ return self
+ atom = remove_slot(self)
+ if self.slot:
+ atom += ":%s" % self.slot
+ use_dep = self.use.evaluate_conditionals(use)
+ atom += str(use_dep)
+ return Atom(atom, unevaluated_atom=self, allow_repo=(self.repo is not None), _use=use_dep)
+
+ def violated_conditionals(self, other_use, is_valid_flag, parent_use=None):
+ """
+ Create an atom instance with any USE conditional removed, that is
+ satisfied by other_use.
+ @param other_use: The set of enabled USE flags
+ @type other_use: set
+ @param is_valid_flag: Function that decides if a use flag is referenceable in use deps
+ @type is_valid_flag: function
+ @param parent_use: Set of enabled use flags of the package requiring this atom
+ @type parent_use: set
+ @rtype: Atom
+ @return: an atom instance with any satisfied USE conditionals removed
+ """
+ if not self.use:
+ return self
+ atom = remove_slot(self)
+ if self.slot:
+ atom += ":%s" % self.slot
+ use_dep = self.use.violated_conditionals(other_use, is_valid_flag, parent_use)
+ atom += str(use_dep)
+ return Atom(atom, unevaluated_atom=self, allow_repo=(self.repo is not None), _use=use_dep)
+
+ def _eval_qa_conditionals(self, use_mask, use_force):
+ if not (self.use and self.use.conditional):
+ return self
+ atom = remove_slot(self)
+ if self.slot:
+ atom += ":%s" % self.slot
+ use_dep = self.use._eval_qa_conditionals(use_mask, use_force)
+ atom += str(use_dep)
+ return Atom(atom, unevaluated_atom=self, allow_repo=(self.repo is not None), _use=use_dep)
+
+ def __copy__(self):
+ """Immutable, so returns self."""
+ return self
+
+ def __deepcopy__(self, memo=None):
+ """Immutable, so returns self."""
+ memo[id(self)] = self
+ return self
+
+_extended_cp_re_cache = {}
+
+def extended_cp_match(extended_cp, other_cp):
+ """
+ Checks if an extended syntax cp matches a non extended cp
+ """
+ # Escape special '+' and '.' characters which are allowed in atoms,
+ # and convert '*' to regex equivalent.
+ global _extended_cp_re_cache
+ extended_cp_re = _extended_cp_re_cache.get(extended_cp)
+ if extended_cp_re is None:
+ extended_cp_re = re.compile("^" + re.escape(extended_cp).replace(
+ r'\*', '[^/]*') + "$")
+ _extended_cp_re_cache[extended_cp] = extended_cp_re
+ return extended_cp_re.match(other_cp) is not None
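+
+# Illustrative sketch:
+#   extended_cp_match('dev-libs/*', 'dev-libs/glib')  -> True
+#   extended_cp_match('dev-libs/*', 'dev-util/glib')  -> False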
+
+class ExtendedAtomDict(portage.cache.mappings.MutableMapping):
+ """
+ dict() wrapper that supports extended atoms as keys and allows lookup
+ of a normal cp against other normal cp and extended cp.
+ The value type has to be given to __init__ and is assumed to be the same
+ for all values.
+ """
+
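+	# Illustrative usage sketch (hypothetical keys):
+	#   d = ExtendedAtomDict(list)
+	#   d['dev-libs/*'] = ['a']
+	#   d['dev-libs/foo'] = ['b']
+	#   d['dev-libs/foo'] -> ['b', 'a']  (normal match merged with extended)
+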
+ __slots__ = ('_extended', '_normal', '_value_class')
+
+ def __init__(self, value_class):
+ self._extended = {}
+ self._normal = {}
+ self._value_class = value_class
+
+ def copy(self):
+ result = self.__class__(self._value_class)
+ result._extended.update(self._extended)
+ result._normal.update(self._normal)
+ return result
+
+ def __iter__(self):
+ for k in self._normal:
+ yield k
+ for k in self._extended:
+ yield k
+
+ def iteritems(self):
+ for item in self._normal.items():
+ yield item
+ for item in self._extended.items():
+ yield item
+
+ def __delitem__(self, cp):
+ if "*" in cp:
+ return self._extended.__delitem__(cp)
+ else:
+ return self._normal.__delitem__(cp)
+
+ if sys.hexversion >= 0x3000000:
+ keys = __iter__
+ items = iteritems
+
+ def __len__(self):
+ return len(self._normal) + len(self._extended)
+
+ def setdefault(self, cp, default=None):
+ if "*" in cp:
+ return self._extended.setdefault(cp, default)
+ else:
+ return self._normal.setdefault(cp, default)
+
+ def __getitem__(self, cp):
+
+ if not isinstance(cp, basestring):
+ raise KeyError(cp)
+
+ if '*' in cp:
+ return self._extended[cp]
+
+ ret = self._value_class()
+ normal_match = self._normal.get(cp)
+ match = False
+
+ if normal_match is not None:
+ match = True
+ if hasattr(ret, "update"):
+ ret.update(normal_match)
+ elif hasattr(ret, "extend"):
+ ret.extend(normal_match)
+ else:
+ raise NotImplementedError()
+
+ for extended_cp in self._extended:
+ if extended_cp_match(extended_cp, cp):
+ match = True
+ if hasattr(ret, "update"):
+ ret.update(self._extended[extended_cp])
+ elif hasattr(ret, "extend"):
+ ret.extend(self._extended[extended_cp])
+ else:
+ raise NotImplementedError()
+
+ if not match:
+ raise KeyError(cp)
+
+ return ret
+
+ def __setitem__(self, cp, val):
+ if "*" in cp:
+ self._extended[cp] = val
+ else:
+ self._normal[cp] = val
+
+ def __eq__(self, other):
+ return self._value_class == other._value_class and \
+ self._extended == other._extended and \
+ self._normal == other._normal
+
+ def clear(self):
+ self._extended.clear()
+ self._normal.clear()
+
+
+def get_operator(mydep):
+ """
+ Return the operator used in a depstring.
+
+ Example usage:
+ >>> from portage.dep import *
+ >>> get_operator(">=test-1.0")
+ '>='
+
+ @param mydep: The dep string to check
+ @type mydep: String
+ @rtype: String
+ @return: The operator. One of:
+ '~', '=', '>', '<', '=*', '>=', or '<='
+ """
+ if not isinstance(mydep, Atom):
+ mydep = Atom(mydep)
+
+ return mydep.operator
+
+def dep_getcpv(mydep):
+ """
+ Return the category-package-version with any operators/slot specifications stripped off
+
+ Example usage:
+ >>> dep_getcpv('>=media-libs/test-3.0')
+ 'media-libs/test-3.0'
+
+ @param mydep: The depstring
+ @type mydep: String
+ @rtype: String
+ @return: The depstring with the operator removed
+ """
+ if not isinstance(mydep, Atom):
+ mydep = Atom(mydep)
+
+ return mydep.cpv
+
+def dep_getslot(mydep):
+ """
+ Retrieve the slot on a depend.
+
+ Example usage:
+ >>> dep_getslot('app-misc/test:3')
+ '3'
+
+ @param mydep: The depstring to retrieve the slot of
+ @type mydep: String
+ @rtype: String
+ @return: The slot
+ """
+ slot = getattr(mydep, "slot", False)
+ if slot is not False:
+ return slot
+
+ #remove repo_name if present
+ mydep = mydep.split(_repo_separator)[0]
+
+ colon = mydep.find(_slot_separator)
+ if colon != -1:
+ bracket = mydep.find("[", colon)
+ if bracket == -1:
+ return mydep[colon+1:]
+ else:
+ return mydep[colon+1:bracket]
+ return None
+
+def dep_getrepo(mydep):
+ """
+ Retrieve the repo on a depend.
+
+ Example usage:
+ >>> dep_getrepo('app-misc/test::repository')
+ 'repository'
+
+ @param mydep: The depstring to retrieve the repository of
+ @type mydep: String
+ @rtype: String
+ @return: The repository name
+ """
+ repo = getattr(mydep, "repo", False)
+ if repo is not False:
+ return repo
+
+ metadata = getattr(mydep, "metadata", False)
+ if metadata:
+ repo = metadata.get('repository', False)
+ if repo is not False:
+ return repo
+
+ colon = mydep.find(_repo_separator)
+ if colon != -1:
+ bracket = mydep.find("[", colon)
+ if bracket == -1:
+ return mydep[colon+2:]
+ else:
+ return mydep[colon+2:bracket]
+ return None
+
+def remove_slot(mydep):
+	"""
+	Removes dep components from the right side of an atom:
+		* slot
+		* use
+		* repo
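+
+	Example usage (illustrative):
+	>>> remove_slot('media-libs/test-3.0:2[foo]')
+	'media-libs/test-3.0'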
+ """
+ colon = mydep.find(_slot_separator)
+ if colon != -1:
+ mydep = mydep[:colon]
+ else:
+ bracket = mydep.find("[")
+ if bracket != -1:
+ mydep = mydep[:bracket]
+ return mydep
+
+def dep_getusedeps( depend ):
+ """
+ Pull a listing of USE Dependencies out of a dep atom.
+
+ Example usage:
+ >>> dep_getusedeps('app-misc/test:3[foo,-bar]')
+ ('foo', '-bar')
+
+ @param depend: The depstring to process
+ @type depend: String
+ @rtype: List
+ @return: List of use flags ( or [] if no flags exist )
+ """
+ use_list = []
+ open_bracket = depend.find('[')
+ # -1 = failure (think c++ string::npos)
+ comma_separated = False
+ bracket_count = 0
+	while open_bracket != -1:
+ bracket_count += 1
+ if bracket_count > 1:
+ raise InvalidAtom(_("USE Dependency with more "
+ "than one set of brackets: %s") % (depend,))
+ close_bracket = depend.find(']', open_bracket )
+ if close_bracket == -1:
+ raise InvalidAtom(_("USE Dependency with no closing bracket: %s") % depend )
+ use = depend[open_bracket + 1: close_bracket]
+ # foo[1:1] may return '' instead of None, we don't want '' in the result
+ if not use:
+ raise InvalidAtom(_("USE Dependency with "
+ "no use flag ([]): %s") % depend )
+ if not comma_separated:
+ comma_separated = "," in use
+
+ if comma_separated and bracket_count > 1:
+ raise InvalidAtom(_("USE Dependency contains a mixture of "
+ "comma and bracket separators: %s") % depend )
+
+ if comma_separated:
+ for x in use.split(","):
+ if x:
+ use_list.append(x)
+ else:
+ raise InvalidAtom(_("USE Dependency with no use "
+ "flag next to comma: %s") % depend )
+ else:
+ use_list.append(use)
+
+ # Find next use flag
+ open_bracket = depend.find( '[', open_bracket+1 )
+ return tuple(use_list)
+
+# \w is [a-zA-Z0-9_]
+
+# 2.1.3 A slot name may contain any of the characters [A-Za-z0-9+_.-].
+# It must not begin with a hyphen or a dot.
+_slot_separator = ":"
+_slot = r'([\w+][\w+.-]*)'
+_slot_re = re.compile('^' + _slot + '$', re.VERBOSE)
+
+_use = r'\[.*\]'
+_op = r'([=~]|[><]=?)'
+_repo_separator = "::"
+_repo_name = r'[\w][\w-]*'
+_repo = r'(?:' + _repo_separator + '(' + _repo_name + ')' + ')?'
+
+_atom_re = re.compile('^(?P<without_use>(?:' +
+ '(?P<op>' + _op + _cpv + ')|' +
+ '(?P<star>=' + _cpv + r'\*)|' +
+ '(?P<simple>' + _cp + '))' +
+ '(' + _slot_separator + _slot + ')?' + _repo + ')(' + _use + ')?$', re.VERBOSE)
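+
+# Illustrative sketch: '>=dev-libs/foo-1.2:3::gentoo[bar]' matches via the
+# 'op' alternative above, with slot '3', repo 'gentoo' and use string '[bar]'.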
+
+_extended_cat = r'[\w+*][\w+.*-]*'
+_extended_pkg = r'[\w+*][\w+*-]*?'
+
+_atom_wildcard_re = re.compile('(?P<simple>(' + _extended_cat + ')/(' + _extended_pkg + '))(:(?P<slot>' + _slot + '))?(' + _repo_separator + '(?P<repo>' + _repo_name + '))?$')
+
+_useflag_re = {
+ "0": re.compile(r'^[A-Za-z0-9][A-Za-z0-9+_@-]*$'),
+ "4-python": re.compile(r'^[A-Za-z0-9][A-Za-z0-9+_@.-]*$'),
+}
+
+def _get_useflag_re(eapi):
+ """
+	When eapi is None, validation is not as strict, since we want the same
+	regular expression to work for multiple EAPIs that may have slightly
+	different rules.
+ @param eapi: The EAPI
+ @type eapi: String or None
+ @rtype: regular expression object
+ @return: A regular expression object that matches valid USE flags for the
+ given eapi.
+ """
+ if eapi in (None, "4-python",):
+ return _useflag_re["4-python"]
+ else:
+ return _useflag_re["0"]
+
+def isvalidatom(atom, allow_blockers=False, allow_wildcard=False, allow_repo=False):
+ """
+ Check to see if a depend atom is valid
+
+ Example usage:
+ >>> isvalidatom('media-libs/test-3.0')
+ False
+ >>> isvalidatom('>=media-libs/test-3.0')
+ True
+
+ @param atom: The depend atom to check against
+ @type atom: String or Atom
+ @rtype: Boolean
+ @return: One of the following:
+ 1) False if the atom is invalid
+ 2) True if the atom is valid
+ """
+ try:
+ if not isinstance(atom, Atom):
+ atom = Atom(atom, allow_wildcard=allow_wildcard, allow_repo=allow_repo)
+ if not allow_blockers and atom.blocker:
+ return False
+ return True
+ except InvalidAtom:
+ return False
+
+def isjustname(mypkg):
+ """
+ Checks to see if the atom is only the package name (no version parts).
+
+ Example usage:
+ >>> isjustname('=media-libs/test-3.0')
+ False
+ >>> isjustname('media-libs/test')
+ True
+
+ @param mypkg: The package atom to check
+	@type mypkg: String or Atom
+	@rtype: Boolean
+ @return: One of the following:
+ 1) False if the package string is not just the package name
+ 2) True if it is
+ """
+ try:
+ if not isinstance(mypkg, Atom):
+ mypkg = Atom(mypkg)
+ return mypkg == mypkg.cp
+ except InvalidAtom:
+ pass
+
+ for x in mypkg.split('-')[-2:]:
+ if ververify(x):
+ return False
+ return True
+
+def isspecific(mypkg):
+ """
+ Checks to see if a package is in =category/package-version or
+ package-version format.
+
+ Example usage:
+ >>> isspecific('media-libs/test')
+ False
+ >>> isspecific('=media-libs/test-3.0')
+ True
+
+ @param mypkg: The package depstring to check against
+ @type mypkg: String
+ @rtype: Boolean
+ @return: One of the following:
+ 1) False if the package string is not specific
+ 2) True if it is
+ """
+ try:
+ if not isinstance(mypkg, Atom):
+ mypkg = Atom(mypkg)
+ return mypkg != mypkg.cp
+ except InvalidAtom:
+ pass
+
+ # Fall back to legacy code for backward compatibility.
+ return not isjustname(mypkg)
+
+def dep_getkey(mydep):
+ """
+ Return the category/package-name of a depstring.
+
+ Example usage:
+ >>> dep_getkey('=media-libs/test-3.0')
+ 'media-libs/test'
+
+ @param mydep: The depstring to retrieve the category/package-name of
+ @type mydep: String
+ @rtype: String
+ @return: The package category/package-name
+ """
+ if not isinstance(mydep, Atom):
+ mydep = Atom(mydep, allow_wildcard=True, allow_repo=True)
+
+ return mydep.cp
+
+def match_to_list(mypkg, mylist):
+ """
+	Searches the list for entries that match the package.
+
+ @param mypkg: The package atom to match
+ @type mypkg: String
+ @param mylist: The list of package atoms to compare against
+	@type mylist: List
+ @rtype: List
+ @return: A unique list of package atoms that match the given package atom
+ """
+ return [ x for x in set(mylist) if match_from_list(x, [mypkg]) ]
+
+def best_match_to_list(mypkg, mylist):
+ """
+ Returns the most specific entry that matches the package given.
+
+ @param mypkg: The package atom to check
+ @type mypkg: String
+ @param mylist: The list of package atoms to check against
+ @type mylist: List
+ @rtype: String
+ @return: The package atom which best matches given the following ordering:
+ - =cpv 6
+ - ~cpv 5
+ - =cpv* 4
+ - cp:slot 3
+ - >cpv 2
+ - <cpv 2
+ - >=cpv 2
+ - <=cpv 2
+ - cp 1
+ - cp:slot with extended syntax 0
+ - cp with extended syntax -1
+ """
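+	# Illustrative sketch (mylist entries must be Atom instances):
+	#   best_match_to_list('dev-libs/foo-1.0',
+	#       [Atom('dev-libs/foo'), Atom('=dev-libs/foo-1.0')])
+	#   returns the '=dev-libs/foo-1.0' atom, since '=' outranks a bare cp.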
+ operator_values = {'=':6, '~':5, '=*':4,
+ '>':2, '<':2, '>=':2, '<=':2, None:1}
+ maxvalue = -2
+ bestm = None
+ for x in match_to_list(mypkg, mylist):
+ if x.extended_syntax:
+ if dep_getslot(x) is not None:
+ if maxvalue < 0:
+ maxvalue = 0
+ bestm = x
+ else:
+ if maxvalue < -1:
+ maxvalue = -1
+ bestm = x
+ continue
+ if dep_getslot(x) is not None:
+ if maxvalue < 3:
+ maxvalue = 3
+ bestm = x
+ op_val = operator_values[x.operator]
+ if op_val > maxvalue:
+ maxvalue = op_val
+ bestm = x
+ return bestm
+
+def match_from_list(mydep, candidate_list):
+ """
+	Searches the list for entries that match the package.
+
+ @param mydep: The package atom to match
+ @type mydep: String
+ @param candidate_list: The list of package atoms to compare against
+	@type candidate_list: List
+ @rtype: List
+ @return: A list of package atoms that match the given package atom
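+
+	Example usage (illustrative):
+	>>> match_from_list('=sys-apps/portage-2.1*', ['sys-apps/portage-2.1.6'])
+	['sys-apps/portage-2.1.6']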
+ """
+
+ if not candidate_list:
+ return []
+
+ from portage.util import writemsg
+ if "!" == mydep[:1]:
+ if "!" == mydep[1:2]:
+ mydep = mydep[2:]
+ else:
+ mydep = mydep[1:]
+ if not isinstance(mydep, Atom):
+ mydep = Atom(mydep, allow_wildcard=True, allow_repo=True)
+
+ mycpv = mydep.cpv
+ mycpv_cps = catpkgsplit(mycpv) # Can be None if not specific
+ slot = mydep.slot
+
+ if not mycpv_cps:
+ cat, pkg = catsplit(mycpv)
+ ver = None
+ rev = None
+ else:
+ cat, pkg, ver, rev = mycpv_cps
+ if mydep == mycpv:
+ raise KeyError(_("Specific key requires an operator"
+ " (%s) (try adding an '=')") % (mydep))
+
+ if ver and rev:
+ operator = mydep.operator
+ if not operator:
+ writemsg(_("!!! Invalid atom: %s\n") % mydep, noiselevel=-1)
+ return []
+ else:
+ operator = None
+
+ mylist = []
+
+ if operator is None:
+ for x in candidate_list:
+ cp = getattr(x, "cp", None)
+ if cp is None:
+ mysplit = catpkgsplit(remove_slot(x))
+ if mysplit is not None:
+ cp = mysplit[0] + '/' + mysplit[1]
+
+ if cp is None:
+ continue
+
+ if cp == mycpv or (mydep.extended_syntax and \
+ extended_cp_match(mydep.cp, cp)):
+ mylist.append(x)
+
+ elif operator == "=": # Exact match
+ for x in candidate_list:
+ xcpv = getattr(x, "cpv", None)
+ if xcpv is None:
+ xcpv = remove_slot(x)
+ if not cpvequal(xcpv, mycpv):
+ continue
+ mylist.append(x)
+
+ elif operator == "=*": # glob match
+ # XXX: Nasty special casing for leading zeros
+ # Required as =* is a literal prefix match, so can't
+ # use vercmp
+ mysplit = catpkgsplit(mycpv)
+ myver = mysplit[2].lstrip("0")
+ if not myver or not myver[0].isdigit():
+ myver = "0"+myver
+ mycpv = mysplit[0]+"/"+mysplit[1]+"-"+myver
+ for x in candidate_list:
+ xs = getattr(x, "cpv_split", None)
+ if xs is None:
+ xs = catpkgsplit(remove_slot(x))
+ myver = xs[2].lstrip("0")
+ if not myver or not myver[0].isdigit():
+ myver = "0"+myver
+ xcpv = xs[0]+"/"+xs[1]+"-"+myver
+ if xcpv.startswith(mycpv):
+ mylist.append(x)
+
+ elif operator == "~": # version, any revision, match
+ for x in candidate_list:
+ xs = getattr(x, "cpv_split", None)
+ if xs is None:
+ xs = catpkgsplit(remove_slot(x))
+ if xs is None:
+ raise InvalidData(x)
+ if not cpvequal(xs[0]+"/"+xs[1]+"-"+xs[2], mycpv_cps[0]+"/"+mycpv_cps[1]+"-"+mycpv_cps[2]):
+ continue
+ if xs[2] != ver:
+ continue
+ mylist.append(x)
+
+ elif operator in [">", ">=", "<", "<="]:
+ mysplit = ["%s/%s" % (cat, pkg), ver, rev]
+ for x in candidate_list:
+ xs = getattr(x, "cpv_split", None)
+ if xs is None:
+ xs = catpkgsplit(remove_slot(x))
+ xcat, xpkg, xver, xrev = xs
+ xs = ["%s/%s" % (xcat, xpkg), xver, xrev]
+ try:
+ result = pkgcmp(xs, mysplit)
+ except ValueError: # pkgcmp may return ValueError during int() conversion
+ writemsg(_("\nInvalid package name: %s\n") % x, noiselevel=-1)
+ raise
+ if result is None:
+ continue
+ elif operator == ">":
+ if result > 0:
+ mylist.append(x)
+ elif operator == ">=":
+ if result >= 0:
+ mylist.append(x)
+ elif operator == "<":
+ if result < 0:
+ mylist.append(x)
+ elif operator == "<=":
+ if result <= 0:
+ mylist.append(x)
+ else:
+ raise KeyError(_("Unknown operator: %s") % mydep)
+ else:
+ raise KeyError(_("Unknown operator: %s") % mydep)
+
+ if slot is not None and not mydep.extended_syntax:
+ candidate_list = mylist
+ mylist = []
+ for x in candidate_list:
+ xslot = getattr(x, "slot", False)
+ if xslot is False:
+ xslot = dep_getslot(x)
+ if xslot is not None and xslot != slot:
+ continue
+ mylist.append(x)
+
+ if mydep.unevaluated_atom.use:
+ candidate_list = mylist
+ mylist = []
+ for x in candidate_list:
+ use = getattr(x, "use", None)
+ if use is not None:
+ if mydep.unevaluated_atom.use and \
+ not x.iuse.is_valid_flag(
+ mydep.unevaluated_atom.use.required):
+ continue
+
+ if mydep.use:
+
+ missing_enabled = mydep.use.missing_enabled.difference(x.iuse.all)
+ missing_disabled = mydep.use.missing_disabled.difference(x.iuse.all)
+
+ if mydep.use.enabled:
+ if mydep.use.enabled.intersection(missing_disabled):
+ continue
+ need_enabled = mydep.use.enabled.difference(use.enabled)
+ if need_enabled:
+ need_enabled = need_enabled.difference(missing_enabled)
+ if need_enabled:
+ continue
+
+ if mydep.use.disabled:
+ if mydep.use.disabled.intersection(missing_enabled):
+ continue
+ need_disabled = mydep.use.disabled.intersection(use.enabled)
+ if need_disabled:
+ need_disabled = need_disabled.difference(missing_disabled)
+ if need_disabled:
+ continue
+
+ mylist.append(x)
+
+ if mydep.repo:
+ candidate_list = mylist
+ mylist = []
+ for x in candidate_list:
+ repo = getattr(x, "repo", False)
+ if repo is False:
+ repo = dep_getrepo(x)
+ if repo is not None and repo != mydep.repo:
+ continue
+ mylist.append(x)
+
+ return mylist
+
+def human_readable_required_use(required_use):
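+	"""
+	Render the REQUIRED_USE operators in words; e.g. (illustrative)
+	'^^ ( a b )' becomes 'exactly-one-of ( a b )'.
+	"""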
+ return required_use.replace("^^", "exactly-one-of").replace("||", "any-of")
+
+def get_required_use_flags(required_use):
+ """
+ Returns a set of use flags that are used in the given REQUIRED_USE string
+
+ @param required_use: REQUIRED_USE string
+ @type required_use: String
+ @rtype: Set
+ @return: Set of use flags that are used in the given REQUIRED_USE string
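+
+	Example usage (illustrative):
+	>>> sorted(get_required_use_flags('^^ ( a b ) c? ( d )'))
+	['a', 'b', 'c', 'd']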
+ """
+
+ mysplit = required_use.split()
+ level = 0
+ stack = [[]]
+ need_bracket = False
+
+ used_flags = set()
+
+ def register_token(token):
+ if token.endswith("?"):
+ token = token[:-1]
+ if token.startswith("!"):
+ token = token[1:]
+ used_flags.add(token)
+
+ for token in mysplit:
+ if token == "(":
+ need_bracket = False
+ stack.append([])
+ level += 1
+ elif token == ")":
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+ if level > 0:
+ level -= 1
+ l = stack.pop()
+ ignore = False
+ if stack[level]:
+ if stack[level][-1] in ("||", "^^") or \
+ (not isinstance(stack[level][-1], bool) and \
+ stack[level][-1][-1] == "?"):
+ ignore = True
+ stack[level].pop()
+ stack[level].append(True)
+
+ if l and not ignore:
+ stack[level].append(all(x for x in l))
+ else:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+ elif token in ("||", "^^"):
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+ need_bracket = True
+ stack[level].append(token)
+ else:
+ if need_bracket or "(" in token or ")" in token or \
+ "|" in token or "^" in token:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+
+ if token[-1] == "?":
+ need_bracket = True
+ stack[level].append(token)
+ else:
+ stack[level].append(True)
+
+ register_token(token)
+
+ if level != 0 or need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+
+ return frozenset(used_flags)
+
+class _RequiredUseLeaf(object):
+
+ __slots__ = ('_satisfied', '_token')
+
+ def __init__(self, token, satisfied):
+ self._token = token
+ self._satisfied = satisfied
+
+ def tounicode(self):
+ return self._token
+
+class _RequiredUseBranch(object):
+
+ __slots__ = ('_children', '_operator', '_parent', '_satisfied')
+
+ def __init__(self, operator=None, parent=None):
+ self._children = []
+ self._operator = operator
+ self._parent = parent
+ self._satisfied = False
+
+ def __bool__(self):
+ return self._satisfied
+
+ def tounicode(self):
+
+ include_parens = self._parent is not None
+ tokens = []
+ if self._operator is not None:
+ tokens.append(self._operator)
+
+ if include_parens:
+ tokens.append("(")
+
+ complex_nesting = False
+ node = self
+ while node is not None and not complex_nesting:
+ if node._operator in ("||", "^^"):
+ complex_nesting = True
+ else:
+ node = node._parent
+
+ if complex_nesting:
+ for child in self._children:
+ tokens.append(child.tounicode())
+ else:
+ for child in self._children:
+ if not child._satisfied:
+ tokens.append(child.tounicode())
+
+ if include_parens:
+ tokens.append(")")
+
+ return " ".join(tokens)
+
+ if sys.hexversion < 0x3000000:
+ __nonzero__ = __bool__
+
+def check_required_use(required_use, use, iuse_match):
+ """
+ Checks if the use flags listed in 'use' satisfy all
+ constraints specified in the given REQUIRED_USE string.
+
+ @param required_use: REQUIRED_USE string
+ @type required_use: String
+ @param use: Enabled use flags
+ @type use: List
+ @param iuse_match: Callable that takes a single flag argument and
+ returns True if the flag is matched, False otherwise.
+ @type iuse_match: Callable
+ @rtype: _RequiredUseBranch
+ @return: A tree of parsed tokens whose truth value indicates whether
+ the REQUIRED_USE constraints are satisfied
+ """
+
+ def is_active(token):
+ if token.startswith("!"):
+ flag = token[1:]
+ is_negated = True
+ else:
+ flag = token
+ is_negated = False
+
+ if not flag or not iuse_match(flag):
+ msg = _("USE flag '%s' is not in IUSE") \
+ % (flag,)
+ e = InvalidData(msg, category='IUSE.missing')
+ raise InvalidDependString(msg, errors=(e,))
+
+ return (flag in use and not is_negated) or \
+ (flag not in use and is_negated)
+
+ def is_satisfied(operator, argument):
+ if not argument:
+ #|| ( ) -> True
+ return True
+
+ if operator == "||":
+ return (True in argument)
+ elif operator == "^^":
+ return (argument.count(True) == 1)
+ elif operator[-1] == "?":
+ return (False not in argument)
+
+ mysplit = required_use.split()
+ level = 0
+ stack = [[]]
+ tree = _RequiredUseBranch()
+ node = tree
+ need_bracket = False
+
+ for token in mysplit:
+ if token == "(":
+ if not need_bracket:
+ child = _RequiredUseBranch(parent=node)
+ node._children.append(child)
+ node = child
+
+ need_bracket = False
+ stack.append([])
+ level += 1
+ elif token == ")":
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+ if level > 0:
+ level -= 1
+ l = stack.pop()
+ op = None
+ if stack[level]:
+ if stack[level][-1] in ("||", "^^"):
+ op = stack[level].pop()
+ satisfied = is_satisfied(op, l)
+ stack[level].append(satisfied)
+ node._satisfied = satisfied
+
+ elif not isinstance(stack[level][-1], bool) and \
+ stack[level][-1][-1] == "?":
+ op = stack[level].pop()
+ if is_active(op[:-1]):
+ satisfied = is_satisfied(op, l)
+ stack[level].append(satisfied)
+ node._satisfied = satisfied
+ else:
+ node._satisfied = True
+ last_node = node._parent._children.pop()
+ if last_node is not node:
+ raise AssertionError(
+ "node is not last child of parent")
+ node = node._parent
+ continue
+
+ if op is None:
+ satisfied = False not in l
+ node._satisfied = satisfied
+ if l:
+ stack[level].append(satisfied)
+
+ if len(node._children) <= 1 or \
+ node._parent._operator not in ("||", "^^"):
+ last_node = node._parent._children.pop()
+ if last_node is not node:
+ raise AssertionError(
+ "node is not last child of parent")
+ for child in node._children:
+ node._parent._children.append(child)
+ if isinstance(child, _RequiredUseBranch):
+ child._parent = node._parent
+
+ elif not node._children:
+ last_node = node._parent._children.pop()
+ if last_node is not node:
+ raise AssertionError(
+ "node is not last child of parent")
+
+ elif len(node._children) == 1 and op in ("||", "^^"):
+ last_node = node._parent._children.pop()
+ if last_node is not node:
+ raise AssertionError(
+ "node is not last child of parent")
+ node._parent._children.append(node._children[0])
+ if isinstance(node._children[0], _RequiredUseBranch):
+ node._children[0]._parent = node._parent
+ node = node._children[0]
+ if node._operator is None and \
+ node._parent._operator not in ("||", "^^"):
+ last_node = node._parent._children.pop()
+ if last_node is not node:
+ raise AssertionError(
+ "node is not last child of parent")
+ for child in node._children:
+ node._parent._children.append(child)
+ if isinstance(child, _RequiredUseBranch):
+ child._parent = node._parent
+
+ node = node._parent
+ else:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+ elif token in ("||", "^^"):
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+ need_bracket = True
+ stack[level].append(token)
+ child = _RequiredUseBranch(operator=token, parent=node)
+ node._children.append(child)
+ node = child
+ else:
+ if need_bracket or "(" in token or ")" in token or \
+ "|" in token or "^" in token:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+
+ if token[-1] == "?":
+ need_bracket = True
+ stack[level].append(token)
+ child = _RequiredUseBranch(operator=token, parent=node)
+ node._children.append(child)
+ node = child
+ else:
+ satisfied = is_active(token)
+ stack[level].append(satisfied)
+ node._children.append(_RequiredUseLeaf(token, satisfied))
+
+ if level != 0 or need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+
+ tree._satisfied = False not in stack[0]
+ return tree
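+
+# Illustrative example (not in the original module): the returned
+# _RequiredUseBranch is truth-tested for the overall verdict. The
+# iuse_match callable below is a stand-in for a package's IUSE check:
+#
+#     >>> iuse = lambda flag: flag in ("gtk", "qt")
+#     >>> bool(check_required_use("^^ ( gtk qt )", ["gtk"], iuse))
+#     True
+#     >>> bool(check_required_use("^^ ( gtk qt )", ["gtk", "qt"], iuse))
+#     False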
+
+def extract_affecting_use(mystr, atom, eapi=None):
+ """
+ Take a dep string and an atom and return the use flags
+ that decide if the given atom is in effect.
+
+ Example usage:
+ >>> extract_affecting_use('sasl? ( dev-libs/cyrus-sasl ) \
+ !minimal? ( cxx? ( dev-libs/cyrus-sasl ) )', 'dev-libs/cyrus-sasl')
+ {'sasl', 'minimal', 'cxx'}
+
+ @param mystr: The dependency string
+ @type mystr: String
+ @param atom: The atom to get into effect
+ @type atom: String
+ @rtype: Set of strings
+ @return: Set of use flags that decide if the given atom is in effect
+ """
+ useflag_re = _get_useflag_re(eapi)
+ mysplit = mystr.split()
+ level = 0
+ stack = [[]]
+ need_bracket = False
+ affecting_use = set()
+
+ def flag(conditional):
+ if conditional[0] == "!":
+ flag = conditional[1:-1]
+ else:
+ flag = conditional[:-1]
+
+ if useflag_re.match(flag) is None:
+ raise InvalidDependString(
+ _("invalid use flag '%s' in conditional '%s'") % \
+ (flag, conditional))
+
+ return flag
+
+ for token in mysplit:
+ if token == "(":
+ need_bracket = False
+ stack.append([])
+ level += 1
+ elif token == ")":
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+ if level > 0:
+ level -= 1
+ l = stack.pop()
+ is_single = (len(l) == 1 or (len(l)==2 and (l[0] == "||" or l[0][-1] == "?")))
+
+ def ends_in_any_of_dep(k):
+ return k>=0 and stack[k] and stack[k][-1] == "||"
+
+ def ends_in_operator(k):
+ return k>=0 and stack[k] and (stack[k][-1] == "||" or stack[k][-1][-1] == "?")
+
+ def special_append():
+ """
+ Use extend instead of append if possible. This kills all redundant brackets.
+ """
+ if is_single and (not stack[level] or not stack[level][-1][-1] == "?"):
+ if len(l) == 1 and isinstance(l[0], list):
+ # l = [[...]]
+ stack[level].extend(l[0])
+ else:
+ stack[level].extend(l)
+ else:
+ stack[level].append(l)
+
+ if l:
+ if not ends_in_any_of_dep(level-1) and not ends_in_operator(level):
+ #Optimize: ( ( ... ) ) -> ( ... ). Make sure there is no '||' hanging around.
+ stack[level].extend(l)
+ elif not stack[level]:
+ #An '||' in the level above forces us to keep the brackets.
+ special_append()
+ elif len(l) == 1 and ends_in_any_of_dep(level):
+ #Optimize: || ( A ) -> A
+ stack[level].pop()
+ special_append()
+ elif len(l) == 2 and (l[0] == "||" or l[0][-1] == "?") and stack[level][-1] in (l[0], "||"):
+ #Optimize: || ( || ( ... ) ) -> || ( ... )
+ # foo? ( foo? ( ... ) ) -> foo? ( ... )
+ # || ( foo? ( ... ) ) -> foo? ( ... )
+ stack[level].pop()
+ special_append()
+ if l[0][-1] == "?":
+ affecting_use.add(flag(l[0]))
+ else:
+ if stack[level] and stack[level][-1][-1] == "?":
+ affecting_use.add(flag(stack[level][-1]))
+ special_append()
+ else:
+ if stack[level] and (stack[level][-1] == "||" or stack[level][-1][-1] == "?"):
+ stack[level].pop()
+ else:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+ elif token == "||":
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+ need_bracket = True
+ stack[level].append(token)
+ else:
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+
+ if token[-1] == "?":
+ need_bracket = True
+ stack[level].append(token)
+ elif token == atom:
+ stack[level].append(token)
+
+ if level != 0 or need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+
+ return affecting_use
diff --git a/portage_with_autodep/pym/portage/dep/dep_check.py b/portage_with_autodep/pym/portage/dep/dep_check.py
new file mode 100644
index 0000000..01d5021
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dep/dep_check.py
@@ -0,0 +1,679 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['dep_check', 'dep_eval', 'dep_wordreduce', 'dep_zapdeps']
+
+import logging
+
+import portage
+from portage import _unicode_decode
+from portage.dep import Atom, match_from_list, use_reduce
+from portage.exception import InvalidDependString, ParseError
+from portage.localization import _
+from portage.util import writemsg, writemsg_level
+from portage.versions import catpkgsplit, cpv_getkey, pkgcmp
+
+def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
+ trees=None, use_mask=None, use_force=None, **kwargs):
+ """
+ In order to solve bug #141118, recursively expand new-style virtuals so
+ as to collapse one or more levels of indirection, generating an expanded
+ search space. In dep_zapdeps, new-style virtuals will be assigned
+ zero cost regardless of whether or not they are currently installed. Virtual
+ blockers are supported but only when the virtual expands to a single
+ atom because it wouldn't necessarily make sense to block all the components
+ of a compound virtual. When more than one new-style virtual is matched,
+ the matches are sorted from highest to lowest versions and the atom is
+ expanded to || ( highest match ... lowest match )."""
+ newsplit = []
+ mytrees = trees[myroot]
+ portdb = mytrees["porttree"].dbapi
+ pkg_use_enabled = mytrees.get("pkg_use_enabled")
+ # Atoms are stored in the graph as (atom, id(atom)) tuples
+ # since each atom is considered to be a unique entity. For
+ # example, atoms that appear identical may behave differently
+ # in USE matching, depending on their unevaluated form. Also,
+ # specially generated virtual atoms may appear identical while
+ # having different _orig_atom attributes.
+ atom_graph = mytrees.get("atom_graph")
+ parent = mytrees.get("parent")
+ virt_parent = mytrees.get("virt_parent")
+ graph_parent = None
+ eapi = None
+ if parent is not None:
+ if virt_parent is not None:
+ graph_parent = virt_parent
+ parent = virt_parent
+ else:
+ graph_parent = parent
+ eapi = parent.metadata["EAPI"]
+ repoman = not mysettings.local_config
+ if kwargs["use_binaries"]:
+ portdb = trees[myroot]["bintree"].dbapi
+ pprovideddict = mysettings.pprovideddict
+ myuse = kwargs["myuse"]
+ for x in mysplit:
+ if x == "||":
+ newsplit.append(x)
+ continue
+ elif isinstance(x, list):
+ newsplit.append(_expand_new_virtuals(x, edebug, mydbapi,
+ mysettings, myroot=myroot, trees=trees, use_mask=use_mask,
+ use_force=use_force, **kwargs))
+ continue
+
+ if not isinstance(x, Atom):
+ raise ParseError(
+ _("invalid token: '%s'") % x)
+
+ if repoman:
+ x = x._eval_qa_conditionals(use_mask, use_force)
+
+ mykey = x.cp
+ if not mykey.startswith("virtual/"):
+ newsplit.append(x)
+ if atom_graph is not None:
+ atom_graph.add((x, id(x)), graph_parent)
+ continue
+
+ if x.blocker:
+ # Virtual blockers are no longer expanded here since
+ # the un-expanded virtual atom is more useful for
+ # maintaining a cache of blocker atoms.
+ newsplit.append(x)
+ if atom_graph is not None:
+ atom_graph.add((x, id(x)), graph_parent)
+ continue
+
+ if repoman or not hasattr(portdb, 'match_pkgs') or \
+ pkg_use_enabled is None:
+ if portdb.cp_list(x.cp):
+ newsplit.append(x)
+ else:
+ # TODO: Add PROVIDE check for repoman.
+ a = []
+ myvartree = mytrees.get("vartree")
+ if myvartree is not None:
+ mysettings._populate_treeVirtuals_if_needed(myvartree)
+ mychoices = mysettings.getvirtuals().get(mykey, [])
+ for y in mychoices:
+ a.append(Atom(x.replace(x.cp, y.cp, 1)))
+ if not a:
+ newsplit.append(x)
+ elif len(a) == 1:
+ newsplit.append(a[0])
+ else:
+ newsplit.append(['||'] + a)
+ continue
+
+ pkgs = []
+ # Ignore USE deps here, since otherwise we might not
+ # get any matches. Choices with correct USE settings
+ # will be preferred in dep_zapdeps().
+ matches = portdb.match_pkgs(x.without_use)
+ # Use descending order to prefer higher versions.
+ matches.reverse()
+ for pkg in matches:
+ # only use new-style matches
+ if pkg.cp.startswith("virtual/"):
+ pkgs.append(pkg)
+
+ mychoices = []
+ if not pkgs and not portdb.cp_list(x.cp):
+ myvartree = mytrees.get("vartree")
+ if myvartree is not None:
+ mysettings._populate_treeVirtuals_if_needed(myvartree)
+ mychoices = mysettings.getvirtuals().get(mykey, [])
+
+ if not (pkgs or mychoices):
+ # This one couldn't be expanded as a new-style virtual. Old-style
+ # virtuals have already been expanded by dep_virtual, so this one
+ # is unavailable and dep_zapdeps will identify it as such. The
+ # atom is not eliminated here since it may still represent a
+ # dependency that needs to be satisfied.
+ newsplit.append(x)
+ if atom_graph is not None:
+ atom_graph.add((x, id(x)), graph_parent)
+ continue
+
+ a = []
+ for pkg in pkgs:
+ virt_atom = '=' + pkg.cpv
+ if x.unevaluated_atom.use:
+ virt_atom += str(x.unevaluated_atom.use)
+ virt_atom = Atom(virt_atom)
+ if parent is None:
+ if myuse is None:
+ virt_atom = virt_atom.evaluate_conditionals(
+ mysettings.get("PORTAGE_USE", "").split())
+ else:
+ virt_atom = virt_atom.evaluate_conditionals(myuse)
+ else:
+ virt_atom = virt_atom.evaluate_conditionals(
+ pkg_use_enabled(parent))
+ else:
+ virt_atom = Atom(virt_atom)
+
+ # Allow the depgraph to map this atom back to the
+ # original, in order to avoid distortion in places
+ # like display or conflict resolution code.
+ virt_atom.__dict__['_orig_atom'] = x
+
+ # According to GLEP 37, RDEPEND is the only dependency
+ # type that is valid for new-style virtuals. Repoman
+ # should enforce this.
+ depstring = pkg.metadata['RDEPEND']
+ pkg_kwargs = kwargs.copy()
+ pkg_kwargs["myuse"] = pkg_use_enabled(pkg)
+ if edebug:
+ writemsg_level(_("Virtual Parent: %s\n") \
+ % (pkg,), noiselevel=-1, level=logging.DEBUG)
+ writemsg_level(_("Virtual Depstring: %s\n") \
+ % (depstring,), noiselevel=-1, level=logging.DEBUG)
+
+ # Set EAPI used for validation in dep_check() recursion.
+ mytrees["virt_parent"] = pkg
+
+ try:
+ mycheck = dep_check(depstring, mydbapi, mysettings,
+ myroot=myroot, trees=trees, **pkg_kwargs)
+ finally:
+ # Restore previous EAPI after recursion.
+ if virt_parent is not None:
+ mytrees["virt_parent"] = virt_parent
+ else:
+ del mytrees["virt_parent"]
+
+ if not mycheck[0]:
+ raise ParseError(_unicode_decode("%s: %s '%s'") % \
+ (pkg, mycheck[1], depstring))
+
+ # pull in the new-style virtual
+ mycheck[1].append(virt_atom)
+ a.append(mycheck[1])
+ if atom_graph is not None:
+ virt_atom_node = (virt_atom, id(virt_atom))
+ atom_graph.add(virt_atom_node, graph_parent)
+ atom_graph.add(pkg, virt_atom_node)
+ # Plain old-style virtuals. New-style virtuals are preferred.
+ if not pkgs:
+ for y in mychoices:
+ new_atom = Atom(x.replace(x.cp, y.cp, 1))
+ matches = portdb.match(new_atom)
+ # portdb is an instance of depgraph._dep_check_composite_db, so
+ # USE conditionals are already evaluated.
+ if matches and mykey in \
+ portdb.aux_get(matches[-1], ['PROVIDE'])[0].split():
+ a.append(new_atom)
+ if atom_graph is not None:
+ atom_graph.add((new_atom, id(new_atom)),
+ graph_parent)
+
+ if not a and mychoices:
+ # Check for a virtual package.provided match.
+ for y in mychoices:
+ new_atom = Atom(x.replace(x.cp, y.cp, 1))
+ if match_from_list(new_atom,
+ pprovideddict.get(new_atom.cp, [])):
+ a.append(new_atom)
+ if atom_graph is not None:
+ atom_graph.add((new_atom, id(new_atom)), graph_parent)
+
+ if not a:
+ newsplit.append(x)
+ if atom_graph is not None:
+ atom_graph.add((x, id(x)), graph_parent)
+ elif len(a) == 1:
+ newsplit.append(a[0])
+ else:
+ newsplit.append(['||'] + a)
+
+ return newsplit
+
+def dep_eval(deplist):
+ if not deplist:
+ return 1
+ if deplist[0]=="||":
+ #or list; we just need one "1"
+ for x in deplist[1:]:
+ if isinstance(x, list):
+ if dep_eval(x)==1:
+ return 1
+ elif x==1:
+ return 1
+ #XXX: unless there are no available atoms in the list
+ #in which case we need to assume that everything is
+ #okay as some ebuilds are relying on an old bug.
+ if len(deplist) == 1:
+ return 1
+ return 0
+ else:
+ for x in deplist:
+ if isinstance(x, list):
+ if dep_eval(x)==0:
+ return 0
+ elif x==0 or x==2:
+ return 0
+ return 1
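+
+# Illustrative examples (not in the original module): dep_eval() operates
+# on the reduced truth-value lists produced by dep_wordreduce():
+#
+#     >>> dep_eval(["||", 0, 1])      # any-of: one satisfied branch suffices
+#     1
+#     >>> dep_eval([1, ["||", 0, 0]]) # all-of: the nested any-of fails
+#     0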
+
+def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
+ """
+ Takes an unreduced and reduced deplist and removes satisfied dependencies.
+ Returned deplist contains steps that must be taken to satisfy dependencies.
+ """
+ if trees is None:
+ trees = portage.db
+ writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
+ if not reduced or unreduced == ["||"] or dep_eval(reduced):
+ return []
+
+ if unreduced[0] != "||":
+ unresolved = []
+ for x, satisfied in zip(unreduced, reduced):
+ if isinstance(x, list):
+ unresolved += dep_zapdeps(x, satisfied, myroot,
+ use_binaries=use_binaries, trees=trees)
+ elif not satisfied:
+ unresolved.append(x)
+ return unresolved
+
+ # We're at a ( || atom ... ) type level and need to make a choice
+ deps = unreduced[1:]
+ satisfieds = reduced[1:]
+
+ # Our preference order is for the first item that:
+ # a) contains all unmasked packages with the same key as installed packages
+ # b) contains all unmasked packages
+ # c) contains masked installed packages
+ # d) is the first item
+
+ preferred_installed = []
+ preferred_in_graph = []
+ preferred_any_slot = []
+ preferred_non_installed = []
+ unsat_use_in_graph = []
+ unsat_use_installed = []
+ unsat_use_non_installed = []
+ other_installed = []
+ other_installed_some = []
+ other = []
+
+ # unsat_use_* must come after preferred_non_installed
+ # for correct ordering in cases like || ( foo[a] foo[b] ).
+ choice_bins = (
+ preferred_in_graph,
+ preferred_installed,
+ preferred_any_slot,
+ preferred_non_installed,
+ unsat_use_in_graph,
+ unsat_use_installed,
+ unsat_use_non_installed,
+ other_installed,
+ other_installed_some,
+ other,
+ )
+
+ # Alias the trees we'll be checking availability against
+ parent = trees[myroot].get("parent")
+ priority = trees[myroot].get("priority")
+ graph_db = trees[myroot].get("graph_db")
+ graph = trees[myroot].get("graph")
+ vardb = None
+ if "vartree" in trees[myroot]:
+ vardb = trees[myroot]["vartree"].dbapi
+ if use_binaries:
+ mydbapi = trees[myroot]["bintree"].dbapi
+ else:
+ mydbapi = trees[myroot]["porttree"].dbapi
+
+ # Sort the deps into installed, not installed but already
+ # in the graph, and not installed and not in the graph.
+ # Each choice is recorded as (atoms, slot_map, cp_map, all_available).
+ for x, satisfied in zip(deps, satisfieds):
+ if isinstance(x, list):
+ atoms = dep_zapdeps(x, satisfied, myroot,
+ use_binaries=use_binaries, trees=trees)
+ else:
+ atoms = [x]
+ if vardb is None:
+ # When called by repoman, we can simply return the first choice
+ # because dep_eval() handles preference selection.
+ return atoms
+
+ all_available = True
+ all_use_satisfied = True
+ slot_map = {}
+ cp_map = {}
+ for atom in atoms:
+ if atom.blocker:
+ continue
+ # Ignore USE dependencies here since we don't want USE
+ # settings to adversely affect || preference evaluation.
+ avail_pkg = mydbapi.match(atom.without_use)
+ if avail_pkg:
+ avail_pkg = avail_pkg[-1] # highest (ascending order)
+ avail_slot = Atom("%s:%s" % (atom.cp,
+ mydbapi.aux_get(avail_pkg, ["SLOT"])[0]))
+ if not avail_pkg:
+ all_available = False
+ all_use_satisfied = False
+ break
+
+ if atom.use:
+ avail_pkg_use = mydbapi.match(atom)
+ if not avail_pkg_use:
+ all_use_satisfied = False
+ else:
+ # highest (ascending order)
+ avail_pkg_use = avail_pkg_use[-1]
+ if avail_pkg_use != avail_pkg:
+ avail_pkg = avail_pkg_use
+ avail_slot = Atom("%s:%s" % (atom.cp,
+ mydbapi.aux_get(avail_pkg, ["SLOT"])[0]))
+
+ slot_map[avail_slot] = avail_pkg
+ pkg_cp = cpv_getkey(avail_pkg)
+ highest_cpv = cp_map.get(pkg_cp)
+ if highest_cpv is None or \
+ pkgcmp(catpkgsplit(avail_pkg)[1:],
+ catpkgsplit(highest_cpv)[1:]) > 0:
+ cp_map[pkg_cp] = avail_pkg
+
+ this_choice = (atoms, slot_map, cp_map, all_available)
+ if all_available:
+ # The "all installed" criterion is not version or slot specific.
+ # If any version of a package is already in the graph then we
+ # assume that it is preferred over other possible package choices.
+ all_installed = True
+ for atom in set(Atom(atom.cp) for atom in atoms \
+ if not atom.blocker):
+ # New-style virtuals have zero cost to install.
+ if not vardb.match(atom) and not atom.startswith("virtual/"):
+ all_installed = False
+ break
+ all_installed_slots = False
+ if all_installed:
+ all_installed_slots = True
+ for slot_atom in slot_map:
+ # New-style virtuals have zero cost to install.
+ if not vardb.match(slot_atom) and \
+ not slot_atom.startswith("virtual/"):
+ all_installed_slots = False
+ break
+ if graph_db is None:
+ if all_use_satisfied:
+ if all_installed:
+ if all_installed_slots:
+ preferred_installed.append(this_choice)
+ else:
+ preferred_any_slot.append(this_choice)
+ else:
+ preferred_non_installed.append(this_choice)
+ else:
+ if all_installed_slots:
+ unsat_use_installed.append(this_choice)
+ else:
+ unsat_use_non_installed.append(this_choice)
+ else:
+ all_in_graph = True
+ for slot_atom in slot_map:
+ # New-style virtuals have zero cost to install.
+ if slot_atom.startswith("virtual/"):
+ continue
+ # We check if the matched package has actually been
+ # added to the digraph, in order to distinguish between
+ # those packages and installed packages that may need
+ # to be uninstalled in order to resolve blockers.
+ graph_matches = graph_db.match_pkgs(slot_atom)
+ if not graph_matches or graph_matches[-1] not in graph:
+ all_in_graph = False
+ break
+ circular_atom = None
+ if all_in_graph:
+ if parent is None or priority is None:
+ pass
+ elif priority.buildtime and \
+ not (priority.satisfied or priority.optional):
+ # Check if the atom would result in a direct circular
+ # dependency and try to avoid that if it seems likely
+ # to be unresolvable. This is only relevant for
+ # buildtime deps that aren't already satisfied by an
+ # installed package.
+ cpv_slot_list = [parent]
+ for atom in atoms:
+ if atom.blocker:
+ continue
+ if vardb.match(atom):
+ # If the atom is satisfied by an installed
+ # version then it's not a circular dep.
+ continue
+ if atom.cp != parent.cp:
+ continue
+ if match_from_list(atom, cpv_slot_list):
+ circular_atom = atom
+ break
+ if circular_atom is not None:
+ other.append(this_choice)
+ else:
+ if all_use_satisfied:
+ if all_in_graph:
+ preferred_in_graph.append(this_choice)
+ elif all_installed:
+ if all_installed_slots:
+ preferred_installed.append(this_choice)
+ else:
+ preferred_any_slot.append(this_choice)
+ else:
+ preferred_non_installed.append(this_choice)
+ else:
+ if all_in_graph:
+ unsat_use_in_graph.append(this_choice)
+ elif all_installed_slots:
+ unsat_use_installed.append(this_choice)
+ else:
+ unsat_use_non_installed.append(this_choice)
+ else:
+ all_installed = True
+ some_installed = False
+ for atom in atoms:
+ if not atom.blocker:
+ if vardb.match(atom):
+ some_installed = True
+ else:
+ all_installed = False
+
+ if all_installed:
+ other_installed.append(this_choice)
+ elif some_installed:
+ other_installed_some.append(this_choice)
+ else:
+ other.append(this_choice)
+
+ # Prefer choices which contain upgrades to higher slots. This helps
+ # for deps such as || ( foo:1 foo:2 ), where we want to prefer the
+ # atom which matches the higher version rather than the atom furthest
+ # to the left. Sorting is done separately for each of choice_bins, so
+ # as not to interfere with the ordering of the bins. Because of the
+ # bin separation, the main function of this code is to allow
+ # --depclean to remove old slots (rather than to pull in new slots).
+ for choices in choice_bins:
+ if len(choices) < 2:
+ continue
+ for choice_1 in choices[1:]:
+ atoms_1, slot_map_1, cp_map_1, all_available_1 = choice_1
+ cps = set(cp_map_1)
+ for choice_2 in choices:
+ if choice_1 is choice_2:
+ # choice_1 will not be promoted, so move on
+ break
+ atoms_2, slot_map_2, cp_map_2, all_available_2 = choice_2
+ intersecting_cps = cps.intersection(cp_map_2)
+ if not intersecting_cps:
+ continue
+ has_upgrade = False
+ has_downgrade = False
+ for cp in intersecting_cps:
+ version_1 = cp_map_1[cp]
+ version_2 = cp_map_2[cp]
+ difference = pkgcmp(catpkgsplit(version_1)[1:],
+ catpkgsplit(version_2)[1:])
+ if difference != 0:
+ if difference > 0:
+ has_upgrade = True
+ else:
+ has_downgrade = True
+ break
+ if has_upgrade and not has_downgrade:
+ # promote choice_1 in front of choice_2
+ choices.remove(choice_1)
+ index_2 = choices.index(choice_2)
+ choices.insert(index_2, choice_1)
+ break
+
+ for allow_masked in (False, True):
+ for choices in choice_bins:
+ for atoms, slot_map, cp_map, all_available in choices:
+ if all_available or allow_masked:
+ return atoms
+
+ assert(False) # This point should not be reachable
+
+def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None,
+ use_cache=1, use_binaries=0, myroot="/", trees=None):
+ """Takes a depend string and parses the condition."""
+ edebug = mysettings.get("PORTAGE_DEBUG", None) == "1"
+ #check_config_instance(mysettings)
+ if trees is None:
+ trees = globals()["db"]
+ if use=="yes":
+ if myuse is None:
+ #default behavior
+ myusesplit = mysettings["PORTAGE_USE"].split()
+ else:
+ myusesplit = myuse
+ # We've been given useflags to use.
+ #print "USE FLAGS PASSED IN."
+ #print myuse
+ #if "bindist" in myusesplit:
+ # print "BINDIST is set!"
+ #else:
+ # print "BINDIST NOT set."
+ else:
+ #we are being run by autouse(), don't consult USE vars yet.
+ # WE ALSO CANNOT USE SETTINGS
+ myusesplit=[]
+
+ mymasks = set()
+ useforce = set()
+ useforce.add(mysettings["ARCH"])
+ if use == "all":
+ # This masking/forcing is only for repoman. In other cases, relevant
+ # masking/forcing should have already been applied via
+ # config.regenerate(). Also, binary or installed packages may have
+ # been built with flags that are now masked, and it would be
+ # inconsistent to mask them now. Additionally, myuse may consist of
+ # flags from a parent package that is being merged to a $ROOT that is
+ # different from the one that mysettings represents.
+ mymasks.update(mysettings.usemask)
+ mymasks.update(mysettings.archlist())
+ mymasks.discard(mysettings["ARCH"])
+ useforce.update(mysettings.useforce)
+ useforce.difference_update(mymasks)
+
+ # eapi code borrowed from _expand_new_virtuals()
+ mytrees = trees[myroot]
+ parent = mytrees.get("parent")
+ virt_parent = mytrees.get("virt_parent")
+ current_parent = None
+ eapi = None
+ if parent is not None:
+ if virt_parent is not None:
+ current_parent = virt_parent
+ else:
+ current_parent = parent
+
+ if current_parent is not None:
+ # Don't pass the eapi argument to use_reduce() for installed packages
+ # since previous validation will have already marked them as invalid
+ # when necessary and now we're more interested in evaluating
+ # dependencies so that things like --depclean work as well as possible
+ # in spite of partial invalidity.
+ if not current_parent.installed:
+ eapi = current_parent.metadata['EAPI']
+
+ try:
+ mysplit = use_reduce(depstring, uselist=myusesplit, masklist=mymasks, \
+ matchall=(use=="all"), excludeall=useforce, opconvert=True, \
+ token_class=Atom, eapi=eapi)
+ except InvalidDependString as e:
+ return [0, _unicode_decode("%s") % (e,)]
+
+ if mysplit == []:
+ #dependencies were reduced to nothing
+ return [1,[]]
+
+ # Recursively expand new-style virtuals so as to
+ # collapse one or more levels of indirection.
+ try:
+ mysplit = _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings,
+ use=use, mode=mode, myuse=myuse,
+ use_force=useforce, use_mask=mymasks, use_cache=use_cache,
+ use_binaries=use_binaries, myroot=myroot, trees=trees)
+ except ParseError as e:
+ return [0, _unicode_decode("%s") % (e,)]
+
+ mysplit2=mysplit[:]
+ mysplit2=dep_wordreduce(mysplit2,mysettings,mydbapi,mode,use_cache=use_cache)
+ if mysplit2 is None:
+ return [0, _("Invalid token")]
+
+ writemsg("\n\n\n", 1)
+ writemsg("mysplit: %s\n" % (mysplit), 1)
+ writemsg("mysplit2: %s\n" % (mysplit2), 1)
+
+ selected_atoms = dep_zapdeps(mysplit, mysplit2, myroot,
+ use_binaries=use_binaries, trees=trees)
+
+ return [1, selected_atoms]
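+
+# Return-shape note (illustrative, not part of the original source):
+# dep_check() yields [1, selected_atoms] on success, e.g. [1, []] when the
+# dependency string reduces to nothing, and [0, error_message] when
+# use_reduce(), the virtual expansion, or dep_wordreduce() hits an invalid
+# token.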
+
+def dep_wordreduce(mydeplist,mysettings,mydbapi,mode,use_cache=1):
+ "Reduces the deplist to ones and zeros"
+ deplist=mydeplist[:]
+ for mypos, token in enumerate(deplist):
+ if isinstance(deplist[mypos], list):
+ #recurse
+ deplist[mypos]=dep_wordreduce(deplist[mypos],mysettings,mydbapi,mode,use_cache=use_cache)
+ elif deplist[mypos]=="||":
+ pass
+ elif token[:1] == "!":
+ deplist[mypos] = False
+ else:
+ mykey = deplist[mypos].cp
+ if mysettings and mykey in mysettings.pprovideddict and \
+ match_from_list(deplist[mypos], mysettings.pprovideddict[mykey]):
+ deplist[mypos]=True
+ elif mydbapi is None:
+ # Assume nothing is satisfied. This forces dep_zapdeps to
+ # return all of the deps that have been selected
+ # (excluding those satisfied by package.provided).
+ deplist[mypos] = False
+ else:
+ if mode:
+ x = mydbapi.xmatch(mode, deplist[mypos])
+ if mode.startswith("minimum-"):
+ mydep = []
+ if x:
+ mydep.append(x)
+ else:
+ mydep = x
+ else:
+ mydep=mydbapi.match(deplist[mypos],use_cache=use_cache)
+ if mydep is not None:
+ tmp=(len(mydep)>=1)
+ if deplist[mypos][0]=="!":
+ tmp=False
+ deplist[mypos]=tmp
+ else:
+ #encountered invalid string
+ return None
+ return deplist
diff --git a/portage_with_autodep/pym/portage/dispatch_conf.py b/portage_with_autodep/pym/portage/dispatch_conf.py
new file mode 100644
index 0000000..4991020
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dispatch_conf.py
@@ -0,0 +1,188 @@
+# archive_conf.py -- functionality common to archive-conf and dispatch-conf
+# Copyright 2003-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+
+# Library by Wayne Davison <gentoo@blorf.net>, derived from code
+# written by Jeremy Wohl (http://igmus.org)
+
+from __future__ import print_function
+
+import os, sys, shutil
+
+import portage
+from portage.env.loaders import KeyValuePairFileLoader
+from portage.localization import _
+
+RCS_BRANCH = '1.1.1'
+RCS_LOCK = 'rcs -ko -M -l'
+RCS_PUT = 'ci -t-"Archived config file." -m"dispatch-conf update."'
+RCS_GET = 'co'
+RCS_MERGE = "rcsmerge -p -r" + RCS_BRANCH + " '%s' > '%s'"
+
+DIFF3_MERGE = "diff3 -mE '%s' '%s' '%s' > '%s'"
+
+def diffstatusoutput_len(cmd):
+ """
+ Execute the string cmd in a shell with getstatusoutput() and return a
+ 2-tuple (status, output_length). If getstatusoutput() raises
+ UnicodeDecodeError (known to happen with python3.1), return a
+ 2-tuple (1, 1). This provides a simple way to check for non-zero
+ output length of diff commands, while providing simple handling of
+ UnicodeDecodeError when necessary.
+ """
+ try:
+ status, output = portage.subprocess_getstatusoutput(cmd)
+ return (status, len(output))
+ except UnicodeDecodeError:
+ return (1, 1)
+
+def read_config(mandatory_opts):
+ loader = KeyValuePairFileLoader(
+ '/etc/dispatch-conf.conf', None)
+ opts, errors = loader.load()
+ if not opts:
+ print(_('dispatch-conf: Error reading /etc/dispatch-conf.conf; fatal'), file=sys.stderr)
+ sys.exit(1)
+
+ # Handle quote removal here, since KeyValuePairFileLoader doesn't do that.
+ quotes = "\"'"
+ for k, v in opts.items():
+ if v[:1] in quotes and v[:1] == v[-1:]:
+ opts[k] = v[1:-1]
+
+ for key in mandatory_opts:
+ if key not in opts:
+ if key == "merge":
+ opts["merge"] = "sdiff --suppress-common-lines --output='%s' '%s' '%s'"
+ else:
+ print(_('dispatch-conf: Missing option "%s" in /etc/dispatch-conf.conf; fatal') % (key,), file=sys.stderr)
+
+ if not os.path.exists(opts['archive-dir']):
+ os.mkdir(opts['archive-dir'])
+ # Use restrictive permissions by default, in order to protect
+ # against vulnerabilities (like bug #315603 involving rcs).
+ os.chmod(opts['archive-dir'], 0o700)
+ elif not os.path.isdir(opts['archive-dir']):
+ print(_('dispatch-conf: Config archive dir [%s] must exist; fatal') % (opts['archive-dir'],), file=sys.stderr)
+ sys.exit(1)
+
+ return opts
+
+
+def rcs_archive(archive, curconf, newconf, mrgconf):
+ """Archive existing config in rcs (on trunk). Then, if mrgconf is
+ specified and an old branch version exists, merge the user's changes
+ and the distributed changes and put the result into mrgconf. Lastly,
+ if newconf was specified, leave it in the archive dir with a .dist.new
+ suffix along with the last 1.1.1 branch version with a .dist suffix."""
+
+ try:
+ os.makedirs(os.path.dirname(archive))
+ except OSError:
+ pass
+
+ if os.path.isfile(curconf):
+ try:
+ shutil.copy2(curconf, archive)
+ except(IOError, os.error) as why:
+ print(_('dispatch-conf: Error copying %(curconf)s to %(archive)s: %(reason)s; fatal') % \
+ {"curconf": curconf, "archive": archive, "reason": str(why)}, file=sys.stderr)
+
+ if os.path.exists(archive + ',v'):
+ os.system(RCS_LOCK + ' ' + archive)
+ os.system(RCS_PUT + ' ' + archive)
+
+ ret = 0
+ if newconf != '':
+ os.system(RCS_GET + ' -r' + RCS_BRANCH + ' ' + archive)
+ has_branch = os.path.exists(archive)
+ if has_branch:
+ os.rename(archive, archive + '.dist')
+
+ try:
+ shutil.copy2(newconf, archive)
+ except(IOError, os.error) as why:
+ print(_('dispatch-conf: Error copying %(newconf)s to %(archive)s: %(reason)s; fatal') % \
+ {"newconf": newconf, "archive": archive, "reason": str(why)}, file=sys.stderr)
+
+ if has_branch:
+ if mrgconf != '':
+ # This puts the results of the merge into mrgconf.
+ ret = os.system(RCS_MERGE % (archive, mrgconf))
+ mystat = os.lstat(newconf)
+ os.chmod(mrgconf, mystat.st_mode)
+ os.chown(mrgconf, mystat.st_uid, mystat.st_gid)
+ os.rename(archive, archive + '.dist.new')
+ return ret
+
+
+def file_archive(archive, curconf, newconf, mrgconf):
+ """Archive existing config to the archive-dir, bumping old versions
+ out of the way into .# versions (log-rotate style). Then, if mrgconf
+ was specified and there is a .dist version, merge the user's changes
+ and the distributed changes and put the result into mrgconf. Lastly,
+ if newconf was specified, archive it as a .dist.new version (which
+ gets moved to the .dist version at the end of the processing)."""
+
+ try:
+ os.makedirs(os.path.dirname(archive))
+ except OSError:
+ pass
+
+ # Archive the current config file if it isn't already saved
+ if os.path.exists(archive) \
+ and diffstatusoutput_len("diff -aq '%s' '%s'" % (curconf,archive))[1] != 0:
+ suf = 1
+ while suf < 9 and os.path.exists(archive + '.' + str(suf)):
+ suf += 1
+
+ while suf > 1:
+ os.rename(archive + '.' + str(suf-1), archive + '.' + str(suf))
+ suf -= 1
+
+ os.rename(archive, archive + '.1')
+
+ if os.path.isfile(curconf):
+ try:
+ shutil.copy2(curconf, archive)
+ except(IOError, os.error) as why:
+ print(_('dispatch-conf: Error copying %(curconf)s to %(archive)s: %(reason)s; fatal') % \
+ {"curconf": curconf, "archive": archive, "reason": str(why)}, file=sys.stderr)
+
+ if newconf != '':
+ # Save off new config file in the archive dir with .dist.new suffix
+ try:
+ shutil.copy2(newconf, archive + '.dist.new')
+ except(IOError, os.error) as why:
+ print(_('dispatch-conf: Error copying %(newconf)s to %(archive)s: %(reason)s; fatal') % \
+ {"newconf": newconf, "archive": archive + '.dist.new', "reason": str(why)}, file=sys.stderr)
+
+ ret = 0
+ if mrgconf != '' and os.path.exists(archive + '.dist'):
+ # This puts the results of the merge into mrgconf.
+ ret = os.system(DIFF3_MERGE % (curconf, archive + '.dist', newconf, mrgconf))
+ mystat = os.lstat(newconf)
+ os.chmod(mrgconf, mystat.st_mode)
+ os.chown(mrgconf, mystat.st_uid, mystat.st_gid)
+
+ return ret
+
+
+def rcs_archive_post_process(archive):
+ """Check in the archive file with the .dist.new suffix on the branch
+ and remove the one with the .dist suffix."""
+ os.rename(archive + '.dist.new', archive)
+ if os.path.exists(archive + '.dist'):
+ # Commit the last-distributed version onto the branch.
+ os.system(RCS_LOCK + RCS_BRANCH + ' ' + archive)
+ os.system(RCS_PUT + ' -r' + RCS_BRANCH + ' ' + archive)
+ os.unlink(archive + '.dist')
+ else:
+ # Forcefully commit the last-distributed version onto the branch.
+ os.system(RCS_PUT + ' -f -r' + RCS_BRANCH + ' ' + archive)
+
+
+def file_archive_post_process(archive):
+ """Rename the archive file with the .dist.new suffix to a .dist suffix"""
+ os.rename(archive + '.dist.new', archive + '.dist')
diff --git a/portage_with_autodep/pym/portage/eapi.py b/portage_with_autodep/pym/portage/eapi.py
new file mode 100644
index 0000000..da5fd8c
--- /dev/null
+++ b/portage_with_autodep/pym/portage/eapi.py
@@ -0,0 +1,50 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+def eapi_has_iuse_defaults(eapi):
+ return eapi != "0"
+
+def eapi_has_slot_deps(eapi):
+ return eapi != "0"
+
+def eapi_has_src_uri_arrows(eapi):
+ return eapi not in ("0", "1")
+
+def eapi_has_use_deps(eapi):
+ return eapi not in ("0", "1")
+
+def eapi_has_strong_blocks(eapi):
+ return eapi not in ("0", "1")
+
+def eapi_has_src_prepare_and_src_configure(eapi):
+ return eapi not in ("0", "1")
+
+def eapi_supports_prefix(eapi):
+ return eapi not in ("0", "1", "2")
+
+def eapi_exports_AA(eapi):
+ return eapi in ("0", "1", "2", "3")
+
+def eapi_exports_KV(eapi):
+ return eapi in ("0", "1", "2", "3")
+
+def eapi_exports_merge_type(eapi):
+ return eapi not in ("0", "1", "2", "3")
+
+def eapi_exports_replace_vars(eapi):
+ return eapi not in ("0", "1", "2", "3")
+
+def eapi_has_pkg_pretend(eapi):
+ return eapi not in ("0", "1", "2", "3")
+
+def eapi_has_implicit_rdepend(eapi):
+ return eapi in ("0", "1", "2", "3")
+
+def eapi_has_dosed_dohard(eapi):
+ return eapi in ("0", "1", "2", "3")
+
+def eapi_has_required_use(eapi):
+ return eapi not in ("0", "1", "2", "3")
+
+def eapi_has_use_dep_defaults(eapi):
+ return eapi not in ("0", "1", "2", "3")
diff --git a/portage_with_autodep/pym/portage/eclass_cache.py b/portage_with_autodep/pym/portage/eclass_cache.py
new file mode 100644
index 0000000..1374f1d
--- /dev/null
+++ b/portage_with_autodep/pym/portage/eclass_cache.py
@@ -0,0 +1,123 @@
+# Copyright 2005-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# Author(s): Nicholas Carpaski (carpaski@gentoo.org), Brian Harring (ferringb@gentoo.org)
+
+__all__ = ["cache"]
+
+import stat
+import sys
+from portage.util import normalize_path
+import errno
+from portage.exception import PermissionDenied
+from portage import os
+
+if sys.hexversion >= 0x3000000:
+ long = int
+
+class cache(object):
+ """
+ Maintains cache information about the eclasses used by ebuilds.
+ """
+ def __init__(self, porttree_root, overlays=[]):
+
+ self.eclasses = {} # {"Name": ("location","_mtime_")}
+ self._eclass_locations = {}
+
+ # screw with the porttree ordering, w/out having bash inherit match it, and I'll hurt you.
+ # ~harring
+ if porttree_root:
+ self.porttree_root = porttree_root
+ self.porttrees = [self.porttree_root] + overlays
+ self.porttrees = tuple(map(normalize_path, self.porttrees))
+ self._master_eclass_root = os.path.join(self.porttrees[0], "eclass")
+ self.update_eclasses()
+ else:
+ self.porttree_root = None
+ self.porttrees = ()
+ self._master_eclass_root = None
+
+ def copy(self):
+ return self.__copy__()
+
+ def __copy__(self):
+ result = self.__class__(None)
+ result.eclasses = self.eclasses.copy()
+ result._eclass_locations = self._eclass_locations.copy()
+ result.porttree_root = self.porttree_root
+ result.porttrees = self.porttrees
+ result._master_eclass_root = self._master_eclass_root
+ return result
+
+ def append(self, other):
+ """
+ Append another instance to this instance. This will cause eclasses
+ from the other instance to override any eclasses from this instance
+ that have the same name.
+ """
+ if not isinstance(other, self.__class__):
+ raise TypeError(
+ "expected type %s, got %s" % (self.__class__, type(other)))
+ self.porttrees = self.porttrees + other.porttrees
+ self.eclasses.update(other.eclasses)
+ self._eclass_locations.update(other._eclass_locations)
+
+ def update_eclasses(self):
+ self.eclasses = {}
+ self._eclass_locations = {}
+ master_eclasses = {}
+ eclass_len = len(".eclass")
+ ignored_listdir_errnos = (errno.ENOENT, errno.ENOTDIR)
+ for x in [normalize_path(os.path.join(y,"eclass")) for y in self.porttrees]:
+ try:
+ eclass_filenames = os.listdir(x)
+ except OSError as e:
+ if e.errno in ignored_listdir_errnos:
+ del e
+ continue
+ elif e.errno == PermissionDenied.errno:
+ raise PermissionDenied(x)
+ raise
+ for y in eclass_filenames:
+ if not y.endswith(".eclass"):
+ continue
+ try:
+ mtime = os.stat(os.path.join(x, y))[stat.ST_MTIME]
+ except OSError:
+ continue
+ ys=y[:-eclass_len]
+ if x == self._master_eclass_root:
+ master_eclasses[ys] = mtime
+ self.eclasses[ys] = (x, mtime)
+ self._eclass_locations[ys] = x
+ continue
+
+ master_mtime = master_eclasses.get(ys)
+ if master_mtime is not None:
+ if master_mtime == mtime:
+ # It appears to be identical to the master,
+ # so prefer the master entry.
+ continue
+
+ self.eclasses[ys] = (x, mtime)
+ self._eclass_locations[ys] = x
+
+ def is_eclass_data_valid(self, ec_dict):
+ if not isinstance(ec_dict, dict):
+ return False
+ for eclass, tup in ec_dict.items():
+ cached_data = self.eclasses.get(eclass, None)
+ """ Only use the mtime for validation since the probability of a
+ collision is small and, depending on the cache implementation, the
+ path may not be specified (cache from rsync mirrors, for example).
+ """
+ if cached_data is None or tup[1] != cached_data[1]:
+ return False
+
+ return True
+
+ def get_eclass_data(self, inherits):
+ ec_dict = {}
+ for x in inherits:
+ ec_dict[x] = self.eclasses[x]
+
+ return ec_dict
diff --git a/portage_with_autodep/pym/portage/elog/__init__.py b/portage_with_autodep/pym/portage/elog/__init__.py
new file mode 100644
index 0000000..1a8309d
--- /dev/null
+++ b/portage_with_autodep/pym/portage/elog/__init__.py
@@ -0,0 +1,182 @@
+# elog/__init__.py - elog core functions
+# Copyright 2006-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.util:writemsg',
+)
+
+from portage.const import EBUILD_PHASES
+from portage.exception import AlarmSignal, PortageException
+from portage.process import atexit_register
+from portage.elog.messages import collect_ebuild_messages, collect_messages
+from portage.elog.filtering import filter_loglevels
+from portage.localization import _
+from portage import os
+
+def _preload_elog_modules(settings):
+ logsystems = settings.get("PORTAGE_ELOG_SYSTEM", "").split()
+ for s in logsystems:
+ # allow per module overrides of PORTAGE_ELOG_CLASSES
+ if ":" in s:
+ s, levels = s.split(":", 1)
+ levels = levels.split(",")
+ # - is nicer than _ for module names, so allow people to use it.
+ s = s.replace("-", "_")
+ try:
+ _load_mod("portage.elog.mod_" + s)
+ except ImportError:
+ pass
+
+def _merge_logentries(a, b):
+ rValue = {}
+ phases = set(a)
+ phases.update(b)
+ for p in phases:
+ merged_msgs = []
+ rValue[p] = merged_msgs
+ for d in a, b:
+ msgs = d.get(p)
+ if msgs:
+ merged_msgs.extend(msgs)
+ return rValue
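+
+# Illustrative example (not in the original module): entries from the first
+# mapping come before entries from the second, per phase:
+#
+#     >>> _merge_logentries({"setup": [("INFO", ["a"])]},
+#     ...                   {"setup": [("WARN", ["b"])]})
+#     {'setup': [('INFO', ['a']), ('WARN', ['b'])]}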
+
+def _combine_logentries(logentries):
+ # generate a single string with all log messages
+ rValue = []
+ for phase in EBUILD_PHASES:
+ if not phase in logentries:
+ continue
+ previous_type = None
+ for msgtype, msgcontent in logentries[phase]:
+ if previous_type != msgtype:
+ previous_type = msgtype
+ rValue.append("%s: %s\n" % (msgtype, phase))
+ for line in msgcontent:
+ rValue.append(line)
+ rValue.append("\n")
+ return "".join(rValue)
+
+_elog_mod_imports = {}
+def _load_mod(name):
+ global _elog_mod_imports
+ m = _elog_mod_imports.get(name)
+ if m is None:
+ m = __import__(name)
+ for comp in name.split(".")[1:]:
+ m = getattr(m, comp)
+ _elog_mod_imports[name] = m
+ return m
+
+_elog_listeners = []
+def add_listener(listener):
+ '''
+ Listeners should accept four arguments: settings, key, logentries and logtext
+ '''
+ _elog_listeners.append(listener)
+
+def remove_listener(listener):
+ '''
+ Remove previously added listener
+ '''
+ _elog_listeners.remove(listener)
+
+_elog_atexit_handlers = []
+
+def elog_process(cpv, mysettings, phasefilter=None):
+ global _elog_atexit_handlers
+
+ logsystems = mysettings.get("PORTAGE_ELOG_SYSTEM","").split()
+ for s in logsystems:
+ # allow per module overrides of PORTAGE_ELOG_CLASSES
+ if ":" in s:
+ s, levels = s.split(":", 1)
+ levels = levels.split(",")
+ # - is nicer than _ for module names, so allow people to use it.
+ s = s.replace("-", "_")
+ try:
+ _load_mod("portage.elog.mod_" + s)
+ except ImportError:
+ pass
+
+ if "T" in mysettings:
+ ebuild_logentries = collect_ebuild_messages(
+ os.path.join(mysettings["T"], "logging"))
+ else:
+ # A build dir isn't necessarily required since the messages.e*
+ # functions allow messages to be generated in-memory.
+ ebuild_logentries = {}
+ all_logentries = collect_messages(key=cpv, phasefilter=phasefilter)
+ if cpv in all_logentries:
+ # Messages generated by the python elog implementation are assumed
+ # to come first. For example, this ensures correct order for einfo
+ # messages that are generated prior to the setup phase.
+ all_logentries[cpv] = \
+ _merge_logentries(all_logentries[cpv], ebuild_logentries)
+ else:
+ all_logentries[cpv] = ebuild_logentries
+
+ my_elog_classes = set(mysettings.get("PORTAGE_ELOG_CLASSES", "").split())
+ logsystems = {}
+ for token in mysettings.get("PORTAGE_ELOG_SYSTEM", "").split():
+ if ":" in token:
+ s, levels = token.split(":", 1)
+ levels = levels.split(",")
+ else:
+ s = token
+ levels = ()
+ levels_set = logsystems.get(s)
+ if levels_set is None:
+ levels_set = set()
+ logsystems[s] = levels_set
+ levels_set.update(levels)
+
+ for key in all_logentries:
+ default_logentries = filter_loglevels(all_logentries[key], my_elog_classes)
+
+ # in case the filters matched all messages and no module overrides exist
+ if len(default_logentries) == 0 and (not ":" in mysettings.get("PORTAGE_ELOG_SYSTEM", "")):
+ continue
+
+ default_fulllog = _combine_logentries(default_logentries)
+
+ # call listeners
+ for listener in _elog_listeners:
+ listener(mysettings, str(key), default_logentries, default_fulllog)
+
+ # pass the processing to the individual modules
+ for s, levels in logsystems.items():
+ # allow per module overrides of PORTAGE_ELOG_CLASSES
+ if levels:
+ mod_logentries = filter_loglevels(all_logentries[key], levels)
+ mod_fulllog = _combine_logentries(mod_logentries)
+ else:
+ mod_logentries = default_logentries
+ mod_fulllog = default_fulllog
+ if len(mod_logentries) == 0:
+ continue
+ # - is nicer than _ for module names, so allow people to use it.
+ s = s.replace("-", "_")
+ try:
+ m = _load_mod("portage.elog.mod_" + s)
+ # Timeout after one minute (in case something like the mail
+ # module gets hung).
+ try:
+ AlarmSignal.register(60)
+ m.process(mysettings, str(key), mod_logentries, mod_fulllog)
+ finally:
+ AlarmSignal.unregister()
+ if hasattr(m, "finalize") and not m.finalize in _elog_atexit_handlers:
+ _elog_atexit_handlers.append(m.finalize)
+ atexit_register(m.finalize)
+ except (ImportError, AttributeError) as e:
+ writemsg(_("!!! Error while importing logging modules "
+ "while loading \"mod_%s\":\n") % str(s))
+ writemsg("%s\n" % str(e), noiselevel=-1)
+ except AlarmSignal:
+ writemsg("Timeout in elog_process for system '%s'\n" % s,
+ noiselevel=-1)
+ except PortageException as e:
+ writemsg("%s\n" % str(e), noiselevel=-1)
+
diff --git a/portage_with_autodep/pym/portage/elog/filtering.py b/portage_with_autodep/pym/portage/elog/filtering.py
new file mode 100644
index 0000000..82181a4
--- /dev/null
+++ b/portage_with_autodep/pym/portage/elog/filtering.py
@@ -0,0 +1,15 @@
+# elog/filtering.py - elog message filtering functions
+# Copyright 2006-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+def filter_loglevels(logentries, loglevels):
+ # remove unwanted entries from all logentries
+ rValue = {}
+ loglevels = [x.upper() for x in loglevels]
+ for phase in logentries:
+ for msgtype, msgcontent in logentries[phase]:
+ if msgtype.upper() in loglevels or "*" in loglevels:
+ if phase not in rValue:
+ rValue[phase] = []
+ rValue[phase].append((msgtype, msgcontent))
+ return rValue
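+
+# Illustrative example (not in the original module): loglevel matching is
+# case-insensitive, and "*" keeps everything:
+#
+#     >>> filter_loglevels({"compile": [("INFO", ["ok"]), ("ERROR", ["boom"])]},
+#     ...                  ["error"])
+#     {'compile': [('ERROR', ['boom'])]}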
diff --git a/portage_with_autodep/pym/portage/elog/messages.py b/portage_with_autodep/pym/portage/elog/messages.py
new file mode 100644
index 0000000..6c1580a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/elog/messages.py
@@ -0,0 +1,172 @@
+# elog/messages.py - elog core functions
+# Copyright 2006-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.output:colorize',
+ 'portage.util:writemsg',
+)
+
+from portage.const import EBUILD_PHASES
+from portage.localization import _
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage import _unicode_decode
+
+import io
+import sys
+
+def collect_ebuild_messages(path):
+ """ Collect elog messages generated by the bash logging function stored
+ at 'path'.
+ """
+ mylogfiles = None
+ try:
+ mylogfiles = os.listdir(path)
+ except OSError:
+ pass
+ # shortcut for packages without any messages
+ if not mylogfiles:
+ return {}
+ # exploit listdir() file order so we process log entries in chronological order
+ mylogfiles.reverse()
+ logentries = {}
+ for msgfunction in mylogfiles:
+ filename = os.path.join(path, msgfunction)
+ if msgfunction not in EBUILD_PHASES:
+ writemsg(_("!!! can't process invalid log file: %s\n") % filename,
+ noiselevel=-1)
+ continue
+ if not msgfunction in logentries:
+ logentries[msgfunction] = []
+ lastmsgtype = None
+ msgcontent = []
+ for l in io.open(_unicode_encode(filename,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'], errors='replace'):
+ if not l:
+ continue
+ try:
+ msgtype, msg = l.split(" ", 1)
+ except ValueError:
+ writemsg(_("!!! malformed entry in "
+ "log file: '%s'\n") % filename, noiselevel=-1)
+ continue
+
+ if lastmsgtype is None:
+ lastmsgtype = msgtype
+
+ if msgtype == lastmsgtype:
+ msgcontent.append(msg)
+ else:
+ if msgcontent:
+ logentries[msgfunction].append((lastmsgtype, msgcontent))
+ msgcontent = [msg]
+ lastmsgtype = msgtype
+ if msgcontent:
+ logentries[msgfunction].append((lastmsgtype, msgcontent))
+
+ # clean logfiles to avoid repetitions
+ for f in mylogfiles:
+ try:
+ os.unlink(os.path.join(path, f))
+ except OSError:
+ pass
+ return logentries
+
+_msgbuffer = {}
+def _elog_base(level, msg, phase="other", key=None, color=None, out=None):
+ """ Backend for the other messaging functions, should not be called
+ directly.
+ """
+
+ # TODO: Have callers pass in a more unique 'key' parameter than a plain
+ # cpv, in order to ensure that messages are properly grouped together
+ # for a given package instance, and also to ensure that each elog module's
+ # process() function is only called once for each unique package. This is
+ # needed not only when building packages in parallel, but also to preserve
+ # continuity in messages when a package is simply updated, since we don't
+ # want the elog_process() call from the uninstall of the old version to
+ # cause discontinuity in the elog messages of the new one being installed.
+
+ global _msgbuffer
+
+ if out is None:
+ out = sys.stdout
+
+ if color is None:
+ color = "GOOD"
+
+ msg = _unicode_decode(msg,
+ encoding=_encodings['content'], errors='replace')
+
+ formatted_msg = colorize(color, " * ") + msg + "\n"
+
+ # avoid potential UnicodeEncodeError
+ if out in (sys.stdout, sys.stderr):
+ formatted_msg = _unicode_encode(formatted_msg,
+ encoding=_encodings['stdio'], errors='backslashreplace')
+ if sys.hexversion >= 0x3000000:
+ out = out.buffer
+
+ out.write(formatted_msg)
+
+ if key not in _msgbuffer:
+ _msgbuffer[key] = {}
+ if phase not in _msgbuffer[key]:
+ _msgbuffer[key][phase] = []
+ _msgbuffer[key][phase].append((level, msg))
+
+ #raise NotImplementedError()
+
+def collect_messages(key=None, phasefilter=None):
+ global _msgbuffer
+
+ if key is None:
+ rValue = _msgbuffer
+ _reset_buffer()
+ else:
+ rValue = {}
+ if key in _msgbuffer:
+ if phasefilter is None:
+ rValue[key] = _msgbuffer.pop(key)
+ else:
+ rValue[key] = {}
+ for phase in phasefilter:
+ try:
+ rValue[key][phase] = _msgbuffer[key].pop(phase)
+ except KeyError:
+ pass
+ if not _msgbuffer[key]:
+ del _msgbuffer[key]
+ return rValue
+
+def _reset_buffer():
+ """ Reset the internal message buffer when it has been processed,
+ should not be called directly.
+ """
+ global _msgbuffer
+
+ _msgbuffer = {}
+
+# creating and exporting the actual messaging functions
+_functions = { "einfo": ("INFO", "GOOD"),
+ "elog": ("LOG", "GOOD"),
+ "ewarn": ("WARN", "WARN"),
+ "eqawarn": ("QA", "WARN"),
+ "eerror": ("ERROR", "BAD"),
+}
+
+def _make_msgfunction(level, color):
+ def _elog(msg, phase="other", key=None, out=None):
+ """ Display and log a message assigned to the given key/cpv
+ (or unassigned if no key is given).
+ """
+ _elog_base(level, msg, phase=phase, key=key, color=color, out=out)
+ return _elog
+
+for f in _functions:
+ setattr(sys.modules[__name__], f, _make_msgfunction(_functions[f][0], _functions[f][1]))
+del f, _functions
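+
+# Illustrative usage of the generated functions (the package key below is
+# hypothetical): each call prints the message and buffers it per key and
+# phase, and collect_messages() later drains the buffer:
+#
+#     >>> einfo("Configuration saved", phase="postinst", key="app-misc/foo-1.0")
+#     >>> collect_messages(key="app-misc/foo-1.0")
+#     {'app-misc/foo-1.0': {'postinst': [('INFO', 'Configuration saved')]}}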
diff --git a/portage_with_autodep/pym/portage/elog/mod_custom.py b/portage_with_autodep/pym/portage/elog/mod_custom.py
new file mode 100644
index 0000000..e1a5223
--- /dev/null
+++ b/portage_with_autodep/pym/portage/elog/mod_custom.py
@@ -0,0 +1,19 @@
+# elog/mod_custom.py - elog dispatch module
+# Copyright 2006-2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage.elog.mod_save, portage.process, portage.exception
+
+def process(mysettings, key, logentries, fulltext):
+ elogfilename = portage.elog.mod_save.process(mysettings, key, logentries, fulltext)
+
+ if not mysettings.get("PORTAGE_ELOG_COMMAND"):
+ raise portage.exception.MissingParameter("!!! Custom logging requested but PORTAGE_ELOG_COMMAND is not defined")
+ else:
+ mylogcmd = mysettings["PORTAGE_ELOG_COMMAND"]
+ mylogcmd = mylogcmd.replace("${LOGFILE}", elogfilename)
+ mylogcmd = mylogcmd.replace("${PACKAGE}", key)
+ retval = portage.process.spawn_bash(mylogcmd)
+ if retval != 0:
+ raise portage.exception.PortageException("!!! PORTAGE_ELOG_COMMAND failed with exitcode %d" % retval)
+ return
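+
+# A minimal sketch of the make.conf settings this module expects (the
+# handler path is hypothetical); ${LOGFILE} and ${PACKAGE} are expanded by
+# the code above:
+#
+#     PORTAGE_ELOG_SYSTEM="custom"
+#     PORTAGE_ELOG_COMMAND="/usr/local/bin/elog-handler ${PACKAGE} ${LOGFILE}"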
diff --git a/portage_with_autodep/pym/portage/elog/mod_echo.py b/portage_with_autodep/pym/portage/elog/mod_echo.py
new file mode 100644
index 0000000..5de25bf
--- /dev/null
+++ b/portage_with_autodep/pym/portage/elog/mod_echo.py
@@ -0,0 +1,46 @@
+# elog/mod_echo.py - elog dispatch module
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import sys
+from portage.output import EOutput, colorize
+from portage.const import EBUILD_PHASES
+from portage.localization import _
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+_items = []
+def process(mysettings, key, logentries, fulltext):
+ global _items
+ _items.append((mysettings["ROOT"], key, logentries))
+
+def finalize():
+ global _items
+ printer = EOutput()
+ for root, key, logentries in _items:
+ print()
+ if root == "/":
+ printer.einfo(_("Messages for package %s:") %
+ colorize("INFORM", key))
+ else:
+ printer.einfo(_("Messages for package %(pkg)s merged to %(root)s:") %
+ {"pkg": colorize("INFORM", key), "root": root})
+ print()
+ for phase in EBUILD_PHASES:
+ if phase not in logentries:
+ continue
+ for msgtype, msgcontent in logentries[phase]:
+ fmap = {"INFO": printer.einfo,
+ "WARN": printer.ewarn,
+ "ERROR": printer.eerror,
+ "LOG": printer.einfo,
+ "QA": printer.ewarn}
+ if isinstance(msgcontent, basestring):
+ msgcontent = [msgcontent]
+ for line in msgcontent:
+ fmap[msgtype](line.strip("\n"))
+ _items = []
+ return
diff --git a/portage_with_autodep/pym/portage/elog/mod_mail.py b/portage_with_autodep/pym/portage/elog/mod_mail.py
new file mode 100644
index 0000000..086c683
--- /dev/null
+++ b/portage_with_autodep/pym/portage/elog/mod_mail.py
@@ -0,0 +1,43 @@
+# elog/mod_mail.py - elog dispatch module
+# Copyright 2006-2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage.mail, socket
+from portage.exception import PortageException
+from portage.localization import _
+from portage.util import writemsg
+
+def process(mysettings, key, logentries, fulltext):
+ if "PORTAGE_ELOG_MAILURI" in mysettings:
+ myrecipient = mysettings["PORTAGE_ELOG_MAILURI"].split()[0]
+ else:
+ myrecipient = "root@localhost"
+
+ myfrom = mysettings["PORTAGE_ELOG_MAILFROM"]
+ myfrom = myfrom.replace("${HOST}", socket.getfqdn())
+ mysubject = mysettings["PORTAGE_ELOG_MAILSUBJECT"]
+ mysubject = mysubject.replace("${PACKAGE}", key)
+ mysubject = mysubject.replace("${HOST}", socket.getfqdn())
+
+ # look at the phases listed in our logentries to figure out what action was performed
+ action = _("merged")
+ for phase in logentries:
+ # if we found a *rm phase assume that the package was unmerged
+ if phase in ["postrm", "prerm"]:
+ action = _("unmerged")
+ # if we think that the package was unmerged, make sure there was no unexpected
+ # phase recorded to avoid misinformation
+ if action == _("unmerged"):
+ for phase in logentries:
+ if phase not in ["postrm", "prerm", "other"]:
+ action = _("unknown")
+
+ mysubject = mysubject.replace("${ACTION}", action)
+
+ mymessage = portage.mail.create_message(myfrom, myrecipient, mysubject, fulltext)
+ try:
+ portage.mail.send_mail(mysettings, mymessage)
+ except PortageException as e:
+ writemsg("%s\n" % str(e), noiselevel=-1)
+
+ return
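
The phase scan above reduces to a small decision rule: *rm phases mean an unmerge, and any other phase recorded alongside them makes the action ambiguous. A standalone sketch of that rule:

    def infer_action(phases):
        # Mirrors process() above: "unmerged" only if every recorded
        # phase is prerm/postrm (plus the catch-all "other").
        if any(p in ("prerm", "postrm") for p in phases):
            if all(p in ("prerm", "postrm", "other") for p in phases):
                return "unmerged"
            return "unknown"
        return "merged"

    assert infer_action(["postinst"]) == "merged"
    assert infer_action(["prerm", "postrm"]) == "unmerged"
    assert infer_action(["prerm", "compile"]) == "unknown"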
diff --git a/portage_with_autodep/pym/portage/elog/mod_mail_summary.py b/portage_with_autodep/pym/portage/elog/mod_mail_summary.py
new file mode 100644
index 0000000..0bd67f2
--- /dev/null
+++ b/portage_with_autodep/pym/portage/elog/mod_mail_summary.py
@@ -0,0 +1,89 @@
+# elog/mod_mail_summary.py - elog dispatch module
+# Copyright 2006-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage.exception import AlarmSignal, PortageException
+from portage.localization import _
+from portage.util import writemsg
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+
+import socket
+import time
+
+_config_keys = ('PORTAGE_ELOG_MAILURI', 'PORTAGE_ELOG_MAILFROM',
+ 'PORTAGE_ELOG_MAILSUBJECT',)
+_items = {}
+def process(mysettings, key, logentries, fulltext):
+ global _items
+ time_str = _unicode_decode(
+ time.strftime("%Y%m%d-%H%M%S %Z", time.localtime(time.time())),
+ encoding=_encodings['content'], errors='replace')
+ header = _(">>> Messages generated for package %(pkg)s by process %(pid)d on %(time)s:\n\n") % \
+ {"pkg": key, "pid": os.getpid(), "time": time_str}
+ config_root = mysettings["PORTAGE_CONFIGROOT"]
+
+ # Copy needed variables from the config instance,
+ # since we don't need to hold a reference for the
+ # whole thing. This also makes it possible to
+ # rely on per-package variable settings that may
+ # have come from /etc/portage/package.env, since
+ # we'll be isolated from any future mutations of
+ # mysettings.
+ config_dict = {}
+ for k in _config_keys:
+ v = mysettings.get(k)
+ if v is not None:
+ config_dict[k] = v
+
+ config_dict, items = _items.setdefault(config_root, (config_dict, {}))
+ items[key] = header + fulltext
+
+def finalize():
+ global _items
+ for mysettings, items in _items.values():
+ _finalize(mysettings, items)
+ _items.clear()
+
+def _finalize(mysettings, items):
+ if len(items) == 0:
+ return
+ elif len(items) == 1:
+ count = _("one package")
+ else:
+ count = _("multiple packages")
+ if "PORTAGE_ELOG_MAILURI" in mysettings:
+ myrecipient = mysettings["PORTAGE_ELOG_MAILURI"].split()[0]
+ else:
+ myrecipient = "root@localhost"
+
+ myfrom = mysettings.get("PORTAGE_ELOG_MAILFROM", "")
+ myfrom = myfrom.replace("${HOST}", socket.getfqdn())
+ mysubject = mysettings.get("PORTAGE_ELOG_MAILSUBJECT", "")
+ mysubject = mysubject.replace("${PACKAGE}", count)
+ mysubject = mysubject.replace("${HOST}", socket.getfqdn())
+
+ mybody = _("elog messages for the following packages generated by "
+ "process %(pid)d on host %(host)s:\n") % {"pid": os.getpid(), "host": socket.getfqdn()}
+ for key in items:
+ mybody += "- %s\n" % key
+
+ mymessage = portage.mail.create_message(myfrom, myrecipient, mysubject,
+ mybody, attachments=list(items.values()))
+
+ # Timeout after one minute in case send_mail() blocks indefinitely.
+ try:
+ try:
+ AlarmSignal.register(60)
+ portage.mail.send_mail(mysettings, mymessage)
+ finally:
+ AlarmSignal.unregister()
+ except AlarmSignal:
+ writemsg("Timeout in finalize() for elog system 'mail_summary'\n",
+ noiselevel=-1)
+ except PortageException as e:
+ writemsg("%s\n" % (e,), noiselevel=-1)
+
+ return
diff --git a/portage_with_autodep/pym/portage/elog/mod_save.py b/portage_with_autodep/pym/portage/elog/mod_save.py
new file mode 100644
index 0000000..9350a6e
--- /dev/null
+++ b/portage_with_autodep/pym/portage/elog/mod_save.py
@@ -0,0 +1,51 @@
+# elog/mod_save.py - elog dispatch module
+# Copyright 2006-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import io
+import time
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.data import portage_gid, portage_uid
+from portage.package.ebuild.prepare_build_dirs import _ensure_log_subdirs
+from portage.util import ensure_dirs, normalize_path
+
+def process(mysettings, key, logentries, fulltext):
+
+ if mysettings.get("PORT_LOGDIR"):
+ logdir = normalize_path(mysettings["PORT_LOGDIR"])
+ else:
+ logdir = os.path.join(os.sep, "var", "log", "portage")
+
+ if not os.path.isdir(logdir):
+ # Only initialize group/mode if the directory doesn't
+ # exist, so that we don't override permissions if they
+ # were previously set by the administrator.
+ # NOTE: These permissions should be compatible with our
+ # default logrotate config as discussed in bug 374287.
+ ensure_dirs(logdir, uid=portage_uid, gid=portage_gid, mode=0o2770)
+
+ cat = mysettings['CATEGORY']
+ pf = mysettings['PF']
+
+ elogfilename = pf + ":" + _unicode_decode(
+ time.strftime("%Y%m%d-%H%M%S", time.gmtime(time.time())),
+ encoding=_encodings['content'], errors='replace') + ".log"
+
+ if "split-elog" in mysettings.features:
+ log_subdir = os.path.join(logdir, "elog", cat)
+ elogfilename = os.path.join(log_subdir, elogfilename)
+ else:
+ log_subdir = os.path.join(logdir, "elog")
+ elogfilename = os.path.join(log_subdir, cat + ':' + elogfilename)
+ _ensure_log_subdirs(logdir, log_subdir)
+
+ elogfile = io.open(_unicode_encode(elogfilename,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['content'], errors='backslashreplace')
+ elogfile.write(_unicode_decode(fulltext))
+ elogfile.close()
+
+ return elogfilename
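
Depending on FEATURES=split-elog, the log lands either in a per-category elog subdirectory or as a single category-prefixed file. A quick sketch of the two resulting paths (sample values only):

    import os

    logdir, cat, pf, stamp = "/var/log/portage", "app-misc", "foo-1.0", "20110821-173550"
    split = os.path.join(logdir, "elog", cat, "%s:%s.log" % (pf, stamp))
    flat = os.path.join(logdir, "elog", "%s:%s:%s.log" % (cat, pf, stamp))
    print(split)  # /var/log/portage/elog/app-misc/foo-1.0:20110821-173550.log
    print(flat)   # /var/log/portage/elog/app-misc:foo-1.0:20110821-173550.log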
diff --git a/portage_with_autodep/pym/portage/elog/mod_save_summary.py b/portage_with_autodep/pym/portage/elog/mod_save_summary.py
new file mode 100644
index 0000000..4adc6f3
--- /dev/null
+++ b/portage_with_autodep/pym/portage/elog/mod_save_summary.py
@@ -0,0 +1,59 @@
+# elog/mod_save_summary.py - elog dispatch module
+# Copyright 2006-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import io
+import time
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.data import portage_gid, portage_uid
+from portage.localization import _
+from portage.package.ebuild.prepare_build_dirs import _ensure_log_subdirs
+from portage.util import apply_permissions, ensure_dirs, normalize_path
+
+def process(mysettings, key, logentries, fulltext):
+ if mysettings.get("PORT_LOGDIR"):
+ logdir = normalize_path(mysettings["PORT_LOGDIR"])
+ else:
+ logdir = os.path.join(os.sep, "var", "log", "portage")
+
+ if not os.path.isdir(logdir):
+ # Only initialize group/mode if the directory doesn't
+ # exist, so that we don't override permissions if they
+ # were previously set by the administrator.
+ # NOTE: These permissions should be compatible with our
+ # default logrotate config as discussed in bug 374287.
+ ensure_dirs(logdir, uid=portage_uid, gid=portage_gid, mode=0o2770)
+
+ elogdir = os.path.join(logdir, "elog")
+ _ensure_log_subdirs(logdir, elogdir)
+
+ # TODO: Locking
+ elogfilename = elogdir+"/summary.log"
+ elogfile = io.open(_unicode_encode(elogfilename,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='a', encoding=_encodings['content'], errors='backslashreplace')
+
+ # Copy group permission bits from parent directory.
+ elogdir_st = os.stat(elogdir)
+ elogdir_gid = elogdir_st.st_gid
+ elogdir_grp_mode = 0o060 & elogdir_st.st_mode
+ apply_permissions(elogfilename, gid=elogdir_gid,
+ mode=elogdir_grp_mode, mask=0)
+
+ time_str = time.strftime("%Y-%m-%d %H:%M:%S %Z",
+ time.localtime(time.time()))
+ # Avoid potential UnicodeDecodeError later.
+ time_str = _unicode_decode(time_str,
+ encoding=_encodings['content'], errors='replace')
+ elogfile.write(_unicode_decode(
+ _(">>> Messages generated by process " +
+ "%(pid)d on %(time)s for package %(pkg)s:\n\n") %
+ {"pid": os.getpid(), "time": time_str, "pkg": key}))
+ elogfile.write(_unicode_decode(fulltext))
+ elogfile.write(_unicode_decode("\n"))
+ elogfile.close()
+
+ return elogfilename
diff --git a/portage_with_autodep/pym/portage/elog/mod_syslog.py b/portage_with_autodep/pym/portage/elog/mod_syslog.py
new file mode 100644
index 0000000..d71dab4
--- /dev/null
+++ b/portage_with_autodep/pym/portage/elog/mod_syslog.py
@@ -0,0 +1,32 @@
+# elog/mod_syslog.py - elog dispatch module
+# Copyright 2006-2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+import syslog
+from portage.const import EBUILD_PHASES
+from portage import _encodings
+
+_pri = {
+ "INFO" : syslog.LOG_INFO,
+ "WARN" : syslog.LOG_WARNING,
+ "ERROR" : syslog.LOG_ERR,
+ "LOG" : syslog.LOG_NOTICE,
+ "QA" : syslog.LOG_WARNING
+}
+
+def process(mysettings, key, logentries, fulltext):
+ syslog.openlog("portage", syslog.LOG_ERR | syslog.LOG_WARNING | syslog.LOG_INFO | syslog.LOG_NOTICE, syslog.LOG_LOCAL5)
+ for phase in EBUILD_PHASES:
+		if phase not in logentries:
+ continue
+ for msgtype,msgcontent in logentries[phase]:
+ msgtext = "".join(msgcontent)
+ for line in msgtext.splitlines():
+ line = "%s: %s: %s" % (key, phase, line)
+ if sys.hexversion < 0x3000000 and isinstance(msgtext, unicode):
+ # Avoid TypeError from syslog.syslog()
+ line = line.encode(_encodings['content'],
+ 'backslashreplace')
+ syslog.syslog(_pri[msgtype], line)
+ syslog.closelog()
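
Each line is prefixed with the package key and phase before being handed to syslog (facility LOCAL5), so a message is reconstructable from the log alone. The formatting step in isolation:

    key, phase, line = "app-misc/foo-1.0", "postinst", "some warning text"
    print("%s: %s: %s" % (key, phase, line))
    # app-misc/foo-1.0: postinst: some warning text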
diff --git a/portage_with_autodep/pym/portage/env/__init__.py b/portage_with_autodep/pym/portage/env/__init__.py
new file mode 100644
index 0000000..17b66d1
--- /dev/null
+++ b/portage_with_autodep/pym/portage/env/__init__.py
@@ -0,0 +1,3 @@
+# Copyright: 2007 Gentoo Foundation
+# License: GPL2
+
diff --git a/portage_with_autodep/pym/portage/env/config.py b/portage_with_autodep/pym/portage/env/config.py
new file mode 100644
index 0000000..865d835
--- /dev/null
+++ b/portage_with_autodep/pym/portage/env/config.py
@@ -0,0 +1,105 @@
+# config.py -- Portage Config
+# Copyright 2007-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ["ConfigLoaderKlass", "GenericFile", "PackageKeywordsFile",
+ "PackageUseFile", "PackageMaskFile", "PortageModulesFile"]
+
+from portage.cache.mappings import UserDict
+from portage.env.loaders import KeyListFileLoader, KeyValuePairFileLoader, ItemFileLoader
+
+class ConfigLoaderKlass(UserDict):
+ """
+ A base class stub for things to inherit from.
+ Users may want a non-file backend.
+ """
+
+ def __init__(self, loader):
+ """
+		@param loader: An object with a load() method that returns two
+		dicts: the first a data dict, the second a dict of errors.
+ """
+ UserDict.__init__(self)
+ self._loader = loader
+
+ def load(self):
+ """
+ Load the data from the loader.
+
+ @throws LoaderError:
+ """
+
+ self.data, self.errors = self._loader.load()
+
+class GenericFile(UserDict):
+ """
+	Tries each of the known loaders in turn until one of them yields
+	data. This is slow, but useful when the type of the file being
+	loaded is not known in advance (ideally the file would declare
+	its own type).
+ """
+
+ loaders = [KeyListFileLoader, KeyValuePairFileLoader, ItemFileLoader]
+
+ def __init__(self, filename):
+ UserDict.__init__(self)
+ self.filename = filename
+
+ def load(self):
+ for loader in self.loaders:
+ l = loader(self.filename, None)
+ data, errors = l.load()
+ if len(data) and not len(errors):
+ (self.data, self.errors) = (data, errors)
+ return
+
+
+class PackageKeywordsFile(ConfigLoaderKlass):
+ """
+ Inherits from ConfigLoaderKlass; implements a file-based backend.
+ """
+
+ default_loader = KeyListFileLoader
+
+ def __init__(self, filename):
+ super(PackageKeywordsFile, self).__init__(
+ self.default_loader(filename, validator=None))
+
+class PackageUseFile(ConfigLoaderKlass):
+ """
+	Inherits from ConfigLoaderKlass; implements a file-based backend for package.use. Doesn't handle recursion yet.
+ """
+
+ default_loader = KeyListFileLoader
+ def __init__(self, filename):
+ super(PackageUseFile, self).__init__(
+ self.default_loader(filename, validator=None))
+
+class PackageMaskFile(ConfigLoaderKlass):
+ """
+ A class that implements a file-based package.mask
+
+	Entries in package.mask are of the form:
+ atom1
+ atom2
+ or optionally
+ -atom3
+ to revert a previous mask; this only works when masking files are stacked
+ """
+
+ default_loader = ItemFileLoader
+
+ def __init__(self, filename):
+ super(PackageMaskFile, self).__init__(
+ self.default_loader(filename, validator=None))
+
+class PortageModulesFile(ConfigLoaderKlass):
+ """
+ File Class for /etc/portage/modules
+ """
+
+ default_loader = KeyValuePairFileLoader
+
+ def __init__(self, filename):
+ super(PortageModulesFile, self).__init__(
+ self.default_loader(filename, validator=None))
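
As a usage sketch, each of these classes wraps a loader around a path and exposes the parsed result via .data after load() (the paths below are the conventional locations, shown for illustration):

    from portage.env.config import PackageKeywordsFile, PortageModulesFile

    kw = PackageKeywordsFile("/etc/portage/package.keywords")
    kw.load()
    # kw.data maps each atom to the list of keywords that follow it.
    for atom, keywords in kw.data.items():
        print(atom, keywords)

    mods = PortageModulesFile("/etc/portage/modules")
    mods.load()  # mods.data maps setting names to values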
diff --git a/portage_with_autodep/pym/portage/env/loaders.py b/portage_with_autodep/pym/portage/env/loaders.py
new file mode 100644
index 0000000..b540fbb
--- /dev/null
+++ b/portage_with_autodep/pym/portage/env/loaders.py
@@ -0,0 +1,319 @@
+# loaders.py -- Portage config file loaders
+# Copyright 2007-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import io
+import stat
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.localization import _
+
+class LoaderError(Exception):
+
+ def __init__(self, resource, error_msg):
+ """
+ @param resource: Resource that failed to load (file/sql/etc)
+ @type resource: String
+ @param error_msg: Error from underlying Loader system
+ @type error_msg: String
+ """
+
+ self.resource = resource
+ self.error_msg = error_msg
+
+ def __str__(self):
+ return "Failed while loading resource: %s, error was: %s" % (
+ self.resource, self.error_msg)
+
+
+def RecursiveFileLoader(filename):
+ """
+	If filename is a file, return a generator that yields filename;
+	if filename is a directory, return a generator that yields the
+	files in that directory.
+
+ Ignore files beginning with . or ending in ~.
+ Prune CVS directories.
+
+ @param filename: name of a file/directory to traverse
+	@rtype: generator
+	@returns: a generator that yields the files to process
+ """
+
+ try:
+ st = os.stat(filename)
+ except OSError:
+ return
+ if stat.S_ISDIR(st.st_mode):
+ for root, dirs, files in os.walk(filename):
+ for d in list(dirs):
+ if d[:1] == '.' or d == 'CVS':
+ dirs.remove(d)
+ for f in files:
+ try:
+ f = _unicode_decode(f,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ if f[:1] == '.' or f[-1:] == '~':
+ continue
+ yield os.path.join(root, f)
+ else:
+ yield filename
+
+
+class DataLoader(object):
+
+ def __init__(self, validator):
+ f = validator
+ if f is None:
+ # if they pass in no validator, just make a fake one
+ # that always returns true
+ def validate(key):
+ return True
+ f = validate
+ self._validate = f
+
+ def load(self):
+ """
+ Function to do the actual work of a Loader
+ """
+ raise NotImplementedError("Please override in a subclass")
+
+class EnvLoader(DataLoader):
+ """ Class to access data in the environment """
+ def __init__(self, validator):
+ DataLoader.__init__(self, validator)
+
+ def load(self):
+ return os.environ
+
+class TestTextLoader(DataLoader):
+ """ You give it some data, it 'loads' it for you, no filesystem access
+ """
+ def __init__(self, validator):
+ DataLoader.__init__(self, validator)
+ self.data = {}
+ self.errors = {}
+
+ def setData(self, text):
+ """Explicitly set the data field
+ Args:
+ text - a dict of data typical of Loaders
+ Returns:
+ None
+ """
+ if isinstance(text, dict):
+ self.data = text
+ else:
+ raise ValueError("setData requires a dict argument")
+
+ def setErrors(self, errors):
+ self.errors = errors
+
+ def load(self):
+ return (self.data, self.errors)
+
+
+class FileLoader(DataLoader):
+ """ Class to access data in files """
+
+ def __init__(self, filename, validator):
+ """
+ Args:
+ filename : Name of file or directory to open
+			validator : callable used to validate keys (or None to accept all).
+ """
+ DataLoader.__init__(self, validator)
+ self.fname = filename
+
+ def load(self):
+ """
+		Return the {source: {key: value}} pairs parsed from a file,
+		and the {source: [list of errors]} encountered during the load.
+
+		If self.fname is a directory, all files under it are
+		loaded recursively.
+
+		@rtype: tuple
+		@returns:
+		Returns (data, errors); both may be empty dicts or populated.
+ """
+ data = {}
+ errors = {}
+ # I tried to save a nasty lookup on lineparser by doing the lookup
+ # once, which may be expensive due to digging in child classes.
+ func = self.lineParser
+ for fn in RecursiveFileLoader(self.fname):
+ try:
+ f = io.open(_unicode_encode(fn,
+ encoding=_encodings['fs'], errors='strict'), mode='r',
+ encoding=_encodings['content'], errors='replace')
+ except EnvironmentError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ del e
+ continue
+ for line_num, line in enumerate(f):
+ func(line, line_num, data, errors)
+ f.close()
+ return (data, errors)
+
+ def lineParser(self, line, line_num, data, errors):
+ """ This function parses 1 line at a time
+ Args:
+ line: a string representing 1 line of a file
+ line_num: an integer representing what line we are processing
+ data: a dict that contains the data we have extracted from the file
+ already
+ errors: a dict representing parse errors.
+ Returns:
+ Nothing (None). Writes to data and errors
+ """
+		raise NotImplementedError("Please override this in a child class")
+
+class ItemFileLoader(FileLoader):
+ """
+ Class to load data from a file full of items one per line
+
+ >>> item1
+ >>> item2
+ >>> item3
+ >>> item1
+
+ becomes { 'item1':None, 'item2':None, 'item3':None }
+ Note that due to the data store being a dict, duplicates
+ are removed.
+ """
+
+ def __init__(self, filename, validator):
+ FileLoader.__init__(self, filename, validator)
+
+ def lineParser(self, line, line_num, data, errors):
+ line = line.strip()
+ if line.startswith('#'): # Skip commented lines
+ return
+ if not len(line): # skip empty lines
+ return
+ split = line.split()
+ if not len(split):
+ errors.setdefault(self.fname, []).append(
+ _("Malformed data at line: %s, data: %s")
+ % (line_num + 1, line))
+ return
+ key = split[0]
+ if not self._validate(key):
+ errors.setdefault(self.fname, []).append(
+ _("Validation failed at line: %s, data %s")
+ % (line_num + 1, key))
+ return
+ data[key] = None
+
+class KeyListFileLoader(FileLoader):
+ """
+ Class to load data from a file full of key [list] tuples
+
+ >>>>key foo1 foo2 foo3
+ becomes
+ {'key':['foo1','foo2','foo3']}
+ """
+
+ def __init__(self, filename, validator=None, valuevalidator=None):
+ FileLoader.__init__(self, filename, validator)
+
+ f = valuevalidator
+ if f is None:
+ # if they pass in no validator, just make a fake one
+ # that always returns true
+ def validate(key):
+ return True
+ f = validate
+ self._valueValidate = f
+
+ def lineParser(self, line, line_num, data, errors):
+ line = line.strip()
+ if line.startswith('#'): # Skip commented lines
+ return
+ if not len(line): # skip empty lines
+ return
+ split = line.split()
+ if len(split) < 1:
+ errors.setdefault(self.fname, []).append(
+ _("Malformed data at line: %s, data: %s")
+ % (line_num + 1, line))
+ return
+ key = split[0]
+ value = split[1:]
+ if not self._validate(key):
+ errors.setdefault(self.fname, []).append(
+ _("Key validation failed at line: %s, data %s")
+ % (line_num + 1, key))
+ return
+ if not self._valueValidate(value):
+ errors.setdefault(self.fname, []).append(
+ _("Value validation failed at line: %s, data %s")
+ % (line_num + 1, value))
+ return
+ if key in data:
+			data[key].extend(value) # value is a list; keep the stored list flat
+ else:
+ data[key] = value
+
+
+class KeyValuePairFileLoader(FileLoader):
+ """
+ Class to load data from a file full of key=value pairs
+
+ >>>>key=value
+ >>>>foo=bar
+ becomes:
+ {'key':'value',
+ 'foo':'bar'}
+ """
+
+ def __init__(self, filename, validator, valuevalidator=None):
+ FileLoader.__init__(self, filename, validator)
+
+ f = valuevalidator
+ if f is None:
+ # if they pass in no validator, just make a fake one
+ # that always returns true
+ def validate(key):
+ return True
+ f = validate
+ self._valueValidate = f
+
+
+ def lineParser(self, line, line_num, data, errors):
+ line = line.strip()
+ if line.startswith('#'): # skip commented lines
+ return
+ if not len(line): # skip empty lines
+ return
+ split = line.split('=', 1)
+ if len(split) < 2:
+ errors.setdefault(self.fname, []).append(
+ _("Malformed data at line: %s, data %s")
+ % (line_num + 1, line))
+ return
+ key = split[0].strip()
+ value = split[1].strip()
+ if not key:
+ errors.setdefault(self.fname, []).append(
+ _("Malformed key at line: %s, key %s")
+ % (line_num + 1, key))
+ return
+ if not self._validate(key):
+ errors.setdefault(self.fname, []).append(
+ _("Key validation failed at line: %s, data %s")
+ % (line_num + 1, key))
+ return
+ if not self._valueValidate(value):
+ errors.setdefault(self.fname, []).append(
+ _("Value validation failed at line: %s, data %s")
+ % (line_num + 1, value))
+ return
+ data[key] = value
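
A self-contained sketch of KeyValuePairFileLoader in action, assuming the patched portage tree is importable; it feeds the loader a throwaway file:

    import tempfile
    from portage.env.loaders import KeyValuePairFileLoader

    with tempfile.NamedTemporaryFile(mode="w", suffix=".conf", delete=False) as f:
        f.write("# a comment\nuser_logger = splitlog\n")
        path = f.name

    data, errors = KeyValuePairFileLoader(path, None).load()
    print(data)    # {'user_logger': 'splitlog'}
    print(errors)  # {} when every line parses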
diff --git a/portage_with_autodep/pym/portage/env/validators.py b/portage_with_autodep/pym/portage/env/validators.py
new file mode 100644
index 0000000..4d11d69
--- /dev/null
+++ b/portage_with_autodep/pym/portage/env/validators.py
@@ -0,0 +1,20 @@
+# validators.py Portage File Loader Code
+# Copyright 2007 Gentoo Foundation
+
+from portage.dep import isvalidatom
+
+ValidAtomValidator = isvalidatom
+
+def PackagesFileValidator(atom):
+	""" This function strips a leading "-" or "*" from an atom,
+	then checks whether the remainder is a valid atom: it
+	returns True if so, else False.
+
+ Args:
+ atom: a string representing an atom such as sys-apps/portage-2.1
+ """
+ if atom.startswith("*") or atom.startswith("-"):
+ atom = atom[1:]
+ if not isvalidatom(atom):
+ return False
+ return True
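
In effect the validator accepts an atom with one optional leading "-" or "*", for example:

    from portage.env.validators import PackagesFileValidator

    print(PackagesFileValidator("sys-apps/portage"))   # True
    print(PackagesFileValidator("-sys-apps/portage"))  # True (leading '-' stripped)
    print(PackagesFileValidator("not an atom"))        # False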
diff --git a/portage_with_autodep/pym/portage/exception.py b/portage_with_autodep/pym/portage/exception.py
new file mode 100644
index 0000000..7891120
--- /dev/null
+++ b/portage_with_autodep/pym/portage/exception.py
@@ -0,0 +1,186 @@
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import signal
+import sys
+from portage import _encodings, _unicode_encode, _unicode_decode
+from portage.localization import _
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+class PortageException(Exception):
+ """General superclass for portage exceptions"""
+ def __init__(self,value):
+ self.value = value[:]
+ if isinstance(self.value, basestring):
+ self.value = _unicode_decode(self.value,
+ encoding=_encodings['content'], errors='replace')
+
+ def __str__(self):
+ if isinstance(self.value, basestring):
+ return self.value
+ else:
+ return _unicode_decode(repr(self.value),
+ encoding=_encodings['content'], errors='replace')
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(),
+ encoding=_encodings['content'], errors='backslashreplace')
+
+class CorruptionError(PortageException):
+ """Corruption indication"""
+
+class InvalidDependString(PortageException):
+ """An invalid depend string has been encountered"""
+ def __init__(self, value, errors=None):
+ PortageException.__init__(self, value)
+ self.errors = errors
+
+class InvalidVersionString(PortageException):
+ """An invalid version string has been encountered"""
+
+class SecurityViolation(PortageException):
+ """An incorrect formatting was passed instead of the expected one"""
+
+class IncorrectParameter(PortageException):
+ """A parameter of the wrong type was passed"""
+
+class MissingParameter(PortageException):
+ """A parameter is required for the action requested but was not passed"""
+
+class ParseError(PortageException):
+ """An error was generated while attempting to parse the request"""
+
+class InvalidData(PortageException):
+ """An incorrect formatting was passed instead of the expected one"""
+ def __init__(self, value, category=None):
+ PortageException.__init__(self, value)
+ self.category = category
+
+class InvalidDataType(PortageException):
+ """An incorrect type was passed instead of the expected one"""
+
+class InvalidLocation(PortageException):
+ """Data was not found when it was expected to exist or was specified incorrectly"""
+
+class FileNotFound(InvalidLocation):
+ """A file was not found when it was expected to exist"""
+
+class DirectoryNotFound(InvalidLocation):
+ """A directory was not found when it was expected to exist"""
+
+class OperationNotPermitted(PortageException):
+	"""An operation was not permitted by the operating system"""
+	from errno import EPERM as errno
+
+class PermissionDenied(PortageException):
+	"""Permission denied"""
+	from errno import EACCES as errno
+
+class TryAgain(PortageException):
+	"""Try again"""
+	from errno import EAGAIN as errno
+
+class TimeoutException(PortageException):
+ """Operation timed out"""
+ # NOTE: ETIME is undefined on FreeBSD (bug #336875)
+ #from errno import ETIME as errno
+
+class AlarmSignal(TimeoutException):
+ def __init__(self, value, signum=None, frame=None):
+ TimeoutException.__init__(self, value)
+ self.signum = signum
+ self.frame = frame
+
+ @classmethod
+ def register(cls, time):
+ signal.signal(signal.SIGALRM, cls._signal_handler)
+ signal.alarm(time)
+
+ @classmethod
+ def unregister(cls):
+ signal.alarm(0)
+ signal.signal(signal.SIGALRM, signal.SIG_DFL)
+
+ @classmethod
+ def _signal_handler(cls, signum, frame):
+ signal.signal(signal.SIGALRM, signal.SIG_DFL)
+ raise AlarmSignal("alarm signal",
+ signum=signum, frame=frame)
+
+class ReadOnlyFileSystem(PortageException):
+ """Read-only file system"""
+
+class CommandNotFound(PortageException):
+ """A required binary was not available or executable"""
+
+class AmbiguousPackageName(ValueError, PortageException):
+ """Raised by portage.cpv_expand() when the package name is ambiguous due
+ to the existence of multiple matches in different categories. This inherits
+ from ValueError, for backward compatibility with calling code that already
+ handles ValueError."""
+ def __str__(self):
+ return ValueError.__str__(self)
+
+class PortagePackageException(PortageException):
+ """Malformed or missing package data"""
+
+class PackageNotFound(PortagePackageException):
+ """Missing Ebuild or Binary"""
+
+class PackageSetNotFound(PortagePackageException):
+ """Missing package set"""
+
+class InvalidPackageName(PortagePackageException):
+ """Malformed package name"""
+
+class InvalidAtom(PortagePackageException):
+ """Malformed atom spec"""
+ def __init__(self, value, category=None):
+ PortagePackageException.__init__(self, value)
+ self.category = category
+
+class UnsupportedAPIException(PortagePackageException):
+ """Unsupported API"""
+ def __init__(self, cpv, eapi):
+ self.cpv, self.eapi = cpv, eapi
+ def __str__(self):
+ eapi = self.eapi
+ if not isinstance(eapi, basestring):
+ eapi = str(eapi)
+ eapi = eapi.lstrip("-")
+ msg = _("Unable to do any operations on '%(cpv)s', since "
+ "its EAPI is higher than this portage version's. Please upgrade"
+ " to a portage version that supports EAPI '%(eapi)s'.") % \
+ {"cpv": self.cpv, "eapi": eapi}
+ return _unicode_decode(msg,
+ encoding=_encodings['content'], errors='replace')
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(),
+ encoding=_encodings['content'], errors='backslashreplace')
+
+class SignatureException(PortageException):
+ """Signature was not present in the checked file"""
+
+class DigestException(SignatureException):
+ """A problem exists in the digest"""
+
+class MissingSignature(SignatureException):
+ """Signature was not present in the checked file"""
+
+class InvalidSignature(SignatureException):
+ """Signature was checked and was not a valid, current, nor trusted signature"""
+
+class UntrustedSignature(SignatureException):
+ """Signature was not certified to the desired security level"""
+
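
The AlarmSignal register()/unregister() pair is meant to be used exactly as mod_mail_summary.py does above: wrap a potentially blocking call and catch the exception. A minimal sketch (Unix only, since it relies on SIGALRM):

    import time
    from portage.exception import AlarmSignal

    try:
        try:
            AlarmSignal.register(1)  # raise AlarmSignal after 1 second
            time.sleep(5)            # stand-in for a blocking call
        finally:
            AlarmSignal.unregister()
    except AlarmSignal:
        print("timed out")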
diff --git a/portage_with_autodep/pym/portage/getbinpkg.py b/portage_with_autodep/pym/portage/getbinpkg.py
new file mode 100644
index 0000000..a511f51
--- /dev/null
+++ b/portage_with_autodep/pym/portage/getbinpkg.py
@@ -0,0 +1,861 @@
+# getbinpkg.py -- Portage binary-package helper functions
+# Copyright 2003-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.output import colorize
+from portage.cache.mappings import slot_dict_class
+from portage.localization import _
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+
+import sys
+import socket
+import time
+import tempfile
+import base64
+
+_all_errors = [NotImplementedError, ValueError, socket.error]
+
+try:
+ from html.parser import HTMLParser as html_parser_HTMLParser
+except ImportError:
+ from HTMLParser import HTMLParser as html_parser_HTMLParser
+
+try:
+ from urllib.parse import unquote as urllib_parse_unquote
+except ImportError:
+ from urllib2 import unquote as urllib_parse_unquote
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+try:
+ import ftplib
+except ImportError as e:
+ sys.stderr.write(colorize("BAD","!!! CANNOT IMPORT FTPLIB: ")+str(e)+"\n")
+else:
+ _all_errors.extend(ftplib.all_errors)
+
+try:
+ try:
+ from http.client import HTTPConnection as http_client_HTTPConnection
+ from http.client import BadStatusLine as http_client_BadStatusLine
+ from http.client import ResponseNotReady as http_client_ResponseNotReady
+ from http.client import error as http_client_error
+ except ImportError:
+ from httplib import HTTPConnection as http_client_HTTPConnection
+ from httplib import BadStatusLine as http_client_BadStatusLine
+ from httplib import ResponseNotReady as http_client_ResponseNotReady
+ from httplib import error as http_client_error
+except ImportError as e:
+ sys.stderr.write(colorize("BAD","!!! CANNOT IMPORT HTTP.CLIENT: ")+str(e)+"\n")
+else:
+ _all_errors.append(http_client_error)
+
+_all_errors = tuple(_all_errors)
+
+if sys.hexversion >= 0x3000000:
+ long = int
+
+def make_metadata_dict(data):
+ myid,myglob = data
+
+ mydict = {}
+ for x in portage.xpak.getindex_mem(myid):
+ mydict[x] = portage.xpak.getitem(data,x)
+
+ return mydict
+
+class ParseLinks(html_parser_HTMLParser):
+	"""Parser class that extends HTMLParser to grab all anchors from an html
+	page and provide suffix and prefix limiters"""
+ def __init__(self):
+ self.PL_anchors = []
+ html_parser_HTMLParser.__init__(self)
+
+ def get_anchors(self):
+ return self.PL_anchors
+
+ def get_anchors_by_prefix(self,prefix):
+ newlist = []
+ for x in self.PL_anchors:
+ if x.startswith(prefix):
+ if x not in newlist:
+ newlist.append(x[:])
+ return newlist
+
+ def get_anchors_by_suffix(self,suffix):
+ newlist = []
+ for x in self.PL_anchors:
+ if x.endswith(suffix):
+ if x not in newlist:
+ newlist.append(x[:])
+ return newlist
+
+ def handle_endtag(self,tag):
+ pass
+
+ def handle_starttag(self,tag,attrs):
+ if tag == "a":
+ for x in attrs:
+ if x[0] == 'href':
+ if x[1] not in self.PL_anchors:
+ self.PL_anchors.append(urllib_parse_unquote(x[1]))
+
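
A quick sketch of the parser on a fragment of HTML (values chosen for illustration):

    parser = ParseLinks()
    parser.feed('<a href="foo-1.0.tbz2">foo</a> <a href="metadata.idx">idx</a>')
    print(parser.get_anchors())                   # ['foo-1.0.tbz2', 'metadata.idx']
    print(parser.get_anchors_by_suffix(".tbz2"))  # ['foo-1.0.tbz2']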
+
+def create_conn(baseurl,conn=None):
+	"""(baseurl,conn) --- Takes a protocol://site:port/address url, and an
+	optional connection. If the connection is already active, it is passed on.
+	Returns the tuple (conn, protocol, address, http_params, http_headers)."""
+
+ parts = baseurl.split("://",1)
+ if len(parts) != 2:
+ raise ValueError(_("Provided URI does not "
+ "contain protocol identifier. '%s'") % baseurl)
+ protocol,url_parts = parts
+ del parts
+
+ url_parts = url_parts.split("/")
+ host = url_parts[0]
+ if len(url_parts) < 2:
+ address = "/"
+ else:
+ address = "/"+"/".join(url_parts[1:])
+ del url_parts
+
+ userpass_host = host.split("@",1)
+ if len(userpass_host) == 1:
+ host = userpass_host[0]
+ userpass = ["anonymous"]
+ else:
+ host = userpass_host[1]
+ userpass = userpass_host[0].split(":")
+ del userpass_host
+
+ if len(userpass) > 2:
+ raise ValueError(_("Unable to interpret username/password provided."))
+ elif len(userpass) == 2:
+ username = userpass[0]
+ password = userpass[1]
+ elif len(userpass) == 1:
+ username = userpass[0]
+ password = None
+ del userpass
+
+ http_headers = {}
+ http_params = {}
+ if username and password:
+ http_headers = {
+ "Authorization": "Basic %s" %
+ base64.encodestring("%s:%s" % (username, password)).replace(
+ "\012",
+ ""
+ ),
+ }
+
+ if not conn:
+ if protocol == "https":
+ # Use local import since https typically isn't needed, and
+ # this way we can usually avoid triggering the global scope
+ # http.client ImportError handler (like during stage1 -> stage2
+ # builds where USE=ssl is disabled for python).
+ try:
+ try:
+ from http.client import HTTPSConnection as http_client_HTTPSConnection
+ except ImportError:
+ from httplib import HTTPSConnection as http_client_HTTPSConnection
+ except ImportError:
+ raise NotImplementedError(
+ _("python must have ssl enabled for https support"))
+ conn = http_client_HTTPSConnection(host)
+ elif protocol == "http":
+ conn = http_client_HTTPConnection(host)
+ elif protocol == "ftp":
+ passive = 1
+ if(host[-1] == "*"):
+ passive = 0
+ host = host[:-1]
+ conn = ftplib.FTP(host)
+ if password:
+ conn.login(username,password)
+ else:
+ sys.stderr.write(colorize("WARN",
+ _(" * No password provided for username"))+" '%s'" % \
+ (username,) + "\n\n")
+ conn.login(username)
+ conn.set_pasv(passive)
+ conn.set_debuglevel(0)
+ elif protocol == "sftp":
+ try:
+ import paramiko
+ except ImportError:
+ raise NotImplementedError(
+ _("paramiko must be installed for sftp support"))
+ t = paramiko.Transport(host)
+ t.connect(username=username, password=password)
+ conn = paramiko.SFTPClient.from_transport(t)
+ else:
+ raise NotImplementedError(_("%s is not a supported protocol.") % protocol)
+
+ return (conn,protocol,address, http_params, http_headers)
+
+def make_ftp_request(conn, address, rest=None, dest=None):
+ """(conn,address,rest) --- uses the conn object to request the data
+	from address, issuing a REST command if rest is passed."""
+ try:
+
+ if dest:
+ fstart_pos = dest.tell()
+
+ conn.voidcmd("TYPE I")
+ fsize = conn.size(address)
+
+ if (rest != None) and (rest < 0):
+ rest = fsize+int(rest)
+ if rest < 0:
+ rest = 0
+
+ if rest != None:
+ mysocket = conn.transfercmd("RETR "+str(address), rest)
+ else:
+ mysocket = conn.transfercmd("RETR "+str(address))
+
+ mydata = ""
+ while 1:
+ somedata = mysocket.recv(8192)
+ if somedata:
+ if dest:
+ dest.write(somedata)
+ else:
+ mydata = mydata + somedata
+ else:
+ break
+
+ if dest:
+			data_size = dest.tell() - fstart_pos
+ else:
+ data_size = len(mydata)
+
+ mysocket.close()
+ conn.voidresp()
+ conn.voidcmd("TYPE A")
+
+ return mydata,not (fsize==data_size),""
+
+ except ValueError as e:
+ return None,int(str(e)[:4]),str(e)
+
+
+def make_http_request(conn, address, params={}, headers={}, dest=None):
+ """(conn,address,params,headers) --- uses the conn object to request
+ the data from address, performing Location forwarding and using the
+ optional params and headers."""
+
+ rc = 0
+ response = None
+ while (rc == 0) or (rc == 301) or (rc == 302):
+ try:
+ if (rc != 0):
+ conn,ignore,ignore,ignore,ignore = create_conn(address)
+ conn.request("GET", address, body=None, headers=headers)
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ return None,None,"Server request failed: "+str(e)
+ response = conn.getresponse()
+ rc = response.status
+
+ # 301 means that the page address is wrong.
+ if ((rc == 301) or (rc == 302)):
+ ignored_data = response.read()
+ del ignored_data
+ for x in str(response.msg).split("\n"):
+ parts = x.split(": ",1)
+ if parts[0] == "Location":
+ if (rc == 301):
+ sys.stderr.write(colorize("BAD",
+ _("Location has moved: ")) + str(parts[1]) + "\n")
+ if (rc == 302):
+ sys.stderr.write(colorize("BAD",
+ _("Location has temporarily moved: ")) + \
+ str(parts[1]) + "\n")
+ address = parts[1]
+ break
+
+ if (rc != 200) and (rc != 206):
+ return None,rc,"Server did not respond successfully ("+str(response.status)+": "+str(response.reason)+")"
+
+ if dest:
+ dest.write(response.read())
+ return "",0,""
+
+ return response.read(),0,""
+
+
+def match_in_array(array, prefix="", suffix="", match_both=1, allow_overlap=0):
+ myarray = []
+
+ if not (prefix and suffix):
+ match_both = 0
+
+ for x in array:
+ add_p = 0
+ if prefix and (len(x) >= len(prefix)) and (x[:len(prefix)] == prefix):
+ add_p = 1
+
+ if match_both:
+ if prefix and not add_p: # Require both, but don't have first one.
+ continue
+ else:
+ if add_p: # Only need one, and we have it.
+ myarray.append(x[:])
+ continue
+
+		if not allow_overlap: # Don't allow prefix and suffix to overlap
+ if len(x) >= (len(prefix)+len(suffix)):
+ pass
+ else:
+ continue # Too short to match.
+ else:
+ pass # Do whatever... We're overlapping.
+
+ if suffix and (len(x) >= len(suffix)) and (x[-len(suffix):] == suffix):
+ myarray.append(x) # It matches
+ else:
+ continue # Doesn't match.
+
+ return myarray
+
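
match_in_array() is a plain prefix/suffix filter over filenames; for instance:

    files = ["metadata.idx", "foo-1.0.tbz2", "bar-2.3.tbz2", "README"]
    print(match_in_array(files, suffix=".tbz2"))
    # ['foo-1.0.tbz2', 'bar-2.3.tbz2']
    print(match_in_array(files, prefix="metadata.idx"))
    # ['metadata.idx']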
+
+
+def dir_get_list(baseurl,conn=None):
+ """(baseurl[,connection]) -- Takes a base url to connect to and read from.
+ URI should be in the form <proto>://<site>[:port]<path>
+ Connection is used for persistent connection instances."""
+
+ if not conn:
+ keepconnection = 0
+ else:
+ keepconnection = 1
+
+ conn,protocol,address,params,headers = create_conn(baseurl, conn)
+
+ listing = None
+ if protocol in ["http","https"]:
+ if not address.endswith("/"):
+ # http servers can return a 400 error here
+ # if the address doesn't end with a slash.
+ address += "/"
+ page,rc,msg = make_http_request(conn,address,params,headers)
+
+ if page:
+ parser = ParseLinks()
+ parser.feed(page)
+ del page
+ listing = parser.get_anchors()
+ else:
+ import portage.exception
+ raise portage.exception.PortageException(
+ _("Unable to get listing: %s %s") % (rc,msg))
+ elif protocol in ["ftp"]:
+ if address[-1] == '/':
+ olddir = conn.pwd()
+ conn.cwd(address)
+ listing = conn.nlst()
+ conn.cwd(olddir)
+ del olddir
+ else:
+ listing = conn.nlst(address)
+ elif protocol == "sftp":
+ listing = conn.listdir(address)
+ else:
+ raise TypeError(_("Unknown protocol. '%s'") % protocol)
+
+ if not keepconnection:
+ conn.close()
+
+ return listing
+
+def file_get_metadata(baseurl,conn=None, chunk_size=3000):
+ """(baseurl[,connection]) -- Takes a base url to connect to and read from.
+ URI should be in the form <proto>://<site>[:port]<path>
+ Connection is used for persistent connection instances."""
+
+ if not conn:
+ keepconnection = 0
+ else:
+ keepconnection = 1
+
+ conn,protocol,address,params,headers = create_conn(baseurl, conn)
+
+ if protocol in ["http","https"]:
+ headers["Range"] = "bytes=-"+str(chunk_size)
+ data,rc,msg = make_http_request(conn, address, params, headers)
+ elif protocol in ["ftp"]:
+ data,rc,msg = make_ftp_request(conn, address, -chunk_size)
+ elif protocol == "sftp":
+ f = conn.open(address)
+ try:
+ f.seek(-chunk_size, 2)
+ data = f.read()
+ finally:
+ f.close()
+ else:
+ raise TypeError(_("Unknown protocol. '%s'") % protocol)
+
+ if data:
+ xpaksize = portage.xpak.decodeint(data[-8:-4])
+ if (xpaksize+8) > chunk_size:
+ myid = file_get_metadata(baseurl, conn, (xpaksize+8))
+ if not keepconnection:
+ conn.close()
+ return myid
+ else:
+ xpak_data = data[len(data)-(xpaksize+8):-8]
+ del data
+
+ myid = portage.xpak.xsplit_mem(xpak_data)
+ if not myid:
+ myid = None,None
+ del xpak_data
+ else:
+ myid = None,None
+
+ if not keepconnection:
+ conn.close()
+
+ return myid
+
+
+def file_get(baseurl,dest,conn=None,fcmd=None,filename=None):
+ """(baseurl,dest,fcmd=) -- Takes a base url to connect to and read from.
+ URI should be in the form <proto>://[user[:pass]@]<site>[:port]<path>"""
+
+ if not fcmd:
+ return file_get_lib(baseurl,dest,conn)
+ if not filename:
+ filename = os.path.basename(baseurl)
+
+ variables = {
+ "DISTDIR": dest,
+ "URI": baseurl,
+ "FILE": filename
+ }
+
+ from portage.util import varexpand
+ from portage.process import spawn
+ myfetch = portage.util.shlex_split(fcmd)
+ myfetch = [varexpand(x, mydict=variables) for x in myfetch]
+ fd_pipes= {
+ 0:sys.stdin.fileno(),
+ 1:sys.stdout.fileno(),
+ 2:sys.stdout.fileno()
+ }
+ retval = spawn(myfetch, env=os.environ.copy(), fd_pipes=fd_pipes)
+ if retval != os.EX_OK:
+ sys.stderr.write(_("Fetcher exited with a failure condition.\n"))
+ return 0
+ return 1
+
+def file_get_lib(baseurl,dest,conn=None):
+ """(baseurl[,connection]) -- Takes a base url to connect to and read from.
+ URI should be in the form <proto>://<site>[:port]<path>
+ Connection is used for persistent connection instances."""
+
+ if not conn:
+ keepconnection = 0
+ else:
+ keepconnection = 1
+
+ conn,protocol,address,params,headers = create_conn(baseurl, conn)
+
+	sys.stderr.write("Fetching '"+str(os.path.basename(address))+"'\n")
+ if protocol in ["http","https"]:
+ data,rc,msg = make_http_request(conn, address, params, headers, dest=dest)
+ elif protocol in ["ftp"]:
+ data,rc,msg = make_ftp_request(conn, address, dest=dest)
+ elif protocol == "sftp":
+ rc = 0
+ try:
+ f = conn.open(address)
+ except SystemExit:
+ raise
+ except Exception:
+ rc = 1
+ else:
+ try:
+ if dest:
+ bufsize = 8192
+ while True:
+ data = f.read(bufsize)
+ if not data:
+ break
+ dest.write(data)
+ finally:
+ f.close()
+ else:
+ raise TypeError(_("Unknown protocol. '%s'") % protocol)
+
+ if not keepconnection:
+ conn.close()
+
+ return rc
+
+
+def dir_get_metadata(baseurl, conn=None, chunk_size=3000, verbose=1, usingcache=1, makepickle=None):
+	"""(baseurl,conn,chunk_size,verbose) --- Fetch and cache the xpak metadata
+	of the binary packages at baseurl; returns a {filename: metadata} dict."""
+ if not conn:
+ keepconnection = 0
+ else:
+ keepconnection = 1
+
+ cache_path = "/var/cache/edb"
+ metadatafilename = os.path.join(cache_path, 'remote_metadata.pickle')
+
+ if makepickle is None:
+ makepickle = "/var/cache/edb/metadata.idx.most_recent"
+
+ try:
+ conn, protocol, address, params, headers = create_conn(baseurl, conn)
+ except _all_errors as e:
+ # ftplib.FTP(host) can raise errors like this:
+ # socket.error: (111, 'Connection refused')
+ sys.stderr.write("!!! %s\n" % (e,))
+ return {}
+
+ out = sys.stdout
+ try:
+ metadatafile = open(_unicode_encode(metadatafilename,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ mypickle = pickle.Unpickler(metadatafile)
+ try:
+ mypickle.find_global = None
+ except AttributeError:
+ # TODO: If py3k, override Unpickler.find_class().
+ pass
+ metadata = mypickle.load()
+ out.write(_("Loaded metadata pickle.\n"))
+ out.flush()
+ metadatafile.close()
+ except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError):
+ metadata = {}
+ if baseurl not in metadata:
+ metadata[baseurl]={}
+ if "indexname" not in metadata[baseurl]:
+ metadata[baseurl]["indexname"]=""
+ if "timestamp" not in metadata[baseurl]:
+ metadata[baseurl]["timestamp"]=0
+ if "unmodified" not in metadata[baseurl]:
+ metadata[baseurl]["unmodified"]=0
+ if "data" not in metadata[baseurl]:
+ metadata[baseurl]["data"]={}
+
+ if not os.access(cache_path, os.W_OK):
+ sys.stderr.write(_("!!! Unable to write binary metadata to disk!\n"))
+ sys.stderr.write(_("!!! Permission denied: '%s'\n") % cache_path)
+ return metadata[baseurl]["data"]
+
+ import portage.exception
+ try:
+ filelist = dir_get_list(baseurl, conn)
+ except portage.exception.PortageException as e:
+ sys.stderr.write(_("!!! Error connecting to '%s'.\n") % baseurl)
+ sys.stderr.write("!!! %s\n" % str(e))
+ del e
+ return metadata[baseurl]["data"]
+ tbz2list = match_in_array(filelist, suffix=".tbz2")
+ metalist = match_in_array(filelist, prefix="metadata.idx")
+ del filelist
+
+ # Determine if our metadata file is current.
+ metalist.sort()
+ metalist.reverse() # makes the order new-to-old.
+ for mfile in metalist:
+ if usingcache and \
+ ((metadata[baseurl]["indexname"] != mfile) or \
+ (metadata[baseurl]["timestamp"] < int(time.time()-(60*60*24)))):
+ # Try to download new cache until we succeed on one.
+ data=""
+ for trynum in [1,2,3]:
+ mytempfile = tempfile.TemporaryFile()
+ try:
+ file_get(baseurl+"/"+mfile, mytempfile, conn)
+ if mytempfile.tell() > len(data):
+ mytempfile.seek(0)
+ data = mytempfile.read()
+ except ValueError as e:
+ sys.stderr.write("--- "+str(e)+"\n")
+ if trynum < 3:
+ sys.stderr.write(_("Retrying...\n"))
+ sys.stderr.flush()
+ mytempfile.close()
+ continue
+ if match_in_array([mfile],suffix=".gz"):
+ out.write("gzip'd\n")
+ out.flush()
+ try:
+ import gzip
+ mytempfile.seek(0)
+ gzindex = gzip.GzipFile(mfile[:-3],'rb',9,mytempfile)
+ data = gzindex.read()
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ mytempfile.close()
+ sys.stderr.write(_("!!! Failed to use gzip: ")+str(e)+"\n")
+ sys.stderr.flush()
+ mytempfile.close()
+ try:
+ metadata[baseurl]["data"] = pickle.loads(data)
+ del data
+ metadata[baseurl]["indexname"] = mfile
+ metadata[baseurl]["timestamp"] = int(time.time())
+ metadata[baseurl]["modified"] = 0 # It's not, right after download.
+ out.write(_("Pickle loaded.\n"))
+ out.flush()
+ break
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ sys.stderr.write(_("!!! Failed to read data from index: ")+str(mfile)+"\n")
+ sys.stderr.write("!!! "+str(e)+"\n")
+ sys.stderr.flush()
+ try:
+ metadatafile = open(_unicode_encode(metadatafilename,
+ encoding=_encodings['fs'], errors='strict'), 'wb')
+ pickle.dump(metadata, metadatafile, protocol=2)
+ metadatafile.close()
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ sys.stderr.write(_("!!! Failed to write binary metadata to disk!\n"))
+ sys.stderr.write("!!! "+str(e)+"\n")
+ sys.stderr.flush()
+ break
+ # We may have metadata... now we run through the tbz2 list and check.
+
+ class CacheStats(object):
+ from time import time
+ def __init__(self, out):
+ self.misses = 0
+ self.hits = 0
+ self.last_update = 0
+ self.out = out
+ self.min_display_latency = 0.2
+ def update(self):
+ cur_time = self.time()
+ if cur_time - self.last_update >= self.min_display_latency:
+ self.last_update = cur_time
+ self.display()
+ def display(self):
+ self.out.write("\r"+colorize("WARN",
+ _("cache miss: '")+str(self.misses)+"'") + \
+ " --- "+colorize("GOOD", _("cache hit: '")+str(self.hits)+"'"))
+ self.out.flush()
+
+ cache_stats = CacheStats(out)
+ have_tty = os.environ.get('TERM') != 'dumb' and out.isatty()
+ if have_tty:
+ cache_stats.display()
+ binpkg_filenames = set()
+ for x in tbz2list:
+ x = os.path.basename(x)
+ binpkg_filenames.add(x)
+ if x not in metadata[baseurl]["data"]:
+ cache_stats.misses += 1
+ if have_tty:
+ cache_stats.update()
+ metadata[baseurl]["modified"] = 1
+ myid = None
+ for retry in range(3):
+ try:
+ myid = file_get_metadata(
+ "/".join((baseurl.rstrip("/"), x.lstrip("/"))),
+ conn, chunk_size)
+ break
+ except http_client_BadStatusLine:
+ # Sometimes this error is thrown from conn.getresponse() in
+ # make_http_request(). The docstring for this error in
+ # httplib.py says "Presumably, the server closed the
+ # connection before sending a valid response".
+ conn, protocol, address, params, headers = create_conn(
+ baseurl)
+ except http_client_ResponseNotReady:
+ # With some http servers this error is known to be thrown
+ # from conn.getresponse() in make_http_request() when the
+ # remote file does not have appropriate read permissions.
+ # Maybe it's possible to recover from this exception in
+				# some cases though, so retry.
+ conn, protocol, address, params, headers = create_conn(
+ baseurl)
+
+ if myid and myid[0]:
+ metadata[baseurl]["data"][x] = make_metadata_dict(myid)
+ elif verbose:
+ sys.stderr.write(colorize("BAD",
+ _("!!! Failed to retrieve metadata on: "))+str(x)+"\n")
+ sys.stderr.flush()
+ else:
+ cache_stats.hits += 1
+ if have_tty:
+ cache_stats.update()
+ cache_stats.display()
+ # Cleanse stale cache for files that don't exist on the server anymore.
+ stale_cache = set(metadata[baseurl]["data"]).difference(binpkg_filenames)
+ if stale_cache:
+ for x in stale_cache:
+ del metadata[baseurl]["data"][x]
+ metadata[baseurl]["modified"] = 1
+ del stale_cache
+ del binpkg_filenames
+ out.write("\n")
+ out.flush()
+
+ try:
+ if "modified" in metadata[baseurl] and metadata[baseurl]["modified"]:
+ metadata[baseurl]["timestamp"] = int(time.time())
+ metadatafile = open(_unicode_encode(metadatafilename,
+ encoding=_encodings['fs'], errors='strict'), 'wb')
+ pickle.dump(metadata, metadatafile, protocol=2)
+ metadatafile.close()
+ if makepickle:
+ metadatafile = open(_unicode_encode(makepickle,
+ encoding=_encodings['fs'], errors='strict'), 'wb')
+ pickle.dump(metadata[baseurl]["data"], metadatafile, protocol=2)
+ metadatafile.close()
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ sys.stderr.write(_("!!! Failed to write binary metadata to disk!\n"))
+ sys.stderr.write("!!! "+str(e)+"\n")
+ sys.stderr.flush()
+
+ if not keepconnection:
+ conn.close()
+
+ return metadata[baseurl]["data"]
+
+def _cmp_cpv(d1, d2):
+ cpv1 = d1["CPV"]
+ cpv2 = d2["CPV"]
+ if cpv1 > cpv2:
+ return 1
+ elif cpv1 == cpv2:
+ return 0
+ else:
+ return -1
+
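
_cmp_cpv is a cmp-style comparator over the raw CPV strings (it does not parse versions), and write() below feeds it through portage.util.cmp_sort_key to obtain a key function that works on both Python 2 and 3. The same pattern in isolation:

    from portage.util import cmp_sort_key

    pkgs = [{"CPV": "app-misc/b-1.0"}, {"CPV": "app-misc/a-2.0"}]
    for metadata in sorted(pkgs, key=cmp_sort_key(_cmp_cpv)):
        print(metadata["CPV"])
    # app-misc/a-2.0
    # app-misc/b-1.0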
+class PackageIndex(object):
+
+ def __init__(self,
+ allowed_pkg_keys=None,
+ default_header_data=None,
+ default_pkg_data=None,
+ inherited_keys=None,
+ translated_keys=None):
+
+ self._pkg_slot_dict = None
+ if allowed_pkg_keys is not None:
+ self._pkg_slot_dict = slot_dict_class(allowed_pkg_keys)
+
+ self._default_header_data = default_header_data
+ self._default_pkg_data = default_pkg_data
+ self._inherited_keys = inherited_keys
+ self._write_translation_map = {}
+ self._read_translation_map = {}
+ if translated_keys:
+ self._write_translation_map.update(translated_keys)
+ self._read_translation_map.update(((y, x) for (x, y) in translated_keys))
+ self.header = {}
+ if self._default_header_data:
+ self.header.update(self._default_header_data)
+ self.packages = []
+ self.modified = True
+
+ def _readpkgindex(self, pkgfile, pkg_entry=True):
+
+ allowed_keys = None
+ if self._pkg_slot_dict is None or not pkg_entry:
+ d = {}
+ else:
+ d = self._pkg_slot_dict()
+ allowed_keys = d.allowed_keys
+
+ for line in pkgfile:
+ line = line.rstrip("\n")
+ if not line:
+ break
+ line = line.split(":", 1)
+ if not len(line) == 2:
+ continue
+ k, v = line
+ if v:
+ v = v[1:]
+ k = self._read_translation_map.get(k, k)
+ if allowed_keys is not None and \
+ k not in allowed_keys:
+ continue
+ d[k] = v
+ return d
+
+ def _writepkgindex(self, pkgfile, items):
+ for k, v in items:
+ pkgfile.write("%s: %s\n" % \
+ (self._write_translation_map.get(k, k), v))
+ pkgfile.write("\n")
+
+ def read(self, pkgfile):
+ self.readHeader(pkgfile)
+ self.readBody(pkgfile)
+
+ def readHeader(self, pkgfile):
+ self.header.update(self._readpkgindex(pkgfile, pkg_entry=False))
+
+ def readBody(self, pkgfile):
+ while True:
+ d = self._readpkgindex(pkgfile)
+ if not d:
+ break
+ mycpv = d.get("CPV")
+ if not mycpv:
+ continue
+ if self._default_pkg_data:
+ for k, v in self._default_pkg_data.items():
+ d.setdefault(k, v)
+ if self._inherited_keys:
+ for k in self._inherited_keys:
+ v = self.header.get(k)
+ if v is not None:
+ d.setdefault(k, v)
+ self.packages.append(d)
+
+ def write(self, pkgfile):
+ if self.modified:
+ self.header["TIMESTAMP"] = str(long(time.time()))
+ self.header["PACKAGES"] = str(len(self.packages))
+ keys = list(self.header)
+ keys.sort()
+ self._writepkgindex(pkgfile, [(k, self.header[k]) \
+ for k in keys if self.header[k]])
+ for metadata in sorted(self.packages,
+ key=portage.util.cmp_sort_key(_cmp_cpv)):
+ metadata = metadata.copy()
+ cpv = metadata["CPV"]
+ if self._inherited_keys:
+ for k in self._inherited_keys:
+ v = self.header.get(k)
+ if v is not None and v == metadata.get(k):
+ del metadata[k]
+ if self._default_pkg_data:
+ for k, v in self._default_pkg_data.items():
+ if metadata.get(k) == v:
+ metadata.pop(k, None)
+ keys = list(metadata)
+ keys.sort()
+ self._writepkgindex(pkgfile,
+ [(k, metadata[k]) for k in keys if metadata[k]])
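
A sketch of the PackageIndex round trip: read a Packages index and write it back, letting write() refresh the TIMESTAMP and PACKAGES headers (paths are illustrative, and Python 2 text/bytes handling is glossed over):

    import io

    index = PackageIndex()
    with io.open("/usr/portage/packages/Packages", encoding="utf_8") as f:
        index.read(f)
    print(index.header.get("PACKAGES"), "packages listed")

    with io.open("/tmp/Packages.new", mode="w", encoding="utf_8") as f:
        index.write(f)  # one header block, then one stanza per package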
diff --git a/portage_with_autodep/pym/portage/glsa.py b/portage_with_autodep/pym/portage/glsa.py
new file mode 100644
index 0000000..a784d14
--- /dev/null
+++ b/portage_with_autodep/pym/portage/glsa.py
@@ -0,0 +1,699 @@
+# Copyright 2003-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import absolute_import
+
+import io
+import sys
+try:
+ from urllib.request import urlopen as urllib_request_urlopen
+except ImportError:
+ from urllib import urlopen as urllib_request_urlopen
+import re
+import xml.dom.minidom
+
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.versions import pkgsplit, catpkgsplit, pkgcmp, best
+from portage.util import grabfile
+from portage.const import CACHE_PATH
+from portage.localization import _
+from portage.dep import _slot_separator
+
+# Note: the space for rgt and rlt is important !!
+# FIXME: use slot deps instead, requires GLSA format versioning
+opMapping = {"le": "<=", "lt": "<", "eq": "=", "gt": ">", "ge": ">=",
+ "rge": ">=~", "rle": "<=~", "rgt": " >~", "rlt": " <~"}
+NEWLINE_ESCAPE = "!;\\n" # some random string to mark newlines that should be preserved
+SPACE_ESCAPE = "!;_" # some random string to mark spaces that should be preserved
+
+def get_applied_glsas(settings):
+ """
+ Return a list of applied or injected GLSA IDs
+
+ @type settings: portage.config
+ @param settings: portage config instance
+ @rtype: list
+ @return: list of glsa IDs
+ """
+ return grabfile(os.path.join(settings["EROOT"], CACHE_PATH, "glsa"))
+
+
+# TODO: use the textwrap module instead
+def wrap(text, width, caption=""):
+ """
+ Wraps the given text at column I{width}, optionally indenting
+ it so that no text is under I{caption}. It's possible to encode
+ hard linebreaks in I{text} with L{NEWLINE_ESCAPE}.
+
+ @type text: String
+ @param text: the text to be wrapped
+ @type width: Integer
+ @param width: the column at which the text should be wrapped
+ @type caption: String
+ @param caption: this string is inserted at the beginning of the
+ return value and the paragraph is indented up to
+ C{len(caption)}.
+ @rtype: String
+ @return: the wrapped and indented paragraph
+ """
+ rValue = ""
+ line = caption
+ text = text.replace(2*NEWLINE_ESCAPE, NEWLINE_ESCAPE+" "+NEWLINE_ESCAPE)
+ words = text.split()
+ indentLevel = len(caption)+1
+
+ for w in words:
+ if line != "" and line[-1] == "\n":
+ rValue += line
+ line = " "*indentLevel
+ if len(line)+len(w.replace(NEWLINE_ESCAPE, ""))+1 > width:
+ rValue += line+"\n"
+ line = " "*indentLevel+w.replace(NEWLINE_ESCAPE, "\n")
+ elif w.find(NEWLINE_ESCAPE) >= 0:
+ if len(line.strip()) > 0:
+ rValue += line+" "+w.replace(NEWLINE_ESCAPE, "\n")
+ else:
+ rValue += line+w.replace(NEWLINE_ESCAPE, "\n")
+ line = " "*indentLevel
+ else:
+ if len(line.strip()) > 0:
+ line += " "+w
+ else:
+ line += w
+ if len(line) > 0:
+ rValue += line.replace(NEWLINE_ESCAPE, "\n")
+ rValue = rValue.replace(SPACE_ESCAPE, " ")
+ return rValue
+
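+# A quick illustration of wrap() (editor's sketch, not upstream code; values
+# assumed). With width 12 and a 6-character caption the text is folded and
+# indented under the caption:
+#
+#     >>> wrap("one two three four", 12, caption="Note: ")
+#     'Note: one\n       two\n       three\n       four'
+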
+def get_glsa_list(myconfig):
+ """
+ Returns a list of all available GLSAs in the given repository
+ by comparing the filelist there with the pattern described in
+ the config.
+
+ @type myconfig: portage.config
+ @param myconfig: Portage settings instance
+
+ @rtype: List of Strings
+ @return: a list of GLSA IDs in this repository
+ """
+ rValue = []
+
+ if "GLSA_DIR" in myconfig:
+ repository = myconfig["GLSA_DIR"]
+ else:
+ repository = os.path.join(myconfig["PORTDIR"], "metadata", "glsa")
+
+ if not os.access(repository, os.R_OK):
+ return []
+ dirlist = os.listdir(repository)
+ prefix = "glsa-"
+ suffix = ".xml"
+
+ for f in dirlist:
+ try:
+ if f[:len(prefix)] == prefix:
+ rValue.append(f[len(prefix):-1*len(suffix)])
+ except IndexError:
+ pass
+ return rValue
+
+def getListElements(listnode):
+ """
+ Get all <li> elements for a given <ol> or <ul> node.
+
+ @type listnode: xml.dom.Node
+ @param listnode: <ul> or <ol> list to get the elements for
+ @rtype: List of Strings
+ @return: a list that contains the value of the <li> elements
+ """
+ rValue = []
+ if not listnode.nodeName in ["ul", "ol"]:
+ raise GlsaFormatException("Invalid function call: listnode is not <ul> or <ol>")
+ for li in listnode.childNodes:
+ if li.nodeType != xml.dom.Node.ELEMENT_NODE:
+ continue
+ rValue.append(getText(li, format="strip"))
+ return rValue
+
+def getText(node, format):
+ """
+ This is the main parser function. It takes a node and traverses
+ recursively over the subnodes, getting the text of each (and the
+ I{link} attribute for <uri> and <mail>). Depending on the I{format}
+ parameter the text might be formatted by adding/removing newlines,
+ tabs and spaces. This function is only useful for the GLSA DTD,
+ it's not applicable for other DTDs.
+
+ @type node: xml.dom.Node
+ @param node: the root node to start with the parsing
+ @type format: String
+ @param format: this should be either I{strip}, I{keep} or I{xml}
+ I{keep} just gets the text and does no formatting.
+ I{strip} replaces newlines and tabs with spaces and
+ replaces multiple spaces with one space.
+ I{xml} does some more formatting, depending on the
+ type of the encountered nodes.
+ @rtype: String
+ @return: the (formatted) content of the node and its subnodes
+ """
+ rValue = ""
+ if format in ["strip", "keep"]:
+ if node.nodeName in ["uri", "mail"]:
+ rValue += node.childNodes[0].data+": "+node.getAttribute("link")
+ else:
+ for subnode in node.childNodes:
+ if subnode.nodeName == "#text":
+ rValue += subnode.data
+ else:
+ rValue += getText(subnode, format)
+ else:
+ for subnode in node.childNodes:
+ if subnode.nodeName == "p":
+ for p_subnode in subnode.childNodes:
+ if p_subnode.nodeName == "#text":
+ rValue += p_subnode.data.strip()
+ elif p_subnode.nodeName in ["uri", "mail"]:
+ rValue += p_subnode.childNodes[0].data
+ rValue += " ( "+p_subnode.getAttribute("link")+" )"
+ rValue += NEWLINE_ESCAPE
+ elif subnode.nodeName == "ul":
+ for li in getListElements(subnode):
+ rValue += "-"+SPACE_ESCAPE+li+NEWLINE_ESCAPE+" "
+ elif subnode.nodeName == "ol":
+ i = 0
+ for li in getListElements(subnode):
+ i = i+1
+ rValue += str(i)+"."+SPACE_ESCAPE+li+NEWLINE_ESCAPE+" "
+ elif subnode.nodeName == "code":
+ rValue += getText(subnode, format="keep").replace("\n", NEWLINE_ESCAPE)
+ if rValue[-1*len(NEWLINE_ESCAPE):] != NEWLINE_ESCAPE:
+ rValue += NEWLINE_ESCAPE
+ elif subnode.nodeName == "#text":
+ rValue += subnode.data
+ else:
+ raise GlsaFormatException(_("Invalid Tag found: "), subnode.nodeName)
+ if format == "strip":
+ rValue = rValue.strip(" \n\t")
+ rValue = re.sub("[\s]{2,}", " ", rValue)
+ return rValue
+
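+# For example (editor's sketch, assumed input): with format="strip" a
+# <p>foo\n   bar</p> node collapses to "foo bar", while format="xml"
+# renders <ul>/<ol> items as "- item"/"1. item" lines and preserves hard
+# linebreaks through the NEWLINE_ESCAPE/SPACE_ESCAPE markers above.
+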
+def getMultiTagsText(rootnode, tagname, format):
+ """
+ Returns a list with the text of all subnodes of type I{tagname}
+ under I{rootnode} (which itself is not parsed) using the given I{format}.
+
+ @type rootnode: xml.dom.Node
+ @param rootnode: the node to search for I{tagname}
+ @type tagname: String
+ @param tagname: the name of the tags to search for
+ @type format: String
+ @param format: see L{getText}
+ @rtype: List of Strings
+ @return: a list containing the text of all I{tagname} childnodes
+ """
+ rValue = []
+ for e in rootnode.getElementsByTagName(tagname):
+ rValue.append(getText(e, format))
+ return rValue
+
+def makeAtom(pkgname, versionNode):
+ """
+ Creates a (syntactically) valid portage atom from the given
+ package name and the information in the I{versionNode}.
+
+ @type pkgname: String
+ @param pkgname: the name of the package for this atom
+ @type versionNode: xml.dom.Node
+ @param versionNode: a <vulnerable> or <unaffected> Node that
+ contains the version information for this atom
+ @rtype: String
+ @return: the portage atom
+ """
+ rValue = opMapping[versionNode.getAttribute("range")] \
+ + pkgname \
+ + "-" + getText(versionNode, format="strip")
+ try:
+ slot = versionNode.getAttribute("slot").strip()
+ except KeyError:
+ pass
+ else:
+ if slot and slot != "*":
+ rValue += _slot_separator + slot
+ return str(rValue)
+
+def makeVersion(versionNode):
+ """
+ Creates a version string (format <op><version>) from the
+ information in the I{versionNode}.
+
+ @type versionNode: xml.dom.Node
+ @param versionNode: a <vulnerable> or <unaffected> Node that
+ contains the version information for this atom
+ @rtype: String
+ @return: the version string
+ """
+ rValue = opMapping[versionNode.getAttribute("range")] \
+ + getText(versionNode, format="strip")
+ try:
+ slot = versionNode.getAttribute("slot").strip()
+ except KeyError:
+ pass
+ else:
+ if slot and slot != "*":
+ rValue += _slot_separator + slot
+ return rValue
+
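+# Illustration (editor's sketch, assumed GLSA snippet): for an element
+# <unaffected range="ge">1.2.3</unaffected> under a <package name="app-foo/bar">
+# entry, the helpers above yield makeAtom("app-foo/bar", node) ==
+# ">=app-foo/bar-1.2.3" and makeVersion(node) == ">=1.2.3".
+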
+def match(atom, dbapi, match_type="default"):
+ """
+ Wrapper that calls revisionMatch() or portage.dbapi.dbapi.match() depending on
+ the given atom.
+
+ @type atom: string
+ @param atom: a <~ or >~ atom or a normal portage atom that contains the atom to match against
+ @type dbapi: portage.dbapi.dbapi
+ @param dbapi: one of the portage databases to use as information source
+ @type match_type: string
+ @param match_type: if != "default" passed as first argument to dbapi.xmatch
+ to apply the wanted visibility filters
+
+ @rtype: list of strings
+ @return: a list with the matching versions
+ """
+ if atom[2] == "~":
+ return revisionMatch(atom, dbapi, match_type=match_type)
+ elif match_type == "default" or not hasattr(dbapi, "xmatch"):
+ return dbapi.match(atom)
+ else:
+ return dbapi.xmatch(match_type, atom)
+
+def revisionMatch(revisionAtom, dbapi, match_type="default"):
+ """
+ Handler for the special >~, >=~, <=~ and <~ atoms that are supposed to behave
+ like > and < except that they are limited to the same version; the range
+ applies only to the revision part.
+
+ @type revisionAtom: string
+ @param revisionAtom: a <~ or >~ atom that contains the atom to match against
+ @type dbapi: portage.dbapi.dbapi
+ @param dbapi: one of the portage databases to use as information source
+ @type match_type: string
+ @param match_type: if != "default" passed as first argument to portdb.xmatch
+ to apply the wanted visibility filters
+
+ @rtype: list of strings
+ @return: a list with the matching versions
+ """
+ if match_type == "default" or not hasattr(dbapi, "xmatch"):
+ if ":" in revisionAtom:
+ mylist = dbapi.match(re.sub(r'-r[0-9]+(:[^ ]+)?$', r'\1', revisionAtom[2:]))
+ else:
+ mylist = dbapi.match(re.sub("-r[0-9]+$", "", revisionAtom[2:]))
+ else:
+ if ":" in revisionAtom:
+ mylist = dbapi.xmatch(match_type, re.sub(r'-r[0-9]+(:[^ ]+)?$', r'\1', revisionAtom[2:]))
+ else:
+ mylist = dbapi.xmatch(match_type, re.sub("-r[0-9]+$", "", revisionAtom[2:]))
+ rValue = []
+ for v in mylist:
+ r1 = pkgsplit(v)[-1][1:]
+ r2 = pkgsplit(revisionAtom[3:])[-1][1:]
+ if eval(r1+" "+revisionAtom[0:2]+" "+r2):
+ rValue.append(v)
+ return rValue
+
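+# Example of the revision-only ranges (editor's sketch, assumed atoms): a GLSA
+# "rge" range on cat/pkg-1.0-r2 produces the atom ">=~cat/pkg-1.0-r2";
+# revisionMatch() first matches "~cat/pkg-1.0" (any revision of 1.0) and then
+# keeps only versions whose revision satisfies ">= 2", so cat/pkg-1.0-r3
+# matches while cat/pkg-1.1 does not.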
+
+def getMinUpgrade(vulnerableList, unaffectedList, portdbapi, vardbapi, minimize=True):
+ """
+ Checks whether the system state matches an atom in
+ I{vulnerableList} and returns a string describing
+ the lowest version of the package that matches an atom in
+ I{unaffectedList} and is greater than the currently installed
+ version, or None if the system is not affected. Both
+ I{vulnerableList} and I{unaffectedList} should have the
+ same base package.
+
+ @type vulnerableList: List of Strings
+ @param vulnerableList: atoms matching vulnerable package versions
+ @type unaffectedList: List of Strings
+ @param unaffectedList: atoms matching unaffected package versions
+ @type portdbapi: portage.dbapi.porttree.portdbapi
+ @param portdbapi: Ebuild repository
+ @type vardbapi: portage.dbapi.vartree.vardbapi
+ @param vardbapi: Installed package repository
+ @type minimize: Boolean
+ @param minimize: True for a least-change upgrade, False for emerge-like algorithm
+
+ @rtype: String | None
+ @return: the lowest unaffected version that is greater than
+ the installed version.
+ """
+ rValue = None
+ v_installed = []
+ u_installed = []
+ for v in vulnerableList:
+ v_installed += match(v, vardbapi)
+
+ for u in unaffectedList:
+ u_installed += match(u, vardbapi)
+
+ install_unaffected = True
+ for i in v_installed:
+ if i not in u_installed:
+ install_unaffected = False
+
+ if install_unaffected:
+ return rValue
+
+ for u in unaffectedList:
+ mylist = match(u, portdbapi, match_type="match-all")
+ for c in mylist:
+ c_pv = catpkgsplit(c)
+ i_pv = catpkgsplit(best(v_installed))
+ if pkgcmp(c_pv[1:], i_pv[1:]) > 0 \
+ and (rValue == None \
+ or not match("="+rValue, portdbapi) \
+ or (minimize ^ (pkgcmp(c_pv[1:], catpkgsplit(rValue)[1:]) > 0)) \
+ and match("="+c, portdbapi)) \
+ and portdbapi.aux_get(c, ["SLOT"]) == vardbapi.aux_get(best(v_installed), ["SLOT"]):
+ rValue = c_pv[0]+"/"+c_pv[1]+"-"+c_pv[2]
+ if c_pv[3] != "r0": # we don't like -r0 for display
+ rValue += "-"+c_pv[3]
+ return rValue
+
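+# Example (editor's sketch, assumed package state): with cat/pkg-1.0 installed,
+# vulnerableList ["<cat/pkg-1.2"] and unaffectedList [">=cat/pkg-1.2"],
+# getMinUpgrade() returns "cat/pkg-1.2", the lowest unaffected version greater
+# than the installed one (minimize=False instead allows an emerge-like, higher
+# candidate).
+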
+def format_date(datestr):
+ """
+ Takes a date (announced, revised) from a GLSA and formats
+ it as readable text (e.g. "January 01, 2008").
+
+ @type datestr: String
+ @param datestr: the date string to reformat
+ @rtype: String
+ @return: a reformatted string, or the original string
+ if it cannot be reformatted.
+ """
+ splitdate = datestr.split("-", 2)
+ if len(splitdate) != 3:
+ return datestr
+
+ # This cannot raise an error as we use () instead of []
+ splitdate = (int(x) for x in splitdate)
+
+ from datetime import date
+ try:
+ d = date(*splitdate)
+ except ValueError:
+ return datestr
+
+ # TODO We could format to local date format '%x' here?
+ return _unicode_decode(d.strftime("%B %d, %Y"),
+ encoding=_encodings['content'], errors='replace')
+
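+# For instance, format_date("2008-01-01") returns "January 01, 2008" (editor's
+# note: month names come from the C library's %B expansion); malformed input
+# is returned unchanged.
+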
+# simple Exception classes to catch specific errors
+class GlsaTypeException(Exception):
+ def __init__(self, doctype):
+ Exception.__init__(self, "wrong DOCTYPE: %s" % doctype)
+
+class GlsaFormatException(Exception):
+ pass
+
+class GlsaArgumentException(Exception):
+ pass
+
+# GLSA xml data wrapper class
+class Glsa:
+ """
+ This class is a wrapper for the XML data and provides methods to access
+ and display the contained data.
+ """
+ def __init__(self, myid, myconfig, vardbapi, portdbapi):
+ """
+ Simple constructor to set the ID, store the config and get the
+ XML data by calling C{self.read()}.
+
+ @type myid: String
+ @param myid: String describing the id for the GLSA object (standard
+ GLSAs have an ID of the form YYYYMM-nn) or an existing
+ filename containing a GLSA.
+ @type myconfig: portage.config
+ @param myconfig: the config that should be used for this object.
+ @type vardbapi: portage.dbapi.vartree.vardbapi
+ @param vardbapi: installed package repository
+ @type portdbapi: portage.dbapi.porttree.portdbapi
+ @param portdbapi: ebuild repository
+ """
+ myid = _unicode_decode(myid,
+ encoding=_encodings['content'], errors='strict')
+ if re.match(r'\d{6}-\d{2}', myid):
+ self.type = "id"
+ elif os.path.exists(myid):
+ self.type = "file"
+ else:
+ raise GlsaArgumentException(_("Given ID %s isn't a valid GLSA ID or filename.") % myid)
+ self.nr = myid
+ self.config = myconfig
+ self.vardbapi = vardbapi
+ self.portdbapi = portdbapi
+ self.read()
+
+ def read(self):
+ """
+ Here we build the filename from the config and the ID and pass
+ it to urllib to fetch it from the filesystem or a remote server.
+
+ @rtype: None
+ @return: None
+ """
+ if "GLSA_DIR" in self.config:
+ repository = "file://" + self.config["GLSA_DIR"]+"/"
+ else:
+ repository = "file://" + self.config["PORTDIR"] + "/metadata/glsa/"
+ if self.type == "file":
+ myurl = "file://"+self.nr
+ else:
+ myurl = repository + "glsa-%s.xml" % str(self.nr)
+ self.parse(urllib_request_urlopen(myurl))
+ return None
+
+ def parse(self, myfile):
+ """
+ This method parses the XML file and sets up the internal data
+ structures by calling the different helper functions in this
+ module.
+
+ @type myfile: File
+ @param myfile: open file (or file-like) object to grab the XML data from
+ @rtype: None
+ @return: None
+ """
+ self.DOM = xml.dom.minidom.parse(myfile)
+ if not self.DOM.doctype:
+ raise GlsaTypeException(None)
+ elif self.DOM.doctype.systemId == "http://www.gentoo.org/dtd/glsa.dtd":
+ self.dtdversion = 0
+ elif self.DOM.doctype.systemId == "http://www.gentoo.org/dtd/glsa-2.dtd":
+ self.dtdversion = 2
+ else:
+ raise GlsaTypeException(self.DOM.doctype.systemId)
+ myroot = self.DOM.getElementsByTagName("glsa")[0]
+ if self.type == "id" and myroot.getAttribute("id") != self.nr:
+ raise GlsaFormatException(_("filename and internal id don't match:") + myroot.getAttribute("id") + " != " + self.nr)
+
+ # the simple (single, required, top-level, #PCDATA) tags first
+ self.title = getText(myroot.getElementsByTagName("title")[0], format="strip")
+ self.synopsis = getText(myroot.getElementsByTagName("synopsis")[0], format="strip")
+ self.announced = format_date(getText(myroot.getElementsByTagName("announced")[0], format="strip"))
+
+ count = 1
+ # Support both formats of revised:
+ # <revised>December 30, 2007: 02</revised>
+ # <revised count="2">2007-12-30</revised>
+ revisedEl = myroot.getElementsByTagName("revised")[0]
+ self.revised = getText(revisedEl, format="strip")
+ if ((sys.hexversion >= 0x3000000 and "count" in revisedEl.attributes) or
+ (sys.hexversion < 0x3000000 and revisedEl.attributes.has_key("count"))):
+ count = revisedEl.getAttribute("count")
+ elif (self.revised.find(":") >= 0):
+ (self.revised, count) = self.revised.split(":")
+
+ self.revised = format_date(self.revised)
+
+ try:
+ self.count = int(count)
+ except ValueError:
+ # TODO should this raise a GlsaFormatException?
+ self.count = 1
+
+ # now the optional and 0-n toplevel, #PCDATA tags and references
+ try:
+ self.access = getText(myroot.getElementsByTagName("access")[0], format="strip")
+ except IndexError:
+ self.access = ""
+ self.bugs = getMultiTagsText(myroot, "bug", format="strip")
+ self.references = getMultiTagsText(myroot.getElementsByTagName("references")[0], "uri", format="keep")
+
+ # and now the formatted text elements
+ self.description = getText(myroot.getElementsByTagName("description")[0], format="xml")
+ self.workaround = getText(myroot.getElementsByTagName("workaround")[0], format="xml")
+ self.resolution = getText(myroot.getElementsByTagName("resolution")[0], format="xml")
+ self.impact_text = getText(myroot.getElementsByTagName("impact")[0], format="xml")
+ self.impact_type = myroot.getElementsByTagName("impact")[0].getAttribute("type")
+ try:
+ self.background = getText(myroot.getElementsByTagName("background")[0], format="xml")
+ except IndexError:
+ self.background = ""
+
+ # finally the interesting tags (product, affected, package)
+ self.glsatype = myroot.getElementsByTagName("product")[0].getAttribute("type")
+ self.product = getText(myroot.getElementsByTagName("product")[0], format="strip")
+ self.affected = myroot.getElementsByTagName("affected")[0]
+ self.packages = {}
+ for p in self.affected.getElementsByTagName("package"):
+ name = p.getAttribute("name")
+ try:
+ name = portage.dep.Atom(name)
+ except portage.exception.InvalidAtom:
+ raise GlsaFormatException(_("invalid package name: %s") % name)
+ if name != name.cp:
+ raise GlsaFormatException(_("invalid package name: %s") % name)
+ name = name.cp
+ if name not in self.packages:
+ self.packages[name] = []
+ tmp = {}
+ tmp["arch"] = p.getAttribute("arch")
+ tmp["auto"] = (p.getAttribute("auto") == "yes")
+ tmp["vul_vers"] = [makeVersion(v) for v in p.getElementsByTagName("vulnerable")]
+ tmp["unaff_vers"] = [makeVersion(v) for v in p.getElementsByTagName("unaffected")]
+ tmp["vul_atoms"] = [makeAtom(name, v) for v in p.getElementsByTagName("vulnerable")]
+ tmp["unaff_atoms"] = [makeAtom(name, v) for v in p.getElementsByTagName("unaffected")]
+ self.packages[name].append(tmp)
+ # TODO: services aren't really used yet
+ self.services = self.affected.getElementsByTagName("service")
+ return None
+
+ def dump(self, outstream=sys.stdout):
+ """
+ Dumps a plaintext representation of this GLSA to I{outstream} or
+ B{stdout} if it is omitted.
+
+ @type outstream: File
+ @param outstream: Stream that should be used for writing
+ (defaults to sys.stdout)
+ """
+ width = 76
+ outstream.write(("GLSA %s: \n%s" % (self.nr, self.title)).center(width)+"\n")
+ outstream.write((width*"=")+"\n")
+ outstream.write(wrap(self.synopsis, width, caption=_("Synopsis: "))+"\n")
+ outstream.write(_("Announced on: %s\n") % self.announced)
+ outstream.write(_("Last revised on: %s : %02d\n\n") % (self.revised, self.count))
+ if self.glsatype == "ebuild":
+ for k in self.packages:
+ pkg = self.packages[k]
+ for path in pkg:
+ vul_vers = "".join(path["vul_vers"])
+ unaff_vers = "".join(path["unaff_vers"])
+ outstream.write(_("Affected package: %s\n") % k)
+ outstream.write(_("Affected archs: "))
+ if path["arch"] == "*":
+ outstream.write(_("All\n"))
+ else:
+ outstream.write("%s\n" % path["arch"])
+ outstream.write(_("Vulnerable: %s\n") % vul_vers)
+ outstream.write(_("Unaffected: %s\n\n") % unaff_vers)
+ elif self.glsatype == "infrastructure":
+ pass
+ if len(self.bugs) > 0:
+ outstream.write(_("\nRelated bugs: "))
+ for i in range(0, len(self.bugs)):
+ outstream.write(self.bugs[i])
+ if i < len(self.bugs)-1:
+ outstream.write(", ")
+ else:
+ outstream.write("\n")
+ if self.background:
+ outstream.write("\n"+wrap(self.background, width, caption=_("Background: ")))
+ outstream.write("\n"+wrap(self.description, width, caption=_("Description: ")))
+ outstream.write("\n"+wrap(self.impact_text, width, caption=_("Impact: ")))
+ outstream.write("\n"+wrap(self.workaround, width, caption=_("Workaround: ")))
+ outstream.write("\n"+wrap(self.resolution, width, caption=_("Resolution: ")))
+ myreferences = ""
+ for r in self.references:
+ myreferences += (r.replace(" ", SPACE_ESCAPE)+NEWLINE_ESCAPE+" ")
+ outstream.write("\n"+wrap(myreferences, width, caption=_("References: ")))
+ outstream.write("\n")
+
+ def isVulnerable(self):
+ """
+ Tests if the system is affected by this GLSA by checking if any
+ vulnerable package versions are installed. Also checks for affected
+ architectures.
+
+ @rtype: Boolean
+ @return: True if the system is affected, False if not
+ """
+ rValue = False
+ for k in self.packages:
+ pkg = self.packages[k]
+ for path in pkg:
+ if path["arch"] == "*" or self.config["ARCH"] in path["arch"].split():
+ for v in path["vul_atoms"]:
+ rValue = rValue \
+ or (len(match(v, self.vardbapi)) > 0 \
+ and getMinUpgrade(path["vul_atoms"], path["unaff_atoms"], \
+ self.portdbapi, self.vardbapi))
+ return rValue
+
+ def isApplied(self):
+ """
+ Checks whether the GLSA ID is in the GLSA checkfile to determine
+ if this GLSA was already applied.
+
+ @rtype: Boolean
+ @return: True if the GLSA was applied, False if not
+ """
+ return (self.nr in get_applied_glsas(self.config))
+
+ def inject(self):
+ """
+ Puts the ID of this GLSA into the GLSA checkfile, so it won't
+ show up on future checks. Should be called after a GLSA is
+ applied or on explicit user request.
+
+ @rtype: None
+ @return: None
+ """
+ if not self.isApplied():
+ checkfile = io.open(
+ _unicode_encode(os.path.join(self.config["EROOT"],
+ CACHE_PATH, "glsa"),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='a+', encoding=_encodings['content'], errors='strict')
+ checkfile.write(_unicode_decode(self.nr + "\n"))
+ checkfile.close()
+ return None
+
+ def getMergeList(self, least_change=True):
+ """
+ Returns the list of package-versions that have to be merged to
+ apply this GLSA properly. The versions are as low as possible
+ while avoiding downgrades (see L{getMinUpgrade}).
+
+ @type least_change: Boolean
+ @param least_change: True if the smallest possible upgrade should be selected,
+ False for an emerge-like algorithm
+ @rtype: List of Strings
+ @return: list of package-versions that have to be merged
+ """
+ rValue = []
+ for pkg in self.packages:
+ for path in self.packages[pkg]:
+ update = getMinUpgrade(path["vul_atoms"], path["unaff_atoms"], \
+ self.portdbapi, self.vardbapi, minimize=least_change)
+ if update:
+ rValue.append(update)
+ return rValue
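+
+# Illustrative usage of the class (editor's sketch, not upstream code; the
+# settings/dbapi lookups are assumptions based on common portage idioms):
+#
+#     glsa = Glsa("201101-01", portage.settings,
+#                 portage.db[portage.root]["vartree"].dbapi,
+#                 portage.db[portage.root]["porttree"].dbapi)
+#     if glsa.isVulnerable() and not glsa.isApplied():
+#         for cpv in glsa.getMergeList():
+#             print(cpv)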
diff --git a/portage_with_autodep/pym/portage/localization.py b/portage_with_autodep/pym/portage/localization.py
new file mode 100644
index 0000000..d16c4b1
--- /dev/null
+++ b/portage_with_autodep/pym/portage/localization.py
@@ -0,0 +1,20 @@
+# localization.py -- Code to manage/help portage localization.
+# Copyright 2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+
+# We define this to make the transition easier for us.
+def _(mystr):
+ return mystr
+
+
+def localization_example():
+ # Dict references allow translators to rearrange word order.
+ print(_("You can use this string for translating."))
+ print(_("Strings can be formatted with %(mystr)s like this.") % {"mystr": "VALUES"})
+
+ a_value = "value.of.a"
+ b_value = 123
+ c_value = [1,2,3,4]
+ print(_("A: %(a)s -- B: %(b)s -- C: %(c)s") % {"a":a_value,"b":b_value,"c":c_value})
+
diff --git a/portage_with_autodep/pym/portage/locks.py b/portage_with_autodep/pym/portage/locks.py
new file mode 100644
index 0000000..9ed1d6a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/locks.py
@@ -0,0 +1,395 @@
+# portage: Lock management code
+# Copyright 2004-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ["lockdir", "unlockdir", "lockfile", "unlockfile", \
+ "hardlock_name", "hardlink_is_mine", "hardlink_lockfile", \
+ "unhardlink_lockfile", "hardlock_cleanup"]
+
+import errno
+import fcntl
+import stat
+import sys
+import time
+
+import portage
+from portage import os
+from portage.const import PORTAGE_BIN_PATH
+from portage.exception import DirectoryNotFound, FileNotFound, \
+ InvalidData, TryAgain, OperationNotPermitted, PermissionDenied
+from portage.data import portage_gid
+from portage.util import writemsg
+from portage.localization import _
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+HARDLINK_FD = -2
+_default_lock_fn = fcntl.lockf
+
+# Used by emerge in order to disable the "waiting for lock" message
+# so that it doesn't interfere with the status display.
+_quiet = False
+
+def lockdir(mydir, flags=0):
+ return lockfile(mydir, wantnewlockfile=1, flags=flags)
+def unlockdir(mylock):
+ return unlockfile(mylock)
+
+def lockfile(mypath, wantnewlockfile=0, unlinkfile=0,
+ waiting_msg=None, flags=0):
+ """
+ If wantnewlockfile is True then this creates a lockfile in the parent
+ directory as the file: '.' + basename + '.portage_lockfile'.
+ """
+
+ if not mypath:
+ raise InvalidData(_("Empty path given"))
+
+ if isinstance(mypath, basestring) and mypath[-1] == '/':
+ mypath = mypath[:-1]
+
+ if hasattr(mypath, 'fileno'):
+ mypath = mypath.fileno()
+ if isinstance(mypath, int):
+ lockfilename = mypath
+ wantnewlockfile = 0
+ unlinkfile = 0
+ elif wantnewlockfile:
+ base, tail = os.path.split(mypath)
+ lockfilename = os.path.join(base, "." + tail + ".portage_lockfile")
+ del base, tail
+ unlinkfile = 1
+ else:
+ lockfilename = mypath
+
+ if isinstance(mypath, basestring):
+ if not os.path.exists(os.path.dirname(mypath)):
+ raise DirectoryNotFound(os.path.dirname(mypath))
+ preexisting = os.path.exists(lockfilename)
+ old_mask = os.umask(000)
+ try:
+ try:
+ myfd = os.open(lockfilename, os.O_CREAT|os.O_RDWR, 0o660)
+ except OSError as e:
+ func_call = "open('%s')" % lockfilename
+ if e.errno == OperationNotPermitted.errno:
+ raise OperationNotPermitted(func_call)
+ elif e.errno == PermissionDenied.errno:
+ raise PermissionDenied(func_call)
+ else:
+ raise
+
+ if not preexisting:
+ try:
+ if os.stat(lockfilename).st_gid != portage_gid:
+ os.chown(lockfilename, -1, portage_gid)
+ except OSError as e:
+ if e.errno in (errno.ENOENT, errno.ESTALE):
+ return lockfile(mypath,
+ wantnewlockfile=wantnewlockfile,
+ unlinkfile=unlinkfile, waiting_msg=waiting_msg,
+ flags=flags)
+ else:
+ writemsg("%s: chown('%s', -1, %d)\n" % \
+ (e, lockfilename, portage_gid), noiselevel=-1)
+ writemsg(_("Cannot chown a lockfile: '%s'\n") % \
+ lockfilename, noiselevel=-1)
+ writemsg(_("Group IDs of current user: %s\n") % \
+ " ".join(str(n) for n in os.getgroups()),
+ noiselevel=-1)
+ finally:
+ os.umask(old_mask)
+
+ elif isinstance(mypath, int):
+ myfd = mypath
+
+ else:
+ raise ValueError(_("Unknown type passed in '%s': '%s'") % \
+ (type(mypath), mypath))
+
+ # Try for a non-blocking lock; if it's held, print a message that
+ # we're waiting on the lockfile and fall back to a blocking attempt.
+ locking_method = _default_lock_fn
+ try:
+ locking_method(myfd, fcntl.LOCK_EX|fcntl.LOCK_NB)
+ except IOError as e:
+ if not hasattr(e, "errno"):
+ raise
+ if e.errno in (errno.EACCES, errno.EAGAIN):
+ # resource temporarily unavailable; e.g., someone beat us to the lock.
+ if flags & os.O_NONBLOCK:
+ os.close(myfd)
+ raise TryAgain(mypath)
+
+ global _quiet
+ if _quiet:
+ out = None
+ else:
+ out = portage.output.EOutput()
+ if waiting_msg is None:
+ if isinstance(mypath, int):
+ waiting_msg = _("waiting for lock on fd %i") % myfd
+ else:
+ waiting_msg = _("waiting for lock on %s\n") % lockfilename
+ if out is not None:
+ out.ebegin(waiting_msg)
+ # try for the exclusive lock now.
+ try:
+ locking_method(myfd, fcntl.LOCK_EX)
+ except EnvironmentError as e:
+ if out is not None:
+ out.eend(1, str(e))
+ raise
+ if out is not None:
+ out.eend(os.EX_OK)
+ elif e.errno == errno.ENOLCK:
+ # We're not allowed to lock on this FS.
+ os.close(myfd)
+ link_success = False
+ if lockfilename == str(lockfilename):
+ if wantnewlockfile:
+ try:
+ if os.stat(lockfilename)[stat.ST_NLINK] == 1:
+ os.unlink(lockfilename)
+ except OSError:
+ pass
+ link_success = hardlink_lockfile(lockfilename)
+ if not link_success:
+ raise
+ locking_method = None
+ myfd = HARDLINK_FD
+ else:
+ raise
+
+
+ if isinstance(lockfilename, basestring) and \
+ myfd != HARDLINK_FD and _fstat_nlink(myfd) == 0:
+ # The file was deleted on us... Keep trying to make one...
+ os.close(myfd)
+ writemsg(_("lockfile recurse\n"), 1)
+ lockfilename, myfd, unlinkfile, locking_method = lockfile(
+ mypath, wantnewlockfile=wantnewlockfile, unlinkfile=unlinkfile,
+ waiting_msg=waiting_msg, flags=flags)
+
+ writemsg(str((lockfilename,myfd,unlinkfile))+"\n",1)
+ return (lockfilename,myfd,unlinkfile,locking_method)
+
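+# Typical usage (editor's sketch, not upstream code; path assumed): the
+# 4-tuple returned by lockfile() is passed back to unlockfile() verbatim.
+#
+#     mylock = lockfile("/var/tmp/example", wantnewlockfile=1)
+#     try:
+#         pass  # critical section
+#     finally:
+#         unlockfile(mylock)
+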
+def _fstat_nlink(fd):
+ """
+ @param fd: an open file descriptor
+ @type fd: Integer
+ @rtype: Integer
+ @return: the current number of hardlinks to the file
+ """
+ try:
+ return os.fstat(fd).st_nlink
+ except EnvironmentError as e:
+ if e.errno in (errno.ENOENT, errno.ESTALE):
+ # Some filesystems such as CIFS return
+ # ENOENT which means st_nlink == 0.
+ return 0
+ raise
+
+def unlockfile(mytuple):
+
+ #XXX: Compatibility hack.
+ if len(mytuple) == 3:
+ lockfilename,myfd,unlinkfile = mytuple
+ locking_method = fcntl.flock
+ elif len(mytuple) == 4:
+ lockfilename,myfd,unlinkfile,locking_method = mytuple
+ else:
+ raise InvalidData
+
+ if(myfd == HARDLINK_FD):
+ unhardlink_lockfile(lockfilename)
+ return True
+
+ # myfd may be None here due to myfd = mypath in lockfile()
+ if isinstance(lockfilename, basestring) and \
+ not os.path.exists(lockfilename):
+ writemsg(_("lockfile does not exist '%s'\n") % lockfilename,1)
+ if myfd is not None:
+ os.close(myfd)
+ return False
+
+ try:
+ if myfd is None:
+ myfd = os.open(lockfilename, os.O_WRONLY,0o660)
+ unlinkfile = 1
+ locking_method(myfd,fcntl.LOCK_UN)
+ except OSError:
+ if isinstance(lockfilename, basestring):
+ os.close(myfd)
+ raise IOError(_("Failed to unlock file '%s'\n") % lockfilename)
+
+ try:
+ # This sleep call was added to allow other processes that are
+ # waiting for a lock to be able to grab it before it is deleted.
+ # lockfile() already accounts for this situation, however, and
+ # the sleep here adds more time than is saved overall, so it is
+ # commented out until proved necessary.
+ #time.sleep(0.0001)
+ if unlinkfile:
+ locking_method(myfd,fcntl.LOCK_EX|fcntl.LOCK_NB)
+ # We won the lock, so there isn't competition for it.
+ # We can safely delete the file.
+ writemsg(_("Got the lockfile...\n"), 1)
+ if _fstat_nlink(myfd) == 1:
+ os.unlink(lockfilename)
+ writemsg(_("Unlinked lockfile...\n"), 1)
+ locking_method(myfd,fcntl.LOCK_UN)
+ else:
+ writemsg(_("lockfile does not exist '%s'\n") % lockfilename, 1)
+ os.close(myfd)
+ return False
+ except SystemExit:
+ raise
+ except Exception as e:
+ writemsg(_("Failed to get lock... someone took it.\n"), 1)
+ writemsg(str(e)+"\n",1)
+
+ # why test lockfilename? because we may have been handed an
+ # fd originally, and the caller might not like having their
+ # open fd closed automatically on them.
+ if isinstance(lockfilename, basestring):
+ os.close(myfd)
+
+ return True
+
+
+
+
+def hardlock_name(path):
+ return path+".hardlock-"+os.uname()[1]+"-"+str(os.getpid())
+
+def hardlink_is_mine(link,lock):
+ try:
+ return os.stat(link).st_nlink == 2
+ except OSError:
+ return False
+
+def hardlink_lockfile(lockfilename, max_wait=14400):
+ """Does the NFS, hardlink shuffle to ensure locking on the disk.
+ We create a PRIVATE lockfile that is just a placeholder on the disk.
+ Then we HARDLINK the real lockfile to that private file.
+ If our private file has 2 references, then we have the lock. :)
+ Otherwise we lather, rinse, and repeat.
+ We default to a 4 hour timeout.
+ """
+
+ start_time = time.time()
+ myhardlock = hardlock_name(lockfilename)
+ reported_waiting = False
+
+ while(time.time() < (start_time + max_wait)):
+ # We only need it to exist.
+ myfd = os.open(myhardlock, os.O_CREAT|os.O_RDWR,0o660)
+ os.close(myfd)
+
+ if not os.path.exists(myhardlock):
+ raise FileNotFound(
+ _("Created lockfile is missing: %(filename)s") % \
+ {"filename" : myhardlock})
+
+ try:
+ res = os.link(myhardlock, lockfilename)
+ except OSError:
+ pass
+
+ if hardlink_is_mine(myhardlock, lockfilename):
+ # We have the lock.
+ if reported_waiting:
+ writemsg("\n", noiselevel=-1)
+ return True
+
+ if reported_waiting:
+ writemsg(".", noiselevel=-1)
+ else:
+ reported_waiting = True
+ msg = _("\nWaiting on (hardlink) lockfile: (one '.' per 3 seconds)\n"
+ "%(bin_path)s/clean_locks can fix stuck locks.\n"
+ "Lockfile: %(lockfilename)s\n") % \
+ {"bin_path": PORTAGE_BIN_PATH, "lockfilename": lockfilename}
+ writemsg(msg, noiselevel=-1)
+ time.sleep(3)
+
+ os.unlink(myhardlock)
+ return False
+
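+# Editor's note on the shuffle above (paths assumed): for a lockfile
+# "/nfs/dir/.foo.portage_lockfile", each contender creates its private
+# hardlock_name() file and tries os.link() toward the shared name; whoever
+# observes a link count of 2 on its private file holds the lock, and everyone
+# else retries every 3 seconds until the 4 hour timeout.
+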
+def unhardlink_lockfile(lockfilename):
+ myhardlock = hardlock_name(lockfilename)
+ if hardlink_is_mine(myhardlock, lockfilename):
+ # Make sure not to touch lockfilename unless we really have a lock.
+ try:
+ os.unlink(lockfilename)
+ except OSError:
+ pass
+ try:
+ os.unlink(myhardlock)
+ except OSError:
+ pass
+
+def hardlock_cleanup(path, remove_all_locks=False):
+ mypid = str(os.getpid())
+ myhost = os.uname()[1]
+ mydl = os.listdir(path)
+
+ results = []
+ mycount = 0
+
+ mylist = {}
+ for x in mydl:
+ if os.path.isfile(path+"/"+x):
+ parts = x.split(".hardlock-")
+ if len(parts) == 2:
+ filename = parts[0]
+ hostpid = parts[1].split("-")
+ host = "-".join(hostpid[:-1])
+ pid = hostpid[-1]
+
+ if filename not in mylist:
+ mylist[filename] = {}
+ if host not in mylist[filename]:
+ mylist[filename][host] = []
+ mylist[filename][host].append(pid)
+
+ mycount += 1
+
+
+ results.append(_("Found %(count)s locks") % {"count":mycount})
+
+ for x in mylist:
+ if myhost in mylist[x] or remove_all_locks:
+ mylockname = hardlock_name(path+"/"+x)
+ if hardlink_is_mine(mylockname, path+"/"+x) or \
+ not os.path.exists(path+"/"+x) or \
+ remove_all_locks:
+ for y in mylist[x]:
+ for z in mylist[x][y]:
+ filename = path+"/"+x+".hardlock-"+y+"-"+z
+ if filename == mylockname:
+ continue
+ try:
+ # We're sweeping through, unlinking everyone's locks.
+ os.unlink(filename)
+ results.append(_("Unlinked: ") + filename)
+ except OSError:
+ pass
+ try:
+ os.unlink(path+"/"+x)
+ results.append(_("Unlinked: ") + path+"/"+x)
+ os.unlink(mylockname)
+ results.append(_("Unlinked: ") + mylockname)
+ except OSError:
+ pass
+ else:
+ try:
+ os.unlink(mylockname)
+ results.append(_("Unlinked: ") + mylockname)
+ except OSError:
+ pass
+
+ return results
+
diff --git a/portage_with_autodep/pym/portage/mail.py b/portage_with_autodep/pym/portage/mail.py
new file mode 100644
index 0000000..17dfcaf
--- /dev/null
+++ b/portage_with_autodep/pym/portage/mail.py
@@ -0,0 +1,177 @@
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+# Since python ebuilds remove the 'email' module when USE=build
+# is enabled, use a local import so that
+# portage.proxy.lazyimport._preload_portage_submodules()
+# can load this module even though the 'email' module is missing.
+# The elog mail modules won't work, but at least an ImportError
+# won't cause portage to crash during stage builds. Since the
+# 'smtplib' module imports the 'email' module, that's imported
+# locally as well.
+
+import socket
+import sys
+import time
+
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode, _unicode_encode
+from portage.localization import _
+import portage
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+ def _force_ascii_if_necessary(s):
+ # Force ascii encoding in order to avoid UnicodeEncodeError
+ # from smtplib.sendmail with python3 (bug #291331).
+ s = _unicode_encode(s,
+ encoding='ascii', errors='backslashreplace')
+ s = _unicode_decode(s,
+ encoding='ascii', errors='replace')
+ return s
+
+else:
+
+ def _force_ascii_if_necessary(s):
+ return s
+
+def TextMessage(_text):
+ from email.mime.text import MIMEText
+ mimetext = MIMEText(_text)
+ if sys.hexversion >= 0x3000000:
+ mimetext.set_charset("UTF-8")
+ return mimetext
+
+def create_message(sender, recipient, subject, body, attachments=None):
+
+ from email.header import Header
+ from email.mime.base import MIMEBase as BaseMessage
+ from email.mime.multipart import MIMEMultipart as MultipartMessage
+
+ if sys.hexversion < 0x3000000:
+ sender = _unicode_encode(sender,
+ encoding=_encodings['content'], errors='strict')
+ recipient = _unicode_encode(recipient,
+ encoding=_encodings['content'], errors='strict')
+ subject = _unicode_encode(subject,
+ encoding=_encodings['content'], errors='backslashreplace')
+ body = _unicode_encode(body,
+ encoding=_encodings['content'], errors='backslashreplace')
+
+ if attachments == None:
+ mymessage = TextMessage(body)
+ else:
+ mymessage = MultipartMessage()
+ mymessage.attach(TextMessage(body))
+ for x in attachments:
+ if isinstance(x, BaseMessage):
+ mymessage.attach(x)
+ elif isinstance(x, basestring):
+ if sys.hexversion < 0x3000000:
+ x = _unicode_encode(x,
+ encoding=_encodings['content'],
+ errors='backslashreplace')
+ mymessage.attach(TextMessage(x))
+ else:
+ raise portage.exception.PortageException(_("Can't handle type of attachment: %s") % type(x))
+
+ mymessage.set_unixfrom(sender)
+ mymessage["To"] = recipient
+ mymessage["From"] = sender
+
+ # Use Header as a workaround so that long subject lines are wrapped
+ # correctly by <=python-2.6 (gentoo bug #263370, python issue #1974).
+ # Also, need to force ascii for python3, in order to avoid
+ # UnicodeEncodeError with non-ascii characters:
+ # File "/usr/lib/python3.1/email/header.py", line 189, in __init__
+ # self.append(s, charset, errors)
+ # File "/usr/lib/python3.1/email/header.py", line 262, in append
+ # input_bytes = s.encode(input_charset, errors)
+ #UnicodeEncodeError: 'ascii' codec can't encode characters in position 0-9: ordinal not in range(128)
+ mymessage["Subject"] = Header(_force_ascii_if_necessary(subject))
+ mymessage["Date"] = time.strftime("%a, %d %b %Y %H:%M:%S %z")
+
+ return mymessage
+
+def send_mail(mysettings, message):
+
+ import smtplib
+
+ mymailhost = "localhost"
+ mymailport = 25
+ mymailuser = ""
+ mymailpasswd = ""
+ myrecipient = "root@localhost"
+
+ # Syntax for PORTAGE_ELOG_MAILURI (if defined):
+ # address [[user:passwd@]mailserver[:port]]
+ # where address: recipient address
+ # user: username for smtp auth (defaults to none)
+ # passwd: password for smtp auth (defaults to none)
+ # mailserver: smtp server that should be used to deliver the mail (defaults to localhost)
+ # alternatively this can also be the absolute path to a sendmail binary if you don't want to use smtp
+ # port: port to use on the given smtp server (defaults to 25, values > 100000 indicate that starttls should be used on (port-100000))
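+ # Example (editor's note, values assumed):
+ # PORTAGE_ELOG_MAILURI="root@localhost user:pass@mail.example.com:100587"
+ # sends to root@localhost via mail.example.com port 587 with STARTTLS
+ # and SMTP auth, following the >100000 port convention described above.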
+ if " " in mysettings.get("PORTAGE_ELOG_MAILURI", ""):
+ myrecipient, mymailuri = mysettings["PORTAGE_ELOG_MAILURI"].split()
+ if "@" in mymailuri:
+ myauthdata, myconndata = mymailuri.rsplit("@", 1)
+ try:
+ mymailuser,mymailpasswd = myauthdata.split(":")
+ except ValueError:
+ print(_("!!! invalid SMTP AUTH configuration, trying unauthenticated ..."))
+ else:
+ myconndata = mymailuri
+ if ":" in myconndata:
+ mymailhost,mymailport = myconndata.split(":")
+ else:
+ mymailhost = myconndata
+ else:
+ myrecipient = mysettings.get("PORTAGE_ELOG_MAILURI", "")
+
+ myfrom = message.get("From")
+
+ if sys.hexversion < 0x3000000:
+ myrecipient = _unicode_encode(myrecipient,
+ encoding=_encodings['content'], errors='strict')
+ mymailhost = _unicode_encode(mymailhost,
+ encoding=_encodings['content'], errors='strict')
+ mymailport = _unicode_encode(mymailport,
+ encoding=_encodings['content'], errors='strict')
+ myfrom = _unicode_encode(myfrom,
+ encoding=_encodings['content'], errors='strict')
+ mymailuser = _unicode_encode(mymailuser,
+ encoding=_encodings['content'], errors='strict')
+ mymailpasswd = _unicode_encode(mymailpasswd,
+ encoding=_encodings['content'], errors='strict')
+
+ # user wants to use a sendmail binary instead of smtp
+ if mymailhost[0] == os.sep and os.path.exists(mymailhost):
+ fd = os.popen(mymailhost+" -f "+myfrom+" "+myrecipient, "w")
+ fd.write(_force_ascii_if_necessary(message.as_string()))
+ if fd.close() != None:
+ sys.stderr.write(_("!!! %s returned with a non-zero exit code. This generally indicates an error.\n") % mymailhost)
+ else:
+ try:
+ if int(mymailport) > 100000:
+ myconn = smtplib.SMTP(mymailhost, int(mymailport) - 100000)
+ myconn.ehlo()
+ if not myconn.has_extn("STARTTLS"):
+ raise portage.exception.PortageException(_("!!! TLS support requested for logmail but not supported by server"))
+ myconn.starttls()
+ myconn.ehlo()
+ else:
+ myconn = smtplib.SMTP(mymailhost, mymailport)
+ if mymailuser != "" and mymailpasswd != "":
+ myconn.login(mymailuser, mymailpasswd)
+
+ message_str = _force_ascii_if_necessary(message.as_string())
+ myconn.sendmail(myfrom, myrecipient, message_str)
+ myconn.quit()
+ except smtplib.SMTPException as e:
+ raise portage.exception.PortageException(_("!!! An error occurred while trying to send logmail:\n")+str(e))
+ except socket.error as e:
+ raise portage.exception.PortageException(_("!!! A network error occurred while trying to send logmail:\n%s\nSure you configured PORTAGE_ELOG_MAILURI correctly?") % str(e))
+ return
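+
+# Typical usage (editor's sketch, not upstream code; addresses assumed):
+#
+#     msg = create_message("portage@localhost", "root@localhost",
+#                          "elog: app-foo/bar", "merge succeeded")
+#     send_mail(portage.settings, msg)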
+
diff --git a/portage_with_autodep/pym/portage/manifest.py b/portage_with_autodep/pym/portage/manifest.py
new file mode 100644
index 0000000..13efab7
--- /dev/null
+++ b/portage_with_autodep/pym/portage/manifest.py
@@ -0,0 +1,538 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import io
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.checksum:hashfunc_map,perform_multiple_checksums,verify_all',
+ 'portage.util:write_atomic',
+)
+
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.exception import DigestException, FileNotFound, \
+ InvalidDataType, MissingParameter, PermissionDenied, \
+ PortageException, PortagePackageException
+from portage.localization import _
+
+class FileNotInManifestException(PortageException):
+ pass
+
+def manifest2AuxfileFilter(filename):
+ filename = filename.strip(os.sep)
+ mysplit = filename.split(os.path.sep)
+ if "CVS" in mysplit:
+ return False
+ for x in mysplit:
+ if x[:1] == '.':
+ return False
+ return not filename[:7] == 'digest-'
+
+def manifest2MiscfileFilter(filename):
+ filename = filename.strip(os.sep)
+ return not (filename in ["CVS", ".svn", "files", "Manifest"] or filename.endswith(".ebuild"))
+
+def guessManifestFileType(filename):
+ """ Perform a best effort guess of which type the given filename is, avoid using this if possible """
+ if filename.startswith("files" + os.sep + "digest-"):
+ return None
+ if filename.startswith("files" + os.sep):
+ return "AUX"
+ elif filename.endswith(".ebuild"):
+ return "EBUILD"
+ elif filename in ["ChangeLog", "metadata.xml"]:
+ return "MISC"
+ else:
+ return "DIST"
+
+def parseManifest2(mysplit):
+ myentry = None
+ if len(mysplit) > 4 and mysplit[0] in portage.const.MANIFEST2_IDENTIFIERS:
+ mytype = mysplit[0]
+ myname = mysplit[1]
+ try:
+ mysize = int(mysplit[2])
+ except ValueError:
+ return None
+ myhashes = dict(zip(mysplit[3::2], mysplit[4::2]))
+ myhashes["size"] = mysize
+ myentry = Manifest2Entry(type=mytype, name=myname, hashes=myhashes)
+ return myentry
+
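+# A Manifest2 line (editor's sketch, hash values elided as assumptions):
+#
+#     DIST foo-1.0.tar.gz 12345 RMD160 <hex> SHA1 <hex> SHA256 <hex>
+#
+# parseManifest2() turns its whitespace-split form into a Manifest2Entry
+# whose __str__() (below) reproduces the line with hash names sorted.
+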
+class ManifestEntry(object):
+ __slots__ = ("type", "name", "hashes")
+ def __init__(self, **kwargs):
+ for k, v in kwargs.items():
+ setattr(self, k, v)
+
+class Manifest2Entry(ManifestEntry):
+ def __str__(self):
+ myline = " ".join([self.type, self.name, str(self.hashes["size"])])
+ myhashkeys = list(self.hashes)
+ myhashkeys.remove("size")
+ myhashkeys.sort()
+ for h in myhashkeys:
+ myline += " " + h + " " + str(self.hashes[h])
+ return myline
+
+ def __eq__(self, other):
+ if not isinstance(other, Manifest2Entry) or \
+ self.type != other.type or \
+ self.name != other.name or \
+ self.hashes != other.hashes:
+ return False
+ return True
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+class Manifest(object):
+ parsers = (parseManifest2,)
+ def __init__(self, pkgdir, distdir, fetchlist_dict=None,
+ manifest1_compat=False, from_scratch=False):
+ """ create new Manifest instance for package in pkgdir
+ and add compatibility entries for old portage versions if manifest1_compat == True.
+ Do not parse Manifest file if from_scratch == True (only for internal use)
+ The fetchlist_dict parameter is required only for generation of
+ a Manifest (not needed for parsing and checking sums)."""
+ self.pkgdir = _unicode_decode(pkgdir).rstrip(os.sep) + os.sep
+ self.fhashdict = {}
+ self.hashes = set()
+ self.hashes.update(portage.const.MANIFEST2_HASH_FUNCTIONS)
+ if manifest1_compat:
+ raise NotImplementedError("manifest1 support has been removed")
+ self.hashes.difference_update(hashname for hashname in \
+ list(self.hashes) if hashname not in hashfunc_map)
+ self.hashes.add("size")
+ if manifest1_compat:
+ raise NotImplementedError("manifest1 support has been removed")
+ self.hashes.add(portage.const.MANIFEST2_REQUIRED_HASH)
+ for t in portage.const.MANIFEST2_IDENTIFIERS:
+ self.fhashdict[t] = {}
+ if not from_scratch:
+ self._read()
+ if fetchlist_dict != None:
+ self.fetchlist_dict = fetchlist_dict
+ else:
+ self.fetchlist_dict = {}
+ self.distdir = distdir
+ self.guessType = guessManifestFileType
+
+ def getFullname(self):
+ """ Returns the absolute path to the Manifest file for this instance """
+ return os.path.join(self.pkgdir, "Manifest")
+
+ def getDigests(self):
+ """ Compability function for old digest/manifest code, returns dict of filename:{hashfunction:hashvalue} """
+ rval = {}
+ for t in portage.const.MANIFEST2_IDENTIFIERS:
+ rval.update(self.fhashdict[t])
+ return rval
+
+ def getTypeDigests(self, ftype):
+ """ Similar to getDigests(), but restricted to files of the given type. """
+ return self.fhashdict[ftype]
+
+ def _readManifest(self, file_path, myhashdict=None, **kwargs):
+ """Parse a manifest. If myhashdict is given then data will be added too it.
+ Otherwise, a new dict will be created and returned."""
+ try:
+ fd = io.open(_unicode_encode(file_path,
+ encoding=_encodings['fs'], errors='strict'), mode='r',
+ encoding=_encodings['repo.content'], errors='replace')
+ if myhashdict is None:
+ myhashdict = {}
+ self._parseDigests(fd, myhashdict=myhashdict, **kwargs)
+ fd.close()
+ return myhashdict
+ except (OSError, IOError) as e:
+ if e.errno == errno.ENOENT:
+ raise FileNotFound(file_path)
+ else:
+ raise
+
+ def _read(self):
+ """ Parse Manifest file for this instance """
+ try:
+ self._readManifest(self.getFullname(), myhashdict=self.fhashdict)
+ except FileNotFound:
+ pass
+
+ def _parseManifestLines(self, mylines):
+ """Parse manifest lines and return a list of manifest entries."""
+ for myline in mylines:
+ myentry = None
+ mysplit = myline.split()
+ for parser in self.parsers:
+ myentry = parser(mysplit)
+ if myentry is not None:
+ yield myentry
+ break # go to the next line
+
+ def _parseDigests(self, mylines, myhashdict=None, mytype=None):
+ """Parse manifest entries and store the data in myhashdict. If mytype
+ is specified, it will override the type for all parsed entries."""
+ if myhashdict is None:
+ myhashdict = {}
+ for myentry in self._parseManifestLines(mylines):
+ if mytype is None:
+ myentry_type = myentry.type
+ else:
+ myentry_type = mytype
+ myhashdict.setdefault(myentry_type, {})
+ myhashdict[myentry_type].setdefault(myentry.name, {})
+ myhashdict[myentry_type][myentry.name].update(myentry.hashes)
+ return myhashdict
+
+ def _getDigestData(self, distlist):
+ """create a hash dict for a specific list of files"""
+ myhashdict = {}
+ for myname in distlist:
+ for mytype in self.fhashdict:
+ if myname in self.fhashdict[mytype]:
+ myhashdict.setdefault(mytype, {})
+ myhashdict[mytype].setdefault(myname, {})
+ myhashdict[mytype][myname].update(self.fhashdict[mytype][myname])
+ return myhashdict
+
+ def _createManifestEntries(self):
+ valid_hashes = set(portage.const.MANIFEST2_HASH_FUNCTIONS)
+ valid_hashes.add('size')
+ mytypes = list(self.fhashdict)
+ mytypes.sort()
+ for t in mytypes:
+ myfiles = list(self.fhashdict[t])
+ myfiles.sort()
+ for f in myfiles:
+ myentry = Manifest2Entry(
+ type=t, name=f, hashes=self.fhashdict[t][f].copy())
+ for h in list(myentry.hashes):
+ if h not in valid_hashes:
+ del myentry.hashes[h]
+ yield myentry
+
+ def checkIntegrity(self):
+ for t in self.fhashdict:
+ for f in self.fhashdict[t]:
+ if portage.const.MANIFEST2_REQUIRED_HASH not in self.fhashdict[t][f]:
+ raise MissingParameter(_("Missing %s checksum: %s %s") % (portage.const.MANIFEST2_REQUIRED_HASH, t, f))
+
+ def write(self, sign=False, force=False):
+ """ Write Manifest instance to disk, optionally signing it """
+ self.checkIntegrity()
+ try:
+ myentries = list(self._createManifestEntries())
+ update_manifest = True
+ if not force:
+ try:
+ f = io.open(_unicode_encode(self.getFullname(),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace')
+ oldentries = list(self._parseManifestLines(f))
+ f.close()
+ if len(oldentries) == len(myentries):
+ update_manifest = False
+ for i in range(len(oldentries)):
+ if oldentries[i] != myentries[i]:
+ update_manifest = True
+ break
+ except (IOError, OSError) as e:
+ if e.errno == errno.ENOENT:
+ pass
+ else:
+ raise
+ if update_manifest:
+ write_atomic(self.getFullname(),
+ "".join("%s\n" % str(myentry) for myentry in myentries))
+ if sign:
+ self.sign()
+ except (IOError, OSError) as e:
+ if e.errno == errno.EACCES:
+ raise PermissionDenied(str(e))
+ raise
+
+ def sign(self):
+ """ Sign the Manifest """
+ raise NotImplementedError()
+
+ def validateSignature(self):
+ """ Validate signature on Manifest """
+ raise NotImplementedError()
+
+ def addFile(self, ftype, fname, hashdict=None, ignoreMissing=False):
+ """ Add entry to Manifest optionally using hashdict to avoid recalculation of hashes """
+ if ftype == "AUX" and not fname.startswith("files/"):
+ fname = os.path.join("files", fname)
+ if not os.path.exists(self.pkgdir+fname) and not ignoreMissing:
+ raise FileNotFound(fname)
+ if not ftype in portage.const.MANIFEST2_IDENTIFIERS:
+ raise InvalidDataType(ftype)
+ if ftype == "AUX" and fname.startswith("files"):
+ fname = fname[6:]
+ self.fhashdict[ftype][fname] = {}
+ if hashdict != None:
+ self.fhashdict[ftype][fname].update(hashdict)
+ if not portage.const.MANIFEST2_REQUIRED_HASH in self.fhashdict[ftype][fname]:
+ self.updateFileHashes(ftype, fname, checkExisting=False, ignoreMissing=ignoreMissing)
+
+ def removeFile(self, ftype, fname):
+ """ Remove given entry from Manifest """
+ del self.fhashdict[ftype][fname]
+
+ def hasFile(self, ftype, fname):
+ """ Return whether the Manifest contains an entry for the given type,filename pair """
+ return (fname in self.fhashdict[ftype])
+
+ def findFile(self, fname):
+ """ Return entrytype of the given file if present in Manifest or None if not present """
+ for t in portage.const.MANIFEST2_IDENTIFIERS:
+ if fname in self.fhashdict[t]:
+ return t
+ return None
+
+ def create(self, checkExisting=False, assumeDistHashesSometimes=False,
+ assumeDistHashesAlways=False, requiredDistfiles=[]):
+ """ Recreate this Manifest from scratch. This will not use any
+ existing checksums unless assumeDistHashesSometimes or
+ assumeDistHashesAlways is true (assumeDistHashesSometimes will only
+ cause DIST checksums to be reused if the file doesn't exist in
+ DISTDIR). The requiredDistfiles parameter specifies a list of
+ distfiles to raise a FileNotFound exception for (if no file or existing
+ checksums are available), and defaults to all distfiles when not
+ specified."""
+ if checkExisting:
+ self.checkAllHashes()
+ if assumeDistHashesSometimes or assumeDistHashesAlways:
+ distfilehashes = self.fhashdict["DIST"]
+ else:
+ distfilehashes = {}
+ self.__init__(self.pkgdir, self.distdir,
+ fetchlist_dict=self.fetchlist_dict, from_scratch=True,
+ manifest1_compat=False)
+ cpvlist = []
+ pn = os.path.basename(self.pkgdir.rstrip(os.path.sep))
+ cat = self._pkgdir_category()
+
+ pkgdir = self.pkgdir
+
+ for pkgdir, pkgdir_dirs, pkgdir_files in os.walk(pkgdir):
+ break
+ for f in pkgdir_files:
+ try:
+ f = _unicode_decode(f,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ if f[:1] == ".":
+ continue
+ pf = None
+ if f[-7:] == '.ebuild':
+ pf = f[:-7]
+ if pf is not None:
+ mytype = "EBUILD"
+ ps = portage.versions._pkgsplit(pf)
+ cpv = "%s/%s" % (cat, pf)
+ if not ps:
+ raise PortagePackageException(
+ _("Invalid package name: '%s'") % cpv)
+ if ps[0] != pn:
+ raise PortagePackageException(
+ _("Package name does not "
+ "match directory name: '%s'") % cpv)
+ cpvlist.append(cpv)
+ elif manifest2MiscfileFilter(f):
+ mytype = "MISC"
+ else:
+ continue
+ self.fhashdict[mytype][f] = perform_multiple_checksums(self.pkgdir+f, self.hashes)
+ recursive_files = []
+
+ pkgdir = self.pkgdir
+ cut_len = len(os.path.join(pkgdir, "files") + os.sep)
+ for parentdir, dirs, files in os.walk(os.path.join(pkgdir, "files")):
+ for f in files:
+ try:
+ f = _unicode_decode(f,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ full_path = os.path.join(parentdir, f)
+ recursive_files.append(full_path[cut_len:])
+ for f in recursive_files:
+ if not manifest2AuxfileFilter(f):
+ continue
+ self.fhashdict["AUX"][f] = perform_multiple_checksums(
+ os.path.join(self.pkgdir, "files", f.lstrip(os.sep)), self.hashes)
+ distlist = set()
+ for cpv in cpvlist:
+ distlist.update(self._getCpvDistfiles(cpv))
+ if requiredDistfiles is None:
+ # This allows us to force removal of stale digests for the
+ # ebuild --force digest option (no distfiles are required).
+ requiredDistfiles = set()
+ elif len(requiredDistfiles) == 0:
+ # repoman passes in an empty list, which implies that all distfiles
+ # are required.
+ requiredDistfiles = distlist.copy()
+ required_hash_types = set()
+ required_hash_types.add("size")
+ required_hash_types.add(portage.const.MANIFEST2_REQUIRED_HASH)
+ for f in distlist:
+ fname = os.path.join(self.distdir, f)
+ mystat = None
+ try:
+ mystat = os.stat(fname)
+ except OSError:
+ pass
+ if f in distfilehashes and \
+ not required_hash_types.difference(distfilehashes[f]) and \
+ ((assumeDistHashesSometimes and mystat is None) or \
+ (assumeDistHashesAlways and mystat is None) or \
+ (assumeDistHashesAlways and mystat is not None and \
+ len(distfilehashes[f]) == len(self.hashes) and \
+ distfilehashes[f]["size"] == mystat.st_size)):
+ self.fhashdict["DIST"][f] = distfilehashes[f]
+ else:
+ try:
+ self.fhashdict["DIST"][f] = perform_multiple_checksums(fname, self.hashes)
+ except FileNotFound:
+ if f in requiredDistfiles:
+ raise
+
+ def _pkgdir_category(self):
+ return self.pkgdir.rstrip(os.sep).split(os.sep)[-2]
+
+ def _getAbsname(self, ftype, fname):
+ if ftype == "DIST":
+ absname = os.path.join(self.distdir, fname)
+ elif ftype == "AUX":
+ absname = os.path.join(self.pkgdir, "files", fname)
+ else:
+ absname = os.path.join(self.pkgdir, fname)
+ return absname
+
+ def checkAllHashes(self, ignoreMissingFiles=False):
+ for t in portage.const.MANIFEST2_IDENTIFIERS:
+ self.checkTypeHashes(t, ignoreMissingFiles=ignoreMissingFiles)
+
+ def checkTypeHashes(self, idtype, ignoreMissingFiles=False):
+ for f in self.fhashdict[idtype]:
+ self.checkFileHashes(idtype, f, ignoreMissing=ignoreMissingFiles)
+
+ def checkFileHashes(self, ftype, fname, ignoreMissing=False):
+ myhashes = self.fhashdict[ftype][fname]
+ try:
+ ok,reason = verify_all(self._getAbsname(ftype, fname), self.fhashdict[ftype][fname])
+ if not ok:
+ raise DigestException(tuple([self._getAbsname(ftype, fname)]+list(reason)))
+ return ok, reason
+ except FileNotFound as e:
+ if not ignoreMissing:
+ raise
+ return False, _("File Not Found: '%s'") % str(e)
+
+ def checkCpvHashes(self, cpv, checkDistfiles=True, onlyDistfiles=False, checkMiscfiles=False):
+ """ check the hashes for all files associated to the given cpv, include all
+ AUX files and optionally all MISC files. """
+ if not onlyDistfiles:
+ self.checkTypeHashes("AUX", ignoreMissingFiles=False)
+ if checkMiscfiles:
+ self.checkTypeHashes("MISC", ignoreMissingFiles=False)
+ ebuildname = "%s.ebuild" % self._catsplit(cpv)[1]
+ self.checkFileHashes("EBUILD", ebuildname, ignoreMissing=False)
+ if checkDistfiles or onlyDistfiles:
+ for f in self._getCpvDistfiles(cpv):
+ self.checkFileHashes("DIST", f, ignoreMissing=False)
+
+ def _getCpvDistfiles(self, cpv):
+ """ Get a list of all DIST files associated to the given cpv """
+ return self.fetchlist_dict[cpv]
+
+ def getDistfilesSize(self, fetchlist):
+ total_bytes = 0
+ for f in fetchlist:
+ total_bytes += int(self.fhashdict["DIST"][f]["size"])
+ return total_bytes
+
+ def updateFileHashes(self, ftype, fname, checkExisting=True, ignoreMissing=True, reuseExisting=False):
+ """ Regenerate hashes for the given file """
+ if checkExisting:
+ self.checkFileHashes(ftype, fname, ignoreMissing=ignoreMissing)
+ if not ignoreMissing and fname not in self.fhashdict[ftype]:
+ raise FileNotInManifestException(fname)
+ if fname not in self.fhashdict[ftype]:
+ self.fhashdict[ftype][fname] = {}
+ myhashkeys = list(self.hashes)
+ if reuseExisting:
+ for k in [h for h in self.fhashdict[ftype][fname] if h in myhashkeys]:
+ myhashkeys.remove(k)
+ myhashes = perform_multiple_checksums(self._getAbsname(ftype, fname), myhashkeys)
+ self.fhashdict[ftype][fname].update(myhashes)
+
+ def updateTypeHashes(self, idtype, checkExisting=False, ignoreMissingFiles=True):
+ """ Regenerate all hashes for all files of the given type """
+ for fname in self.fhashdict[idtype]:
+     self.updateFileHashes(idtype, fname, checkExisting,
+         ignoreMissing=ignoreMissingFiles)
+
+ def updateAllHashes(self, checkExisting=False, ignoreMissingFiles=True):
+ """ Regenerate all hashes for all files in this Manifest. """
+ for idtype in portage.const.MANIFEST2_IDENTIFIERS:
+ self.updateTypeHashes(idtype, checkExisting=checkExisting,
+ ignoreMissingFiles=ignoreMissingFiles)
+
+ def updateCpvHashes(self, cpv, ignoreMissingFiles=True):
+ """ Regenerate all hashes associated to the given cpv (includes all AUX and MISC
+ files)."""
+ self.updateTypeHashes("AUX", ignoreMissingFiles=ignoreMissingFiles)
+ self.updateTypeHashes("MISC", ignoreMissingFiles=ignoreMissingFiles)
+ ebuildname = "%s.ebuild" % self._catsplit(cpv)[1]
+ self.updateFileHashes("EBUILD", ebuildname, ignoreMissingFiles=ignoreMissingFiles)
+ for f in self._getCpvDistfiles(cpv):
+ self.updateFileHashes("DIST", f, ignoreMissingFiles=ignoreMissingFiles)
+
+ def updateHashesGuessType(self, fname, *args, **kwargs):
+ """ Regenerate hashes for the given file (guesses the type and then
+ calls updateFileHashes)."""
+ mytype = self.guessType(fname)
+ if mytype == "AUX":
+ fname = fname[len("files" + os.sep):]
+ elif mytype is None:
+ return
+ myrealtype = self.findFile(fname)
+ if myrealtype is not None:
+ mytype = myrealtype
+ return self.updateFileHashes(mytype, fname, *args, **kwargs)
+
+ def getFileData(self, ftype, fname, key):
+ """ Return the value of a specific (type,filename,key) triple, mainly useful
+ to get the size for distfiles."""
+ return self.fhashdict[ftype][fname][key]
+
+ def getVersions(self):
+ """ Returns a list of manifest versions present in the manifest file. """
+ rVal = []
+ mfname = self.getFullname()
+ if not os.path.exists(mfname):
+ return rVal
+ myfile = io.open(_unicode_encode(mfname,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'], errors='replace')
+ lines = myfile.readlines()
+ myfile.close()
+ for l in lines:
+ mysplit = l.split()
+ if len(mysplit) == 4 and mysplit[0] in portage.const.MANIFEST1_HASH_FUNCTIONS and 1 not in rVal:
+     rVal.append(1)
+ elif len(mysplit) > 4 and mysplit[0] in portage.const.MANIFEST2_IDENTIFIERS and ((len(mysplit) - 3) % 2) == 0 and 2 not in rVal:
+     rVal.append(2)
+ return rVal
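+ # Line shapes recognized above (illustrative):
+ #   Manifest v1: "MD5 <hash> <filename> <size>"           -> 4 fields
+ #   Manifest v2: "DIST <filename> <size> SHA1 <hash> ..." -> type, name and
+ #   size plus (hashname, value) pairs, so (len(mysplit) - 3) is even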
+
+ def _catsplit(self, pkg_key):
+ """Split a category and package, returning a list of [cat, pkg].
+ This is compatible with portage.catsplit()"""
+ return pkg_key.split("/", 1)
diff --git a/portage_with_autodep/pym/portage/news.py b/portage_with_autodep/pym/portage/news.py
new file mode 100644
index 0000000..866e5b0
--- /dev/null
+++ b/portage_with_autodep/pym/portage/news.py
@@ -0,0 +1,351 @@
+# portage: news management code
+# Copyright 2006-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ["NewsManager", "NewsItem", "DisplayRestriction",
+ "DisplayProfileRestriction", "DisplayKeywordRestriction",
+ "DisplayInstalledRestriction"]
+
+import io
+import logging
+import os as _os
+import re
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.util import apply_secpass_permissions, ensure_dirs, \
+ grabfile, normalize_path, write_atomic, writemsg_level
+from portage.data import portage_gid
+from portage.dep import isvalidatom
+from portage.localization import _
+from portage.locks import lockfile, unlockfile
+from portage.exception import InvalidLocation, OperationNotPermitted, \
+ PermissionDenied
+
+class NewsManager(object):
+ """
+ This object manages GLEP 42 style news items. It will cache news items
+ that have previously shown up and notify users when there are relevant
+ unread news items that apply to packages they have installed.
+
+ Creating a news manager requires:
+ portdb/vardb - ebuild and installed-package databases; vardb.root supplies ${ROOT}
+ news_path - path to news items; usually $REPODIR/metadata/news
+ unread_path - path to the news.repoid.unread file; this helps us track news items
+
+ """
+
+ def __init__(self, portdb, vardb, news_path, unread_path, language_id='en'):
+ self.news_path = news_path
+ self.unread_path = unread_path
+ self.target_root = vardb.root
+ self.language_id = language_id
+ self.config = vardb.settings
+ self.vdb = vardb
+ self.portdb = portdb
+
+ # GLEP 42 says:
+ # All news item related files should be root owned and in the
+ # portage group with the group write (and, for directories,
+ # execute) bits set. News files should be world readable.
+ self._uid = int(self.config["PORTAGE_INST_UID"])
+ self._gid = portage_gid
+ self._file_mode = 0o0064
+ self._dir_mode = 0o0074
+ self._mode_mask = 0o0000
+
+ portdir = portdb.porttree_root
+ profiles_base = os.path.join(portdir, 'profiles') + os.path.sep
+ profile_path = None
+ if portdb.settings.profile_path:
+ profile_path = normalize_path(
+ os.path.realpath(portdb.settings.profile_path))
+ if profile_path.startswith(profiles_base):
+ profile_path = profile_path[len(profiles_base):]
+ self._profile_path = profile_path
+
+ def _unread_filename(self, repoid):
+ return os.path.join(self.unread_path, 'news-%s.unread' % repoid)
+
+ def _skip_filename(self, repoid):
+ return os.path.join(self.unread_path, 'news-%s.skip' % repoid)
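+ # For repoid "gentoo" the two paths above resolve to (illustrative):
+ #   <unread_path>/news-gentoo.unread and <unread_path>/news-gentoo.skip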
+
+ def _news_dir(self, repoid):
+ repo_path = self.portdb.getRepositoryPath(repoid)
+ if repo_path is None:
+ raise AssertionError(_("Invalid repoID: %s") % repoid)
+ return os.path.join(repo_path, self.news_path)
+
+ def updateItems(self, repoid):
+ """
+ Figure out which news items from NEWS_PATH are both unread and relevant to
+ the user (according to the GLEP 42 standards of relevancy). Then add these
+ items into the news.repoid.unread file.
+ """
+
+ # Ensure that the unread path exists and is writable.
+
+ try:
+ ensure_dirs(self.unread_path, uid=self._uid, gid=self._gid,
+ mode=self._dir_mode, mask=self._mode_mask)
+ except (OperationNotPermitted, PermissionDenied):
+ return
+
+ if not os.access(self.unread_path, os.W_OK):
+ return
+
+ news_dir = self._news_dir(repoid)
+ try:
+ news = _os.listdir(_unicode_encode(news_dir,
+ encoding=_encodings['fs'], errors='strict'))
+ except OSError:
+ return
+
+ skip_filename = self._skip_filename(repoid)
+ unread_filename = self._unread_filename(repoid)
+ unread_lock = lockfile(unread_filename, wantnewlockfile=1)
+ try:
+ try:
+ unread = set(grabfile(unread_filename))
+ unread_orig = unread.copy()
+ skip = set(grabfile(skip_filename))
+ skip_orig = skip.copy()
+ except PermissionDenied:
+ return
+
+ updates = []
+ for itemid in news:
+ try:
+ itemid = _unicode_decode(itemid,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ itemid = _unicode_decode(itemid,
+ encoding=_encodings['fs'], errors='replace')
+ writemsg_level(
+ _("!!! Invalid encoding in news item name: '%s'\n") % \
+ itemid, level=logging.ERROR, noiselevel=-1)
+ continue
+
+ if itemid in skip:
+ continue
+ filename = os.path.join(news_dir, itemid,
+ itemid + "." + self.language_id + ".txt")
+ if not os.path.isfile(filename):
+ continue
+ item = NewsItem(filename, itemid)
+ if not item.isValid():
+ continue
+ if item.isRelevant(profile=self._profile_path,
+ config=self.config, vardb=self.vdb):
+ unread.add(item.name)
+ skip.add(item.name)
+
+ if unread != unread_orig:
+ write_atomic(unread_filename,
+ "".join("%s\n" % x for x in sorted(unread)))
+ apply_secpass_permissions(unread_filename,
+ uid=self._uid, gid=self._gid,
+ mode=self._file_mode, mask=self._mode_mask)
+
+ if skip != skip_orig:
+ write_atomic(skip_filename,
+ "".join("%s\n" % x for x in sorted(skip)))
+ apply_secpass_permissions(skip_filename,
+ uid=self._uid, gid=self._gid,
+ mode=self._file_mode, mask=self._mode_mask)
+
+ finally:
+ unlockfile(unread_lock)
+
+ def getUnreadItems(self, repoid, update=False):
+ """
+ Determine if there are unread relevant items in news.repoid.unread.
+ If there are unread items, return their count.
+ If update is specified, updateItems(repoid) will be called to
+ check for new items.
+ """
+
+ if update:
+ self.updateItems(repoid)
+
+ unread_filename = self._unread_filename(repoid)
+ unread_lock = None
+ try:
+ unread_lock = lockfile(unread_filename, wantnewlockfile=1)
+ except (InvalidLocation, OperationNotPermitted, PermissionDenied):
+ pass
+ try:
+ try:
+ return len(grabfile(unread_filename))
+ except PermissionDenied:
+ return 0
+ finally:
+ if unread_lock:
+ unlockfile(unread_lock)
+
+_formatRE = re.compile(r"News-Item-Format:\s*([^\s]*)\s*$")
+_installedRE = re.compile(r"Display-If-Installed:(.*)\n")
+_profileRE = re.compile(r"Display-If-Profile:(.*)\n")
+_keywordRE = re.compile(r"Display-If-Keyword:(.*)\n")
+
+class NewsItem(object):
+ """
+ This class encapsulates a GLEP 42 style news item.
+ Its purpose is to wrap parsing of these news items such that portage can determine
+ whether a particular item is 'relevant' or not. This requires parsing the item
+ and determining 'relevancy restrictions'; these include "Display if Installed" or
+ "display if arch: x86" and so forth.
+
+ Creation of a news item involves passing in the path to the particular news item.
+ """
+
+ def __init__(self, path, name):
+ """
+ For a given news item, we only want it if its path is a file.
+ """
+ self.path = path
+ self.name = name
+ self._parsed = False
+ self._valid = True
+
+ def isRelevant(self, vardb, config, profile):
+ """
+ This function takes a dict of keyword arguments; one should pass in any
+ objects needed to do lookups (like what keywords we are on, what profile,
+ and a vardb so we can look at installed packages).
+ Each restriction will pluck out the items that are required for it to match
+ or raise a ValueError exception if the required object is not present.
+
+ Restrictions of the form Display-X are OR'd with like-restrictions;
+ otherwise restrictions are AND'd. any_match is the ORing and
+ all_match is the ANDing.
+ """
+
+ if not self._parsed:
+ self.parse()
+
+ if not len(self.restrictions):
+ return True
+
+ kwargs = \
+ { 'vardb' : vardb,
+ 'config' : config,
+ 'profile' : profile }
+
+ all_match = True
+ for values in self.restrictions.values():
+ any_match = False
+ for restriction in values:
+ if restriction.checkRestriction(**kwargs):
+ any_match = True
+ if not any_match:
+ all_match = False
+
+ return all_match
+
+ def isValid(self):
+ if not self._parsed:
+ self.parse()
+ return self._valid
+
+ def parse(self):
+ f = io.open(_unicode_encode(self.path,
+     encoding=_encodings['fs'], errors='strict'),
+     mode='r', encoding=_encodings['content'], errors='replace')
+ lines = f.readlines()
+ f.close()
+ self.restrictions = {}
+ invalids = []
+ for i, line in enumerate(lines):
+ # Optimization to ignore regex matches on lines that
+ # will never match
+ format_match = _formatRE.match(line)
+ if format_match is not None and format_match.group(1) != '1.0':
+ invalids.append((i + 1, line.rstrip('\n')))
+ break
+ if not line.startswith('D'):
+ continue
+ restricts = { _installedRE : DisplayInstalledRestriction,
+ _profileRE : DisplayProfileRestriction,
+ _keywordRE : DisplayKeywordRestriction }
+ for regex, restriction in restricts.items():
+ match = regex.match(line)
+ if match:
+ restrict = restriction(match.groups()[0].strip())
+ if not restrict.isValid():
+ invalids.append((i + 1, line.rstrip("\n")))
+ else:
+ self.restrictions.setdefault(
+ id(restriction), []).append(restrict)
+ continue
+ if invalids:
+ self._valid = False
+ msg = []
+ msg.append(_("Invalid news item: %s") % (self.path,))
+ for lineno, line in invalids:
+ msg.append(_(" line %d: %s") % (lineno, line))
+ writemsg_level("".join("!!! %s\n" % x for x in msg),
+ level=logging.ERROR, noiselevel=-1)
+
+ self._parsed = True
+
+class DisplayRestriction(object):
+ """
+ A base restriction object representing a restriction of display.
+ news items may have 'relevancy restrictions' preventing them from
+ being important. In this case we need a manner of figuring out if
+ a particular item is relevant or not. If any of it's restrictions
+ are met, then it is displayed
+ """
+
+ def isValid(self):
+ return True
+
+ def checkRestriction(self, **kwargs):
+ raise NotImplementedError('Derived class should override this method')
+
+class DisplayProfileRestriction(DisplayRestriction):
+ """
+ A profile restriction where a particular item shall only be displayed
+ if the user is running a specific profile.
+ """
+
+ def __init__(self, profile):
+ self.profile = profile
+
+ def checkRestriction(self, **kwargs):
+ if self.profile == kwargs['profile']:
+ return True
+ return False
+
+class DisplayKeywordRestriction(DisplayRestriction):
+ """
+ A keyword restriction where a particular item shall only be displayed
+ if the user is running a specific keyword.
+ """
+
+ def __init__(self, keyword):
+ self.keyword = keyword
+
+ def checkRestriction(self, **kwargs):
+ if kwargs['config']['ARCH'] == self.keyword:
+ return True
+ return False
+
+class DisplayInstalledRestriction(DisplayRestriction):
+ """
+ An Installation restriction where a particular item shall only be displayed
+ if the user has that item installed.
+ """
+
+ def __init__(self, atom):
+ self.atom = atom
+
+ def isValid(self):
+ return isvalidatom(self.atom)
+
+ def checkRestriction(self, **kwargs):
+ vdb = kwargs['vardb']
+ if vdb.match(self.atom):
+ return True
+ return False
diff --git a/portage_with_autodep/pym/portage/output.py b/portage_with_autodep/pym/portage/output.py
new file mode 100644
index 0000000..0e8245f
--- /dev/null
+++ b/portage_with_autodep/pym/portage/output.py
@@ -0,0 +1,794 @@
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__docformat__ = "epytext"
+
+import errno
+import io
+import formatter
+import re
+import sys
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.util:writemsg',
+)
+
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage import _unicode_decode
+from portage.const import COLOR_MAP_FILE
+from portage.exception import CommandNotFound, FileNotFound, \
+ ParseError, PermissionDenied, PortageException
+from portage.localization import _
+
+havecolor=1
+dotitles=1
+
+_styles = {}
+"""Maps style class to tuple of attribute names."""
+
+codes = {}
+"""Maps attribute name to ansi code."""
+
+esc_seq = "\x1b["
+
+codes["normal"] = esc_seq + "0m"
+codes["reset"] = esc_seq + "39;49;00m"
+
+codes["bold"] = esc_seq + "01m"
+codes["faint"] = esc_seq + "02m"
+codes["standout"] = esc_seq + "03m"
+codes["underline"] = esc_seq + "04m"
+codes["blink"] = esc_seq + "05m"
+codes["overline"] = esc_seq + "06m"
+codes["reverse"] = esc_seq + "07m"
+codes["invisible"] = esc_seq + "08m"
+
+codes["no-attr"] = esc_seq + "22m"
+codes["no-standout"] = esc_seq + "23m"
+codes["no-underline"] = esc_seq + "24m"
+codes["no-blink"] = esc_seq + "25m"
+codes["no-overline"] = esc_seq + "26m"
+codes["no-reverse"] = esc_seq + "27m"
+
+codes["bg_black"] = esc_seq + "40m"
+codes["bg_darkred"] = esc_seq + "41m"
+codes["bg_darkgreen"] = esc_seq + "42m"
+codes["bg_brown"] = esc_seq + "43m"
+codes["bg_darkblue"] = esc_seq + "44m"
+codes["bg_purple"] = esc_seq + "45m"
+codes["bg_teal"] = esc_seq + "46m"
+codes["bg_lightgray"] = esc_seq + "47m"
+codes["bg_default"] = esc_seq + "49m"
+codes["bg_darkyellow"] = codes["bg_brown"]
+
+def color(fg, bg="default", attr=["normal"]):
+ mystr = codes[fg]
+ for x in [bg]+attr:
+ mystr += codes[x]
+ return mystr
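+# Illustrative composition (editor's sketch): color("green", "bg_black",
+# attr=["bold"]) concatenates codes["green"], codes["bg_black"] and
+# codes["bold"]; callers must append codes["reset"] themselves (or use
+# colorize() below) to restore normal output.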
+
+
+ansi_codes = []
+for x in range(30, 38):
+ ansi_codes.append("%im" % x)
+ ansi_codes.append("%i;01m" % x)
+
+rgb_ansi_colors = ['0x000000', '0x555555', '0xAA0000', '0xFF5555', '0x00AA00',
+ '0x55FF55', '0xAA5500', '0xFFFF55', '0x0000AA', '0x5555FF', '0xAA00AA',
+ '0xFF55FF', '0x00AAAA', '0x55FFFF', '0xAAAAAA', '0xFFFFFF']
+
+for x in range(len(rgb_ansi_colors)):
+ codes[rgb_ansi_colors[x]] = esc_seq + ansi_codes[x]
+
+del x
+
+codes["black"] = codes["0x000000"]
+codes["darkgray"] = codes["0x555555"]
+
+codes["red"] = codes["0xFF5555"]
+codes["darkred"] = codes["0xAA0000"]
+
+codes["green"] = codes["0x55FF55"]
+codes["darkgreen"] = codes["0x00AA00"]
+
+codes["yellow"] = codes["0xFFFF55"]
+codes["brown"] = codes["0xAA5500"]
+
+codes["blue"] = codes["0x5555FF"]
+codes["darkblue"] = codes["0x0000AA"]
+
+codes["fuchsia"] = codes["0xFF55FF"]
+codes["purple"] = codes["0xAA00AA"]
+
+codes["turquoise"] = codes["0x55FFFF"]
+codes["teal"] = codes["0x00AAAA"]
+
+codes["white"] = codes["0xFFFFFF"]
+codes["lightgray"] = codes["0xAAAAAA"]
+
+codes["darkteal"] = codes["turquoise"]
+# Some terminals have darkyellow instead of brown.
+codes["0xAAAA00"] = codes["brown"]
+codes["darkyellow"] = codes["0xAAAA00"]
+
+
+
+# Colors from /etc/init.d/functions.sh
+_styles["NORMAL"] = ( "normal", )
+_styles["GOOD"] = ( "green", )
+_styles["WARN"] = ( "yellow", )
+_styles["BAD"] = ( "red", )
+_styles["HILITE"] = ( "teal", )
+_styles["BRACKET"] = ( "blue", )
+
+# Portage functions
+_styles["INFORM"] = ( "darkgreen", )
+_styles["UNMERGE_WARN"] = ( "red", )
+_styles["SECURITY_WARN"] = ( "red", )
+_styles["MERGE_LIST_PROGRESS"] = ( "yellow", )
+_styles["PKG_BLOCKER"] = ( "red", )
+_styles["PKG_BLOCKER_SATISFIED"] = ( "darkblue", )
+_styles["PKG_MERGE"] = ( "darkgreen", )
+_styles["PKG_MERGE_SYSTEM"] = ( "darkgreen", )
+_styles["PKG_MERGE_WORLD"] = ( "green", )
+_styles["PKG_BINARY_MERGE"] = ( "purple", )
+_styles["PKG_BINARY_MERGE_SYSTEM"] = ( "purple", )
+_styles["PKG_BINARY_MERGE_WORLD"] = ( "fuchsia", )
+_styles["PKG_UNINSTALL"] = ( "red", )
+_styles["PKG_NOMERGE"] = ( "darkblue", )
+_styles["PKG_NOMERGE_SYSTEM"] = ( "darkblue", )
+_styles["PKG_NOMERGE_WORLD"] = ( "blue", )
+_styles["PROMPT_CHOICE_DEFAULT"] = ( "green", )
+_styles["PROMPT_CHOICE_OTHER"] = ( "red", )
+
+def _parse_color_map(config_root='/', onerror=None):
+ """
+ Parse /etc/portage/color.map and apply the color classes and codes it
+ defines to the module-level codes and _styles dicts in place (the
+ function itself returns None).
+
+ @param onerror: an optional callback to handle any ParseError that would
+ otherwise be raised
+ @type onerror: callable
+ """
+ global codes, _styles
+ myfile = os.path.join(config_root, COLOR_MAP_FILE)
+ ansi_code_pattern = re.compile("^[0-9;]*m$")
+ quotes = '\'"'
+ def strip_quotes(token):
+ if token[0] in quotes and token[0] == token[-1]:
+ token = token[1:-1]
+ return token
+ try:
+ lineno=0
+ for line in io.open(_unicode_encode(myfile,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='replace'):
+ lineno += 1
+
+ commenter_pos = line.find("#")
+ line = line[:commenter_pos].strip()
+
+ if len(line) == 0:
+ continue
+
+ split_line = line.split("=")
+ if len(split_line) != 2:
+ e = ParseError(_("'%s', line %s: expected exactly one occurrence of '=' operator") % \
+ (myfile, lineno))
+ if onerror:
+ onerror(e)
+ else:
+ raise e
+ continue
+
+ k = strip_quotes(split_line[0].strip())
+ v = strip_quotes(split_line[1].strip())
+ if k not in _styles and k not in codes:
+ e = ParseError(_("'%s', line %s: Unknown variable: '%s'") % \
+ (myfile, lineno, k))
+ if onerror:
+ onerror(e)
+ else:
+ raise e
+ continue
+ if ansi_code_pattern.match(v):
+ if k in _styles:
+ _styles[k] = ( esc_seq + v, )
+ elif k in codes:
+ codes[k] = esc_seq + v
+ else:
+ code_list = []
+ for x in v.split():
+ if x in codes:
+ if k in _styles:
+ code_list.append(x)
+ elif k in codes:
+ code_list.append(codes[x])
+ else:
+ e = ParseError(_("'%s', line %s: Undefined: '%s'") % \
+ (myfile, lineno, x))
+ if onerror:
+ onerror(e)
+ else:
+ raise e
+ if k in _styles:
+ _styles[k] = tuple(code_list)
+ elif k in codes:
+ codes[k] = "".join(code_list)
+ except (IOError, OSError) as e:
+ if e.errno == errno.ENOENT:
+ raise FileNotFound(myfile)
+ elif e.errno == errno.EACCES:
+ raise PermissionDenied(myfile)
+ raise
+
+def nc_len(mystr):
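+ # esc_seq already ends with "[", so esc_seq + "^m]+m" forms the regex
+ # "\x1b[^m]+m": an ESC introducer followed by non-"m" characters up to
+ # the "m" that terminates an ANSI attribute sequence.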
+ tmp = re.sub(esc_seq + "^m]+m", "", mystr)
+ return len(tmp)
+
+_legal_terms_re = re.compile(r'^(xterm|xterm-color|Eterm|aterm|rxvt|screen|kterm|rxvt-unicode|gnome|interix)')
+_disable_xtermTitle = None
+_max_xtermTitle_len = 253
+
+def xtermTitle(mystr, raw=False):
+ global _disable_xtermTitle
+ if _disable_xtermTitle is None:
+ _disable_xtermTitle = not (sys.stderr.isatty() and \
+ 'TERM' in os.environ and \
+ _legal_terms_re.match(os.environ['TERM']) is not None)
+
+ if dotitles and not _disable_xtermTitle:
+ # If the title string is too big then the terminal can
+ # misbehave. Therefore, truncate it if it's too big.
+ if len(mystr) > _max_xtermTitle_len:
+ mystr = mystr[:_max_xtermTitle_len]
+ if not raw:
+ mystr = '\x1b]0;%s\x07' % mystr
+
+ # avoid potential UnicodeEncodeError
+ mystr = _unicode_encode(mystr,
+ encoding=_encodings['stdio'], errors='backslashreplace')
+ f = sys.stderr
+ if sys.hexversion >= 0x3000000:
+ f = f.buffer
+ f.write(mystr)
+ f.flush()
+
+default_xterm_title = None
+
+def xtermTitleReset():
+ global default_xterm_title
+ if default_xterm_title is None:
+ prompt_command = os.environ.get('PROMPT_COMMAND')
+ if prompt_command == "":
+ default_xterm_title = ""
+ elif prompt_command is not None:
+ if dotitles and \
+ 'TERM' in os.environ and \
+ _legal_terms_re.match(os.environ['TERM']) is not None and \
+ sys.stderr.isatty():
+ from portage.process import find_binary, spawn
+ shell = os.environ.get("SHELL")
+ if not shell or not os.access(shell, os.X_OK):
+ shell = find_binary("sh")
+ if shell:
+ spawn([shell, "-c", prompt_command], env=os.environ,
+ fd_pipes={0:sys.stdin.fileno(),1:sys.stderr.fileno(),
+ 2:sys.stderr.fileno()})
+ else:
+ os.system(prompt_command)
+ return
+ else:
+ pwd = os.environ.get('PWD','')
+ home = os.environ.get('HOME', '')
+ if home != '' and pwd.startswith(home):
+ pwd = '~' + pwd[len(home):]
+ default_xterm_title = '\x1b]0;%s@%s:%s\x07' % (
+ os.environ.get('LOGNAME', ''),
+ os.environ.get('HOSTNAME', '').split('.', 1)[0], pwd)
+ xtermTitle(default_xterm_title, raw=True)
+
+def notitles():
+ "turn off title setting"
+ global dotitles
+ dotitles = 0
+
+def nocolor():
+ "turn off colorization"
+ global havecolor
+ havecolor=0
+
+def resetColor():
+ return codes["reset"]
+
+def style_to_ansi_code(style):
+ """
+ @param style: A style name
+ @type style: String
+ @rtype: String
+ @return: A string containing one or more ansi escape codes that are
+ used to render the given style.
+ """
+ ret = ""
+ for attr_name in _styles[style]:
+ # allow stuff that has found it's way through ansi_code_pattern
+ ret += codes.get(attr_name, attr_name)
+ return ret
+
+def colorize(color_key, text):
+ global havecolor
+ if havecolor:
+ if color_key in codes:
+ return codes[color_key] + text + codes["reset"]
+ elif color_key in _styles:
+ return style_to_ansi_code(color_key) + text + codes["reset"]
+ else:
+ return text
+ else:
+ return text
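+# Example (illustrative): colorize("GOOD", "done") wraps "done" in the
+# "GOOD" style's escape codes plus codes["reset"]; after nocolor() it
+# returns "done" unchanged.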
+
+compat_functions_colors = ["bold","white","teal","turquoise","darkteal",
+ "fuchsia","purple","blue","darkblue","green","darkgreen","yellow",
+ "brown","darkyellow","red","darkred"]
+
+def create_color_func(color_key):
+ def derived_func(*args):
+ newargs = list(args)
+ newargs.insert(0, color_key)
+ return colorize(*newargs)
+ return derived_func
+
+for c in compat_functions_colors:
+ globals()[c] = create_color_func(c)
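+# The loop above defines module-level helpers such as green("ok") and
+# bold("!!"), each equivalent to colorize("green", "ok") and so on.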
+
+class ConsoleStyleFile(object):
+ """
+ A file-like object that behaves something like
+ the colorize() function. Style identifiers
+ passed in via the new_styles() method will be used to
+ apply console codes to output.
+ """
+ def __init__(self, f):
+ self._file = f
+ self._styles = None
+ self.write_listener = None
+
+ def new_styles(self, styles):
+ self._styles = styles
+
+ def write(self, s):
+ # In python-2.6, DumbWriter.send_line_break() can write
+ # non-unicode '\n' which fails with TypeError if self._file
+ # is a text stream such as io.StringIO. Therefore, make sure
+ # input is converted to unicode when necessary.
+ s = _unicode_decode(s)
+ global havecolor
+ if havecolor and self._styles:
+ styled_s = []
+ for style in self._styles:
+ styled_s.append(style_to_ansi_code(style))
+ styled_s.append(s)
+ styled_s.append(codes["reset"])
+ self._write(self._file, "".join(styled_s))
+ else:
+ self._write(self._file, s)
+ if self.write_listener:
+ self._write(self.write_listener, s)
+
+ def _write(self, f, s):
+ # avoid potential UnicodeEncodeError
+ if f in (sys.stdout, sys.stderr):
+ s = _unicode_encode(s,
+ encoding=_encodings['stdio'], errors='backslashreplace')
+ if sys.hexversion >= 0x3000000:
+ f = f.buffer
+ f.write(s)
+
+ def writelines(self, lines):
+ for s in lines:
+ self.write(s)
+
+ def flush(self):
+ self._file.flush()
+
+ def close(self):
+ self._file.close()
+
+class StyleWriter(formatter.DumbWriter):
+ """
+ This is just a DumbWriter with a hook in the new_styles() method
+ that passes a styles tuple as a single argument to a callable
+ style_listener attribute.
+ """
+ def __init__(self, **kwargs):
+ formatter.DumbWriter.__init__(self, **kwargs)
+ self.style_listener = None
+
+ def new_styles(self, styles):
+ formatter.DumbWriter.new_styles(self, styles)
+ if self.style_listener:
+ self.style_listener(styles)
+
+def get_term_size():
+ """
+ Get the number of lines and columns of the tty that is connected to
+ stdout. Returns a tuple of (lines, columns) or (-1, -1) if an error
+ occurs. The curses module is used if available, otherwise the output of
+ `stty size` is parsed.
+ """
+ if not sys.stdout.isatty():
+ return -1, -1
+ try:
+ import curses
+ try:
+ curses.setupterm()
+ return curses.tigetnum('lines'), curses.tigetnum('cols')
+ except curses.error:
+ pass
+ except ImportError:
+ pass
+ st, out = portage.subprocess_getstatusoutput('stty size')
+ if st == os.EX_OK:
+ out = out.split()
+ if len(out) == 2:
+ try:
+ return int(out[0]), int(out[1])
+ except ValueError:
+ pass
+ return -1, -1
+
+def set_term_size(lines, columns, fd):
+ """
+ Set the number of lines and columns for the tty that is connected to fd.
+ For portability, this simply calls `stty rows $lines columns $columns`.
+ """
+ from portage.process import spawn
+ cmd = ["stty", "rows", str(lines), "columns", str(columns)]
+ try:
+ spawn(cmd, env=os.environ, fd_pipes={0:fd})
+ except CommandNotFound:
+ writemsg(_("portage: stty: command not found\n"), noiselevel=-1)
+
+class EOutput(object):
+ """
+ Performs fancy terminal formatting for status and informational messages.
+
+ The provided methods produce identical terminal output to the eponymous
+ functions in the shell script C{/sbin/functions.sh} and also accept
+ identical parameters.
+
+ This is not currently a drop-in replacement however, as the output-related
+ functions in C{/sbin/functions.sh} are oriented for use mainly by system
+ init scripts and ebuilds and their output can be customized via certain
+ C{RC_*} environment variables (see C{/etc/conf.d/rc}). B{EOutput} is not
+ customizable in this manner since it's intended for more general uses.
+ Likewise, no logging is provided.
+
+ @ivar quiet: Specifies if output should be silenced.
+ @type quiet: BooleanType
+ @ivar term_columns: Width of terminal in characters. Defaults to the value
+ specified by the shell's C{COLUMNS} variable, else to the queried tty
+ size, else to C{80}.
+ @type term_columns: IntType
+ """
+
+ def __init__(self, quiet=False):
+ self.__last_e_cmd = ""
+ self.__last_e_len = 0
+ self.quiet = quiet
+ lines, columns = get_term_size()
+ if columns <= 0:
+ columns = 80
+ self.term_columns = columns
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+ def _write(self, f, s):
+ # avoid potential UnicodeEncodeError
+ writemsg(s, noiselevel=-1, fd=f)
+
+ def __eend(self, caller, errno, msg):
+ if errno == 0:
+ status_brackets = colorize("BRACKET", "[ ") + colorize("GOOD", "ok") + colorize("BRACKET", " ]")
+ else:
+ status_brackets = colorize("BRACKET", "[ ") + colorize("BAD", "!!") + colorize("BRACKET", " ]")
+ if msg:
+ if caller == "eend":
+ self.eerror(msg[0])
+ elif caller == "ewend":
+ self.ewarn(msg[0])
+ if self.__last_e_cmd != "ebegin":
+ self.__last_e_len = 0
+ if not self.quiet:
+ out = sys.stdout
+ self._write(out,
+ "%*s%s\n" % ((self.term_columns - self.__last_e_len - 7),
+ "", status_brackets))
+
+ def ebegin(self, msg):
+ """
+ Shows a message indicating the start of a process.
+
+ @param msg: A very brief (shorter than one line) description of the
+ starting process.
+ @type msg: StringType
+ """
+ msg += " ..."
+ if not self.quiet:
+ self.einfon(msg)
+ self.__last_e_len = len(msg) + 3
+ self.__last_e_cmd = "ebegin"
+
+ def eend(self, errno, *msg):
+ """
+ Indicates the completion of a process, optionally displaying a message
+ via L{eerror} if the process's exit status isn't C{0}.
+
+ @param errno: A standard UNIX C{errno} code returned by processes upon
+ exit.
+ @type errno: IntType
+ @param msg: I{(optional)} An error message, typically a standard UNIX
+ error string corresponding to C{errno}.
+ @type msg: StringType
+ """
+ if not self.quiet:
+ self.__eend("eend", errno, msg)
+ self.__last_e_cmd = "eend"
+
+ def eerror(self, msg):
+ """
+ Shows an error message.
+
+ @param msg: A very brief (shorter than one line) error message.
+ @type msg: StringType
+ """
+ out = sys.stderr
+ if not self.quiet:
+ if self.__last_e_cmd == "ebegin":
+ self._write(out, "\n")
+ self._write(out, colorize("BAD", " * ") + msg + "\n")
+ self.__last_e_cmd = "eerror"
+
+ def einfo(self, msg):
+ """
+ Shows an informative message terminated with a newline.
+
+ @param msg: A very brief (shorter than one line) informative message.
+ @type msg: StringType
+ """
+ out = sys.stdout
+ if not self.quiet:
+ if self.__last_e_cmd == "ebegin":
+ self._write(out, "\n")
+ self._write(out, colorize("GOOD", " * ") + msg + "\n")
+ self.__last_e_cmd = "einfo"
+
+ def einfon(self, msg):
+ """
+ Shows an informative message terminated without a newline.
+
+ @param msg: A very brief (shorter than one line) informative message.
+ @type msg: StringType
+ """
+ out = sys.stdout
+ if not self.quiet:
+ if self.__last_e_cmd == "ebegin":
+ self._write(out, "\n")
+ self._write(out, colorize("GOOD", " * ") + msg)
+ self.__last_e_cmd = "einfon"
+
+ def ewarn(self, msg):
+ """
+ Shows a warning message.
+
+ @param msg: A very brief (shorter than one line) warning message.
+ @type msg: StringType
+ """
+ out = sys.stderr
+ if not self.quiet:
+ if self.__last_e_cmd == "ebegin":
+ self._write(out, "\n")
+ self._write(out, colorize("WARN", " * ") + msg + "\n")
+ self.__last_e_cmd = "ewarn"
+
+ def ewend(self, errno, *msg):
+ """
+ Indicates the completion of a process, optionally displaying a message
+ via L{ewarn} if the process's exit status isn't C{0}.
+
+ @param errno: A standard UNIX C{errno} code returned by processes upon
+ exit.
+ @type errno: IntType
+ @param msg: I{(optional)} A warning message, typically a standard UNIX
+ error string corresponding to C{errno}.
+ @type msg: StringType
+ """
+ if not self.quiet:
+ self.__eend("ewend", errno, msg)
+ self.__last_e_cmd = "ewend"
+
+class ProgressBar(object):
+ """The interface is copied from the ProgressBar class from the EasyDialogs
+ module (which is Mac only)."""
+ def __init__(self, title=None, maxval=0, label=None):
+ self._title = title
+ self._maxval = maxval
+ self._label = label
+ self._curval = 0
+
+ @property
+ def curval(self):
+ """
+ The current value (of type integer or long integer) of the progress
+ bar. The normal access methods coerce curval between 0 and maxval. This
+ attribute should not be altered directly.
+ """
+ return self._curval
+
+ @property
+ def maxval(self):
+ """
+ The maximum value (of type integer or long integer) of the progress
+ bar; the progress bar (thermometer style) is full when curval equals
+ maxval. If maxval is 0, the bar will be indeterminate (barber-pole).
+ This attribute should not be altered directly.
+ """
+ return self._maxval
+
+ def title(self, newstr):
+ """Sets the text in the title bar of the progress dialog to newstr."""
+ self._title = newstr
+
+ def label(self, newstr):
+ """Sets the text in the progress box of the progress dialog to newstr."""
+ self._label = newstr
+
+ def set(self, value, maxval=None):
+ """
+ Sets the progress bar's curval to value, and also maxval to max if the
+ latter is provided. value is first coerced between 0 and maxval. The
+ thermometer bar is updated to reflect the changes, including a change
+ from indeterminate to determinate or vice versa.
+ """
+ if maxval is not None:
+ self._maxval = maxval
+ if value < 0:
+ value = 0
+ elif value > self._maxval:
+ value = self._maxval
+ self._curval = value
+
+ def inc(self, n=1):
+ """Increments the progress bar's curval by n, or by 1 if n is not
+ provided. (Note that n may be negative, in which case the effect is a
+ decrement.) The progress bar is updated to reflect the change. If the
+ bar is indeterminate, this causes one "spin" of the barber pole. The
+ resulting curval is coerced between 0 and maxval if incrementing causes
+ it to fall outside this range.
+ """
+ self.set(self._curval+n)
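+ # Coercion example (illustrative): with maxval=10, inc(15) leaves curval
+ # at 10, and a later set(-3) clamps it back to 0.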
+
+class TermProgressBar(ProgressBar):
+ """A tty progress bar similar to wget's."""
+ def __init__(self, **kwargs):
+ ProgressBar.__init__(self, **kwargs)
+ lines, self.term_columns = get_term_size()
+ self.file = sys.stdout
+ self._min_columns = 11
+ self._max_columns = 80
+ # for indeterminate mode, ranges from 0.0 to 1.0
+ self._position = 0.0
+
+ def set(self, value, maxval=None):
+ ProgressBar.set(self, value, maxval=maxval)
+ self._display_image(self._create_image())
+
+ def _display_image(self, image):
+ self.file.write('\r')
+ self.file.write(image)
+ self.file.flush()
+
+ def _create_image(self):
+ cols = self.term_columns
+ if cols > self._max_columns:
+ cols = self._max_columns
+ min_columns = self._min_columns
+ curval = self._curval
+ maxval = self._maxval
+ position = self._position
+ percentage_str_width = 4
+ square_brackets_width = 2
+ if cols < percentage_str_width:
+ return ""
+ bar_space = cols - percentage_str_width - square_brackets_width
+ if maxval == 0:
+ max_bar_width = bar_space-3
+ image = " "
+ if cols < min_columns:
+ return image
+ if position <= 0.5:
+ offset = 2 * position
+ else:
+ offset = 2 * (1 - position)
+ delta = 0.5 / max_bar_width
+ position += delta
+ if position >= 1.0:
+ position = 0.0
+ # make sure it touches the ends
+ if 1.0 - position < delta:
+ position = 1.0
+ if position < 0.5 and 0.5 - position < delta:
+ position = 0.5
+ self._position = position
+ bar_width = int(offset * max_bar_width)
+ image = image + "[" + (bar_width * " ") + \
+ "<=>" + ((max_bar_width - bar_width) * " ") + "]"
+ return image
+ else:
+ percentage = int(100 * float(curval) / maxval)
+ if percentage == 100:
+ percentage_str_width += 1
+ bar_space -= 1
+ max_bar_width = bar_space - 1
+ image = ("%d%% " % percentage).rjust(percentage_str_width)
+ if cols < min_columns:
+ return image
+ offset = float(curval) / maxval
+ bar_width = int(offset * max_bar_width)
+ image = image + "[" + (bar_width * "=") + \
+ ">" + ((max_bar_width - bar_width) * " ") + "]"
+ return image
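+ # Shapes produced above (illustrative, width-dependent):
+ #   determinate:   " 42% [==========>                  ]"
+ #   indeterminate: "     [    <=>                      ]"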
+
+_color_map_loaded = False
+
+def _init(config_root='/'):
+ """
+ Load color.map from the given config_root. This is called automatically
+ on first access of the codes or _styles attributes (unless it has already
+ been called for some other reason).
+ """
+
+ global _color_map_loaded, codes, _styles
+ if _color_map_loaded:
+ return
+
+ _color_map_loaded = True
+ codes = object.__getattribute__(codes, '_attr')
+ _styles = object.__getattribute__(_styles, '_attr')
+
+ for k, v in codes.items():
+ codes[k] = _unicode_decode(v)
+
+ for k, v in _styles.items():
+ _styles[k] = _unicode_decode(v)
+
+ try:
+ _parse_color_map(config_root=config_root,
+ onerror=lambda e: writemsg("%s\n" % str(e), noiselevel=-1))
+ except FileNotFound:
+ pass
+ except PermissionDenied as e:
+ writemsg(_("Permission denied: '%s'\n") % str(e), noiselevel=-1)
+ del e
+ except PortageException as e:
+ writemsg("%s\n" % str(e), noiselevel=-1)
+ del e
+
+class _LazyInitColorMap(portage.proxy.objectproxy.ObjectProxy):
+
+ __slots__ = ('_attr',)
+
+ def __init__(self, attr):
+ portage.proxy.objectproxy.ObjectProxy.__init__(self)
+ object.__setattr__(self, '_attr', attr)
+
+ def _get_target(self):
+ _init()
+ return object.__getattribute__(self, '_attr')
+
+codes = _LazyInitColorMap(codes)
+_styles = _LazyInitColorMap(_styles)
diff --git a/portage_with_autodep/pym/portage/package/__init__.py b/portage_with_autodep/pym/portage/package/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/portage_with_autodep/pym/portage/package/ebuild/__init__.py b/portage_with_autodep/pym/portage/package/ebuild/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/KeywordsManager.py b/portage_with_autodep/pym/portage/package/ebuild/_config/KeywordsManager.py
new file mode 100644
index 0000000..cd22554
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_config/KeywordsManager.py
@@ -0,0 +1,284 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'KeywordsManager',
+)
+
+from _emerge.Package import Package
+from portage import os
+from portage.dep import ExtendedAtomDict, _repo_separator, _slot_separator
+from portage.localization import _
+from portage.package.ebuild._config.helper import ordered_by_atom_specificity
+from portage.util import grabdict_package, stack_lists, writemsg
+from portage.versions import cpv_getkey
+
+class KeywordsManager(object):
+ """Manager class to handle keywords processing and validation"""
+
+ def __init__(self, profiles, abs_user_config, user_config=True,
+ global_accept_keywords=""):
+ self._pkeywords_list = []
+ rawpkeywords = [grabdict_package(
+ os.path.join(x, "package.keywords"), recursive=1,
+ verify_eapi=True) \
+ for x in profiles]
+ for pkeyworddict in rawpkeywords:
+ if not pkeyworddict:
+ # Omit non-existent files from the stack.
+ continue
+ cpdict = {}
+ for k, v in pkeyworddict.items():
+ cpdict.setdefault(k.cp, {})[k] = v
+ self._pkeywords_list.append(cpdict)
+ self._pkeywords_list = tuple(self._pkeywords_list)
+
+ self._p_accept_keywords = []
+ raw_p_accept_keywords = [grabdict_package(
+ os.path.join(x, "package.accept_keywords"), recursive=1,
+ verify_eapi=True) \
+ for x in profiles]
+ for d in raw_p_accept_keywords:
+ if not d:
+ # Omit non-existent files from the stack.
+ continue
+ cpdict = {}
+ for k, v in d.items():
+ cpdict.setdefault(k.cp, {})[k] = tuple(v)
+ self._p_accept_keywords.append(cpdict)
+ self._p_accept_keywords = tuple(self._p_accept_keywords)
+
+ self.pkeywordsdict = ExtendedAtomDict(dict)
+
+ if user_config:
+ pkgdict = grabdict_package(
+ os.path.join(abs_user_config, "package.keywords"),
+ recursive=1, allow_wildcard=True, allow_repo=True,
+ verify_eapi=False)
+
+ for k, v in grabdict_package(
+ os.path.join(abs_user_config, "package.accept_keywords"),
+ recursive=1, allow_wildcard=True, allow_repo=True,
+ verify_eapi=False).items():
+ pkgdict.setdefault(k, []).extend(v)
+
+ accept_keywords_defaults = global_accept_keywords.split()
+ accept_keywords_defaults = tuple('~' + keyword for keyword in \
+ accept_keywords_defaults if keyword[:1] not in "~-")
+ for k, v in pkgdict.items():
+ # default to ~arch if no specific keyword is given
+ if not v:
+ v = accept_keywords_defaults
+ else:
+ v = tuple(v)
+ self.pkeywordsdict.setdefault(k.cp, {})[k] = v
+
+
+ def getKeywords(self, cpv, slot, keywords, repo):
+ cp = cpv_getkey(cpv)
+ pkg = "".join((cpv, _slot_separator, slot))
+ if repo and repo != Package.UNKNOWN_REPO:
+ pkg = "".join((pkg, _repo_separator, repo))
+ keywords = [[x for x in keywords.split() if x != "-*"]]
+ for pkeywords_dict in self._pkeywords_list:
+ cpdict = pkeywords_dict.get(cp)
+ if cpdict:
+ pkg_keywords = ordered_by_atom_specificity(cpdict, pkg)
+ if pkg_keywords:
+ keywords.extend(pkg_keywords)
+ return stack_lists(keywords, incremental=True)
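+ # Stacking example (illustrative): KEYWORDS="~amd64 x86" plus a profile
+ # package.keywords entry "amd64" for a matching atom stacks to the union
+ # {"~amd64", "x86", "amd64"}; "-keyword" entries would subtract instead.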
+
+
+ def getMissingKeywords(self,
+ cpv,
+ slot,
+ keywords,
+ repo,
+ global_accept_keywords,
+ backuped_accept_keywords):
+ """
+ Take a package and return a list of any KEYWORDS that the user may
+ need to accept for the given package. If the KEYWORDS are empty
+ and the ** keyword has not been accepted, the returned list will
+ contain ** alone (in order to distinguish from the case of "none
+ missing").
+
+ @param cpv: The package name (for package.keywords support)
+ @type cpv: String
+ @param slot: The 'SLOT' key from the raw package metadata
+ @type slot: String
+ @param keywords: The 'KEYWORDS' key from the raw package metadata
+ @type keywords: String
+ @param repo: The repository name for the given package
+ @type repo: String
+ @param global_accept_keywords: The current value of ACCEPT_KEYWORDS
+ @type global_accept_keywords: String
+ @param backuped_accept_keywords: ACCEPT_KEYWORDS from the backup env
+ @type backuped_accept_keywords: String
+ @rtype: List
+ @return: A list of KEYWORDS that have not been accepted.
+ """
+
+ mygroups = self.getKeywords(cpv, slot, keywords, repo)
+ # Repoman may modify this attribute as necessary.
+ pgroups = global_accept_keywords.split()
+
+ unmaskgroups = self.getPKeywords(cpv, slot, repo,
+ global_accept_keywords)
+ pgroups.extend(unmaskgroups)
+
+ # Hack: Need to check the env directly here as otherwise stacking
+ # doesn't work properly as negative values are lost in the config
+ # object (bug #139600)
+ egroups = backuped_accept_keywords.split()
+
+ if unmaskgroups or egroups:
+ pgroups = self._getEgroups(egroups, pgroups)
+ else:
+ pgroups = set(pgroups)
+
+ return self._getMissingKeywords(cpv, pgroups, mygroups)
+
+
+ def getRawMissingKeywords(self,
+ cpv,
+ slot,
+ keywords,
+ repo,
+ global_accept_keywords):
+ """
+ Take a package and return a list of any KEYWORDS that the user may
+ need to accept for the given package. If the KEYWORDS are empty,
+ the returned list will contain ** alone (in order to distinguish
+ from the case of "none missing"). This DOES NOT apply any user config
+ package.accept_keywords acceptance.
+
+ @param cpv: The package name (for package.keywords support)
+ @type cpv: String
+ @param slot: The 'SLOT' key from the raw package metadata
+ @type slot: String
+ @param keywords: The 'KEYWORDS' key from the raw package metadata
+ @type keywords: String
+ @param repo: The repository name for the given package
+ @type repo: String
+ @param global_accept_keywords: The current value of ACCEPT_KEYWORDS
+ @type global_accept_keywords: String
+ @rtype: List
+ @return: lists of KEYWORDS that have not been accepted
+ and the keywords it looked for.
+ """
+
+ mygroups = self.getKeywords(cpv, slot, keywords, repo)
+ pgroups = global_accept_keywords.split()
+ pgroups = set(pgroups)
+ return self._getMissingKeywords(cpv, pgroups, mygroups)
+
+
+ @staticmethod
+ def _getEgroups(egroups, mygroups):
+ """gets any keywords defined in the environment
+
+ @param egroups: ACCEPT_KEYWORDS tokens from the backup environment
+ @type egroups: List
+ @param mygroups: the keyword groups accepted so far
+ @type mygroups: List
+ @rtype: Set
+ @return: set of KEYWORDS that have been accepted
+ """
+ mygroups = list(mygroups)
+ mygroups.extend(egroups)
+ inc_pgroups = set()
+ for x in mygroups:
+ if x[:1] == "-":
+ if x == "-*":
+ inc_pgroups.clear()
+ else:
+ inc_pgroups.discard(x[1:])
+ else:
+ inc_pgroups.add(x)
+ return inc_pgroups
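+ # Incremental example (illustrative): mygroups=["~amd64"] with
+ # egroups=["-*", "amd64"] yields {"amd64"}: "-*" clears everything
+ # accumulated so far before "amd64" is re-added.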
+
+
+ @staticmethod
+ def _getMissingKeywords(cpv, pgroups, mygroups):
+ """Determines the missing keywords
+
+ @param pgroups: The pkg keywords accepted
+ @type pgroups: list
+ @param mygroups: The ebuild keywords
+ @type mygroups: list
+ @rtype: list
+ @return: list of KEYWORDS that have not been accepted ([] when accepted)
+ """
+ match = False
+ hasstable = False
+ hastesting = False
+ for gp in mygroups:
+ if gp == "*" or (gp == "-*" and len(mygroups) == 1):
+ writemsg(_("--- WARNING: Package '%(cpv)s' uses"
+ " '%(keyword)s' keyword.\n") % {"cpv": cpv, "keyword": gp},
+ noiselevel=-1)
+ if gp == "*":
+ match = True
+ break
+ elif gp in pgroups:
+ match = True
+ break
+ elif gp.startswith("~"):
+ hastesting = True
+ elif not gp.startswith("-"):
+ hasstable = True
+ if not match and \
+ ((hastesting and "~*" in pgroups) or \
+ (hasstable and "*" in pgroups) or "**" in pgroups):
+ match = True
+ if match:
+ missing = []
+ else:
+ if not mygroups:
+ # If KEYWORDS is empty then we still have to return something
+ # in order to distinguish from the case of "none missing".
+ mygroups.append("**")
+ missing = mygroups
+ return missing
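+ # Example (illustrative): mygroups=["~amd64"] with pgroups={"amd64"}
+ # returns ["~amd64"] (the testing keyword is missing); adding "~amd64"
+ # or "~*" to pgroups makes it return [].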
+
+
+ def getPKeywords(self, cpv, slot, repo, global_accept_keywords):
+ """Gets any package.keywords settings for cp for the given
+ cpv, slot and repo
+
+ @param cpv: The package name (for package.keywords support)
+ @type cpv: String
+ @param slot: The 'SLOT' key from the raw package metadata
+ @type slot: String
+ @param repo: The repository name for the given package
+ @type repo: String
+ @param global_accept_keywords: The current value of ACCEPT_KEYWORDS
+ @type global_accept_keywords: String
+ @rtype: List
+ @return: list of KEYWORDS that have been accepted
+ """
+
+ pgroups = global_accept_keywords.split()
+ cp = cpv_getkey(cpv)
+
+ unmaskgroups = []
+ if self._p_accept_keywords:
+ cpv_slot = "%s:%s" % (cpv, slot)
+ accept_keywords_defaults = tuple('~' + keyword for keyword in \
+ pgroups if keyword[:1] not in "~-")
+ for d in self._p_accept_keywords:
+ cpdict = d.get(cp)
+ if cpdict:
+ pkg_accept_keywords = \
+ ordered_by_atom_specificity(cpdict, cpv_slot)
+ if pkg_accept_keywords:
+ for x in pkg_accept_keywords:
+ if not x:
+ x = accept_keywords_defaults
+ unmaskgroups.extend(x)
+
+ pkgdict = self.pkeywordsdict.get(cp)
+ if pkgdict:
+ cpv_slot = "%s:%s" % (cpv, slot)
+ pkg_accept_keywords = \
+ ordered_by_atom_specificity(pkgdict, cpv_slot, repo=repo)
+ if pkg_accept_keywords:
+ for x in pkg_accept_keywords:
+ unmaskgroups.extend(x)
+ return unmaskgroups
+
diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/LicenseManager.py b/portage_with_autodep/pym/portage/package/ebuild/_config/LicenseManager.py
new file mode 100644
index 0000000..effd55b
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_config/LicenseManager.py
@@ -0,0 +1,236 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'LicenseManager',
+)
+
+from portage import os
+from portage.dep import ExtendedAtomDict, use_reduce
+from portage.exception import InvalidDependString
+from portage.localization import _
+from portage.util import grabdict, grabdict_package, writemsg
+from portage.versions import cpv_getkey
+
+from portage.package.ebuild._config.helper import ordered_by_atom_specificity
+
+
+class LicenseManager(object):
+
+ def __init__(self, license_group_locations, abs_user_config, user_config=True):
+
+ self._accept_license_str = None
+ self._accept_license = None
+ self._license_groups = {}
+ self._plicensedict = ExtendedAtomDict(dict)
+ self._undef_lic_groups = set()
+
+ if user_config:
+ license_group_locations = list(license_group_locations) + [abs_user_config]
+
+ self._read_license_groups(license_group_locations)
+
+ if user_config:
+ self._read_user_config(abs_user_config)
+
+ def _read_user_config(self, abs_user_config):
+ licdict = grabdict_package(os.path.join(
+ abs_user_config, "package.license"), recursive=1, allow_wildcard=True, allow_repo=True, verify_eapi=False)
+ for k, v in licdict.items():
+ self._plicensedict.setdefault(k.cp, {})[k] = \
+ self.expandLicenseTokens(v)
+
+ def _read_license_groups(self, locations):
+ for loc in locations:
+ for k, v in grabdict(
+ os.path.join(loc, "license_groups")).items():
+ self._license_groups.setdefault(k, []).extend(v)
+
+ for k, v in self._license_groups.items():
+ self._license_groups[k] = frozenset(v)
+
+ def extract_global_changes(self, old=""):
+ ret = old
+ atom_license_map = self._plicensedict.get("*/*")
+ if atom_license_map is not None:
+ v = atom_license_map.pop("*/*", None)
+ if v is not None:
+ ret = " ".join(v)
+ if old:
+ ret = old + " " + ret
+ if not atom_license_map:
+ #No tokens left in atom_license_map, remove it.
+ del self._plicensedict["*/*"]
+ return ret
+
+ def expandLicenseTokens(self, tokens):
+ """ Take a token from ACCEPT_LICENSE or package.license and expand it
+ if it's a group token (indicated by @) or just return it if it's not a
+ group. If a group is negated then negate all group elements."""
+ expanded_tokens = []
+ for x in tokens:
+ expanded_tokens.extend(self._expandLicenseToken(x, None))
+ return expanded_tokens
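+ # Example (illustrative): with a license_groups entry
+ #   FSF-APPROVED GPL-2 GPL-3
+ # expandLicenseTokens(["@FSF-APPROVED"]) returns ["GPL-2", "GPL-3"] (in
+ # some order) and ["-@FSF-APPROVED"] returns the negated counterparts.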
+
+ def _expandLicenseToken(self, token, traversed_groups):
+ negate = False
+ rValue = []
+ if token.startswith("-"):
+ negate = True
+ license_name = token[1:]
+ else:
+ license_name = token
+ if not license_name.startswith("@"):
+ rValue.append(token)
+ return rValue
+ group_name = license_name[1:]
+ if traversed_groups is None:
+ traversed_groups = set()
+ license_group = self._license_groups.get(group_name)
+ if group_name in traversed_groups:
+ writemsg(_("Circular license group reference"
+ " detected in '%s'\n") % group_name, noiselevel=-1)
+ rValue.append("@"+group_name)
+ elif license_group:
+ traversed_groups.add(group_name)
+ for l in license_group:
+ if l.startswith("-"):
+ writemsg(_("Skipping invalid element %s"
+ " in license group '%s'\n") % (l, group_name),
+ noiselevel=-1)
+ else:
+ rValue.extend(self._expandLicenseToken(l, traversed_groups))
+ else:
+ if self._license_groups and \
+ group_name not in self._undef_lic_groups:
+ self._undef_lic_groups.add(group_name)
+ writemsg(_("Undefined license group '%s'\n") % group_name,
+ noiselevel=-1)
+ rValue.append("@"+group_name)
+ if negate:
+ rValue = ["-" + token for token in rValue]
+ return rValue
+
+ def _getPkgAcceptLicense(self, cpv, slot, repo):
+ """
+ Get an ACCEPT_LICENSE list, accounting for package.license.
+ """
+ accept_license = self._accept_license
+ cp = cpv_getkey(cpv)
+ cpdict = self._plicensedict.get(cp)
+ if cpdict:
+ cpv_slot = "%s:%s" % (cpv, slot)
+ plicence_list = ordered_by_atom_specificity(cpdict, cpv_slot, repo)
+ if plicence_list:
+ accept_license = list(self._accept_license)
+ for x in plicence_list:
+ accept_license.extend(x)
+ return accept_license
+
+ def get_prunned_accept_license(self, cpv, use, lic, slot, repo):
+ """
+ Generate a pruned version of ACCEPT_LICENSE, by intersection with
+ LICENSE. This is required since otherwise ACCEPT_LICENSE might be
+ too big (bigger than ARG_MAX), causing execve() calls to fail with
+ E2BIG errors as in bug #262647.
+ """
+ try:
+ licenses = set(use_reduce(lic, uselist=use, flat=True))
+ except InvalidDependString:
+ licenses = set()
+ licenses.discard('||')
+
+ accept_license = self._getPkgAcceptLicense(cpv, slot, repo)
+
+ if accept_license:
+ acceptable_licenses = set()
+ for x in accept_license:
+ if x == '*':
+ acceptable_licenses.update(licenses)
+ elif x == '-*':
+ acceptable_licenses.clear()
+ elif x[:1] == '-':
+ acceptable_licenses.discard(x[1:])
+ elif x in licenses:
+ acceptable_licenses.add(x)
+
+ licenses = acceptable_licenses
+ return ' '.join(sorted(licenses))
+
+ def getMissingLicenses(self, cpv, use, lic, slot, repo):
+ """
+ Take a LICENSE string and return a list of any licenses that the user
+ may need to accept for the given package. The returned list will not
+ contain any licenses that have already been accepted. This method
+ can throw an InvalidDependString exception.
+
+ @param cpv: The package name (for package.license support)
+ @type cpv: String
+ @param use: "USE" from the cpv's metadata
+ @type use: String
+ @param lic: "LICENSE" from the cpv's metadata
+ @type lic: String
+ @param slot: "SLOT" from the cpv's metadata
+ @type slot: String
+ @rtype: List
+ @return: A list of licenses that have not been accepted.
+ """
+
+ licenses = set(use_reduce(lic, matchall=1, flat=True))
+ licenses.discard('||')
+
+ acceptable_licenses = set()
+ for x in self._getPkgAcceptLicense(cpv, slot, repo):
+ if x == '*':
+ acceptable_licenses.update(licenses)
+ elif x == '-*':
+ acceptable_licenses.clear()
+ elif x[:1] == '-':
+ acceptable_licenses.discard(x[1:])
+ else:
+ acceptable_licenses.add(x)
+
+ license_str = lic
+ if "?" in license_str:
+ use = use.split()
+ else:
+ use = []
+
+ license_struct = use_reduce(license_str, uselist=use, opconvert=True)
+ return self._getMaskedLicenses(license_struct, acceptable_licenses)
+
+ def _getMaskedLicenses(self, license_struct, acceptable_licenses):
+ if not license_struct:
+ return []
+ if license_struct[0] == "||":
+ ret = []
+ for element in license_struct[1:]:
+ if isinstance(element, list):
+ if element:
+ tmp = self._getMaskedLicenses(element, acceptable_licenses)
+ if not tmp:
+ return []
+ ret.extend(tmp)
+ else:
+ if element in acceptable_licenses:
+ return []
+ ret.append(element)
+ # Return all masked licenses, since we don't know which combination
+ # (if any) the user will decide to unmask.
+ return ret
+
+ ret = []
+ for element in license_struct:
+ if isinstance(element, list):
+ if element:
+ ret.extend(self._getMaskedLicenses(element,
+ acceptable_licenses))
+ else:
+ if element not in acceptable_licenses:
+ ret.append(element)
+ return ret
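+ # Example (illustrative): LICENSE="|| ( GPL-2 BSD )" with only BSD
+ # accepted yields [] (the || choice is satisfiable); with neither
+ # accepted it yields ["GPL-2", "BSD"], every candidate the user could
+ # unmask.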
+
+ def set_accept_license_str(self, accept_license_str):
+ if accept_license_str != self._accept_license_str:
+ self._accept_license_str = accept_license_str
+ self._accept_license = tuple(self.expandLicenseTokens(accept_license_str.split()))
diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/LocationsManager.py b/portage_with_autodep/pym/portage/package/ebuild/_config/LocationsManager.py
new file mode 100644
index 0000000..c2b115b
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_config/LocationsManager.py
@@ -0,0 +1,182 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'LocationsManager',
+)
+
+import io
+from portage import os, eapi_is_supported, _encodings, _unicode_encode
+from portage.const import CUSTOM_PROFILE_PATH, GLOBAL_CONFIG_PATH, \
+ PROFILE_PATH, USER_CONFIG_PATH
+from portage.exception import DirectoryNotFound, ParseError
+from portage.localization import _
+from portage.util import ensure_dirs, grabfile, \
+ normalize_path, shlex_split, writemsg
+
+
+class LocationsManager(object):
+
+ def __init__(self, config_root=None, eprefix=None, config_profile_path=None, local_config=True, \
+ target_root=None):
+ self.user_profile_dir = None
+ self._local_repo_conf_path = None
+ self.eprefix = eprefix
+ self.config_root = config_root
+ self.target_root = target_root
+ self._user_config = local_config
+
+ if self.eprefix is None:
+ self.eprefix = ""
+
+ if self.config_root is None:
+ self.config_root = self.eprefix + os.sep
+
+ self.config_root = normalize_path(os.path.abspath(
+ self.config_root)).rstrip(os.path.sep) + os.path.sep
+
+ self._check_var_directory("PORTAGE_CONFIGROOT", self.config_root)
+ self.abs_user_config = os.path.join(self.config_root, USER_CONFIG_PATH)
+
+ if config_profile_path is None:
+ config_profile_path = \
+ os.path.join(self.config_root, PROFILE_PATH)
+ if os.path.isdir(config_profile_path):
+ self.profile_path = config_profile_path
+ else:
+ config_profile_path = \
+ os.path.join(self.abs_user_config, 'make.profile')
+ if os.path.isdir(config_profile_path):
+ self.profile_path = config_profile_path
+ else:
+ self.profile_path = None
+ else:
+ # NOTE: repoman may pass in an empty string
+ # here, in order to create an empty profile
+ # for checking dependencies of packages with
+ # empty KEYWORDS.
+ self.profile_path = config_profile_path
+
+
+ # The symlink might not exist or might not be a symlink.
+ self.profiles = []
+ if self.profile_path:
+ try:
+ self._addProfile(os.path.realpath(self.profile_path))
+ except ParseError as e:
+ writemsg(_("!!! Unable to parse profile: '%s'\n") % \
+ self.profile_path, noiselevel=-1)
+ writemsg("!!! ParseError: %s\n" % str(e), noiselevel=-1)
+ self.profiles = []
+
+ if self._user_config and self.profiles:
+ custom_prof = os.path.join(
+ self.config_root, CUSTOM_PROFILE_PATH)
+ if os.path.exists(custom_prof):
+ self.user_profile_dir = custom_prof
+ self.profiles.append(custom_prof)
+ del custom_prof
+
+ self.profiles = tuple(self.profiles)
+
+ def _check_var_directory(self, varname, var):
+ if not os.path.isdir(var):
+ writemsg(_("!!! Error: %s='%s' is not a directory. "
+ "Please correct this.\n") % (varname, var),
+ noiselevel=-1)
+ raise DirectoryNotFound(var)
+
+ def _addProfile(self, currentPath):
+ parentsFile = os.path.join(currentPath, "parent")
+ eapi_file = os.path.join(currentPath, "eapi")
+ try:
+ eapi = io.open(_unicode_encode(eapi_file,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='replace'
+ ).readline().strip()
+ except IOError:
+ pass
+ else:
+ if not eapi_is_supported(eapi):
+ raise ParseError(_(
+ "Profile contains unsupported "
+ "EAPI '%s': '%s'") % \
+ (eapi, os.path.realpath(eapi_file),))
+ if os.path.exists(parentsFile):
+ parents = grabfile(parentsFile)
+ if not parents:
+ raise ParseError(
+ _("Empty parent file: '%s'") % parentsFile)
+ for parentPath in parents:
+ parentPath = normalize_path(os.path.join(
+ currentPath, parentPath))
+ if os.path.exists(parentPath):
+ self._addProfile(parentPath)
+ else:
+ raise ParseError(
+ _("Parent '%s' not found: '%s'") % \
+ (parentPath, parentsFile))
+ self.profiles.append(currentPath)
+
+ def set_root_override(self, root_overwrite=None):
+ # Allow ROOT setting to come from make.conf if it's not overridden
+ # by the constructor argument (from the calling environment).
+ if self.target_root is None and root_overwrite is not None:
+ self.target_root = root_overwrite
+ if not self.target_root.strip():
+ self.target_root = None
+ if self.target_root is None:
+ self.target_root = "/"
+
+ self.target_root = normalize_path(os.path.abspath(
+ self.target_root)).rstrip(os.path.sep) + os.path.sep
+
+ ensure_dirs(self.target_root)
+ self._check_var_directory("ROOT", self.target_root)
+
+ self.eroot = self.target_root.rstrip(os.sep) + self.eprefix + os.sep
+
+ # make.globals should not be relative to config_root
+ # because it only contains constants. However, if EPREFIX
+ # is set then there are two possible scenarios:
+ # 1) If $ROOT == "/" then make.globals should be
+ # relative to EPREFIX.
+ # 2) If $ROOT != "/" then the correct location of
+ # make.globals needs to be specified in the constructor
+ # parameters, since it's a property of the host system
+ # (and the current config represents the target system).
+ self.global_config_path = GLOBAL_CONFIG_PATH
+ if self.eprefix:
+ if self.target_root == "/":
+ # case (1) above
+ self.global_config_path = os.path.join(self.eprefix,
+ GLOBAL_CONFIG_PATH.lstrip(os.sep))
+ else:
+ # case (2) above
+ # For now, just assume make.globals is relative
+ # to EPREFIX.
+ # TODO: Pass in more info to the constructor,
+ # so we know the host system configuration.
+ self.global_config_path = os.path.join(self.eprefix,
+ GLOBAL_CONFIG_PATH.lstrip(os.sep))
+
+ def set_port_dirs(self, portdir, portdir_overlay):
+ self.portdir = portdir
+ self.portdir_overlay = portdir_overlay
+ if self.portdir_overlay is None:
+ self.portdir_overlay = ""
+
+ self.overlay_profiles = []
+ for ov in shlex_split(self.portdir_overlay):
+ ov = normalize_path(ov)
+ profiles_dir = os.path.join(ov, "profiles")
+ if os.path.isdir(profiles_dir):
+ self.overlay_profiles.append(profiles_dir)
+
+ self.profile_locations = [os.path.join(portdir, "profiles")] + self.overlay_profiles
+ self.profile_and_user_locations = self.profile_locations[:]
+ if self._user_config:
+ self.profile_and_user_locations.append(self.abs_user_config)
+
+ self.profile_locations = tuple(self.profile_locations)
+ self.profile_and_user_locations = tuple(self.profile_and_user_locations)
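
As a reading aid for the ROOT/EPREFIX handling above: set_root_override()
reduces to a little path arithmetic. A minimal standalone sketch, using
os.path.normpath as a stand-in for portage's normalize_path; the paths are
illustrative.

    import os.path
    from os import sep

    def compute_eroot(target_root, eprefix=""):
        # Mirror of set_root_override(): ROOT is absolutized, normalized
        # and given exactly one trailing separator, then EPREFIX is
        # appended to form EROOT.
        target_root = os.path.normpath(
            os.path.abspath(target_root)).rstrip(sep) + sep
        return target_root.rstrip(sep) + eprefix + sep

    print(compute_eroot("/"))               # /
    print(compute_eroot("/mnt/gentoo"))     # /mnt/gentoo/
    print(compute_eroot("/", "/usr/local")) # /usr/local/
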
diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/MaskManager.py b/portage_with_autodep/pym/portage/package/ebuild/_config/MaskManager.py
new file mode 100644
index 0000000..df93e10
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_config/MaskManager.py
@@ -0,0 +1,189 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'MaskManager',
+)
+
+from portage import os
+from portage.dep import ExtendedAtomDict, match_from_list, _repo_separator, _slot_separator
+from portage.util import append_repo, grabfile_package, stack_lists
+from portage.versions import cpv_getkey
+from _emerge.Package import Package
+
+class MaskManager(object):
+
+ def __init__(self, repositories, profiles, abs_user_config,
+ user_config=True, strict_umatched_removal=False):
+ self._punmaskdict = ExtendedAtomDict(list)
+ self._pmaskdict = ExtendedAtomDict(list)
+ # Preserves atoms that are eliminated by negative
+ # incrementals in user_pkgmasklines.
+ self._pmaskdict_raw = ExtendedAtomDict(list)
+
+ #Read profile/package.mask from every repo.
+ #Repositories inherit masks from their parent profiles and
+ #are able to remove masks from them with -atoms.
+ #Such a removal affects only the current repo, but not the parent.
+ #Add ::repo specs to every atom to make sure atoms only affect
+ #packages from the current repo.
+
+ # Cache the repository-wide package.mask files as a particular
+ # repo may often be referenced by others as the master.
+ pmask_cache = {}
+
+ def grab_pmask(loc):
+ if loc not in pmask_cache:
+ pmask_cache[loc] = grabfile_package(
+ os.path.join(loc, "profiles", "package.mask"),
+ recursive=1, remember_source_file=True, verify_eapi=True)
+ return pmask_cache[loc]
+
+ repo_pkgmasklines = []
+ for repo in repositories.repos_with_profiles():
+ lines = []
+ repo_lines = grab_pmask(repo.location)
+ for master in repo.masters:
+ master_lines = grab_pmask(master.location)
+ lines.append(stack_lists([master_lines, repo_lines], incremental=1,
+ remember_source_file=True, warn_for_unmatched_removal=True,
+ strict_warn_for_unmatched_removal=strict_umatched_removal))
+ if not repo.masters:
+ lines.append(stack_lists([repo_lines], incremental=1,
+ remember_source_file=True, warn_for_unmatched_removal=True,
+ strict_warn_for_unmatched_removal=strict_umatched_removal))
+ repo_pkgmasklines.extend(append_repo(stack_lists(lines), repo.name, remember_source_file=True))
+
+ repo_pkgunmasklines = []
+ for repo in repositories.repos_with_profiles():
+ repo_lines = grabfile_package(os.path.join(repo.location, "profiles", "package.unmask"), \
+ recursive=1, remember_source_file=True, verify_eapi=True)
+ lines = stack_lists([repo_lines], incremental=1, \
+ remember_source_file=True, warn_for_unmatched_removal=True,
+ strict_warn_for_unmatched_removal=strict_umatched_removal)
+ repo_pkgunmasklines.extend(append_repo(lines, repo.name, remember_source_file=True))
+
+ #Read package.mask from each profile. Stack them last
+ #to allow profiles to override masks from their parent profiles.
+ profile_pkgmasklines = []
+ profile_pkgunmasklines = []
+ for x in profiles:
+ profile_pkgmasklines.append(grabfile_package(
+ os.path.join(x, "package.mask"), recursive=1, remember_source_file=True, verify_eapi=True))
+ profile_pkgunmasklines.append(grabfile_package(
+ os.path.join(x, "package.unmask"), recursive=1, remember_source_file=True, verify_eapi=True))
+ profile_pkgmasklines = stack_lists(profile_pkgmasklines, incremental=1, \
+ remember_source_file=True, warn_for_unmatched_removal=True,
+ strict_warn_for_unmatched_removal=strict_umatched_removal)
+ profile_pkgunmasklines = stack_lists(profile_pkgunmasklines, incremental=1, \
+ remember_source_file=True, warn_for_unmatched_removal=True,
+ strict_warn_for_unmatched_removal=strict_umatched_removal)
+
+ #Read /etc/portage/package.mask. Don't stack it to allow the user to
+ #remove mask atoms from everywhere with -atoms.
+ user_pkgmasklines = []
+ user_pkgunmasklines = []
+ if user_config:
+ user_pkgmasklines = grabfile_package(
+ os.path.join(abs_user_config, "package.mask"), recursive=1, \
+ allow_wildcard=True, allow_repo=True, remember_source_file=True, verify_eapi=False)
+ user_pkgunmasklines = grabfile_package(
+ os.path.join(abs_user_config, "package.unmask"), recursive=1, \
+ allow_wildcard=True, allow_repo=True, remember_source_file=True, verify_eapi=False)
+
+ #Stack everything together. At this point, only user_pkgmasklines may contain -atoms.
+ #Don't warn for unmatched -atoms here, since we don't do it for any other user config file.
+ raw_pkgmasklines = stack_lists([repo_pkgmasklines, profile_pkgmasklines], \
+ incremental=1, remember_source_file=True, warn_for_unmatched_removal=False, ignore_repo=True)
+ pkgmasklines = stack_lists([repo_pkgmasklines, profile_pkgmasklines, user_pkgmasklines], \
+ incremental=1, remember_source_file=True, warn_for_unmatched_removal=False, ignore_repo=True)
+ pkgunmasklines = stack_lists([repo_pkgunmasklines, profile_pkgunmasklines, user_pkgunmasklines], \
+ incremental=1, remember_source_file=True, warn_for_unmatched_removal=False, ignore_repo=True)
+
+ for x, source_file in raw_pkgmasklines:
+ self._pmaskdict_raw.setdefault(x.cp, []).append(x)
+
+ for x, source_file in pkgmasklines:
+ self._pmaskdict.setdefault(x.cp, []).append(x)
+
+ for x, source_file in pkgunmasklines:
+ self._punmaskdict.setdefault(x.cp, []).append(x)
+
+ for d in (self._pmaskdict_raw, self._pmaskdict, self._punmaskdict):
+ for k, v in d.items():
+ d[k] = tuple(v)
+
+ def _getMaskAtom(self, cpv, slot, repo, unmask_atoms=None):
+ """
+ Take a package and return a matching package.mask atom, or None if no
+ such atom exists or it has been cancelled by package.unmask. PROVIDE
+ is not checked, so atoms will not be found for old-style virtuals.
+
+ @param cpv: The package name
+ @type cpv: String
+ @param slot: The package's slot
+ @type slot: String
+ @param repo: The package's repository [optional]
+ @type repo: String
+ @param unmask_atoms: optional list of unmask atoms, e.g. self._punmaskdict.get(cp)
+ @type unmask_atoms: list
+ @rtype: String
+ @return: A matching atom string or None if one is not found.
+ """
+
+ cp = cpv_getkey(cpv)
+ mask_atoms = self._pmaskdict.get(cp)
+ if mask_atoms:
+ pkg = "".join((cpv, _slot_separator, slot))
+ if repo and repo != Package.UNKNOWN_REPO:
+ pkg = "".join((pkg, _repo_separator, repo))
+ pkg_list = [pkg]
+ for x in mask_atoms:
+ if not match_from_list(x, pkg_list):
+ continue
+ if unmask_atoms:
+ for y in unmask_atoms:
+ if match_from_list(y, pkg_list):
+ return None
+ return x
+ return None
+
+
+ def getMaskAtom(self, cpv, slot, repo):
+ """
+ Take a package and return a matching package.mask atom, or None if no
+ such atom exists or it has been cancelled by package.unmask. PROVIDE
+ is not checked, so atoms will not be found for old-style virtuals.
+
+ @param cpv: The package name
+ @type cpv: String
+ @param slot: The package's slot
+ @type slot: String
+ @param repo: The package's repository [optional]
+ @type repo: String
+ @rtype: String
+ @return: A matching atom string or None if one is not found.
+ """
+
+ cp = cpv_getkey(cpv)
+ return self._getMaskAtom(cpv, slot, repo, self._punmaskdict.get(cp))
+
+
+ def getRawMaskAtom(self, cpv, slot, repo):
+ """
+ Take a package and return a matching package.mask atom, or None if
+ no such atom exists. Unlike getMaskAtom(), package.unmask entries
+ are ignored, so the atom is returned even if it has been unmasked.
+ PROVIDE is not checked, so atoms are not found for old-style virtuals.
+
+ @param cpv: The package name
+ @type cpv: String
+ @param slot: The package's slot
+ @type slot: String
+ @param repo: The package's repository [optional]
+ @type repo: String
+ @rtype: String
+ @return: A matching atom string or None if one is not found.
+ """
+
+ return self._getMaskAtom(cpv, slot, repo)
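
To make the unmask-cancellation rule in _getMaskAtom() concrete, here is a
minimal standalone sketch. Exact category/package equality stands in for
portage.dep.match_from_list(), so this models only the control flow; the
atoms are illustrative.

    def matches(atom, pkg_cp):
        # Toy stand-in for match_from_list(): exact cp equality.
        return atom == pkg_cp

    def get_mask_atom(mask_atoms, unmask_atoms, pkg_cp):
        # The first matching mask atom wins, unless any unmask atom also
        # matches, in which case the mask is cancelled.
        for atom in mask_atoms:
            if not matches(atom, pkg_cp):
                continue
            if any(matches(u, pkg_cp) for u in unmask_atoms):
                return None
            return atom
        return None

    print(get_mask_atom(["sys-apps/foo"], [], "sys-apps/foo"))
    # -> sys-apps/foo
    print(get_mask_atom(["sys-apps/foo"], ["sys-apps/foo"], "sys-apps/foo"))
    # -> None
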
diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/UseManager.py b/portage_with_autodep/pym/portage/package/ebuild/_config/UseManager.py
new file mode 100644
index 0000000..d7ef0f6
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_config/UseManager.py
@@ -0,0 +1,236 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'UseManager',
+)
+
+from _emerge.Package import Package
+from portage import os
+from portage.dep import ExtendedAtomDict, remove_slot, _get_useflag_re
+from portage.localization import _
+from portage.util import grabfile, grabdict_package, read_corresponding_eapi_file, stack_lists, writemsg
+from portage.versions import cpv_getkey
+
+from portage.package.ebuild._config.helper import ordered_by_atom_specificity
+
+class UseManager(object):
+
+ def __init__(self, repositories, profiles, abs_user_config, user_config=True):
+ # file variable
+ #--------------------------------
+ # repositories
+ #--------------------------------
+ # use.mask _repo_usemask_dict
+ # use.force _repo_useforce_dict
+ # package.use.mask _repo_pusemask_dict
+ # package.use.force _repo_puseforce_dict
+ # package.use _repo_puse_dict
+ #--------------------------------
+ # profiles
+ #--------------------------------
+ # use.mask _usemask_list
+ # use.force _useforce_list
+ # package.use.mask _pusemask_list
+ # package.use _pkgprofileuse
+ # package.use.force _puseforce_list
+ #--------------------------------
+ # user config
+ #--------------------------------
+ # package.use _pusedict
+
+ # Dynamic variables tracked by the config class
+ #--------------------------------
+ # profiles
+ #--------------------------------
+ # usemask
+ # useforce
+ #--------------------------------
+ # user config
+ #--------------------------------
+ # puse
+
+ self._repo_usemask_dict = self._parse_repository_files_to_dict_of_tuples("use.mask", repositories)
+ self._repo_useforce_dict = self._parse_repository_files_to_dict_of_tuples("use.force", repositories)
+ self._repo_pusemask_dict = self._parse_repository_files_to_dict_of_dicts("package.use.mask", repositories)
+ self._repo_puseforce_dict = self._parse_repository_files_to_dict_of_dicts("package.use.force", repositories)
+ self._repo_puse_dict = self._parse_repository_files_to_dict_of_dicts("package.use", repositories)
+
+ self._usemask_list = self._parse_profile_files_to_tuple_of_tuples("use.mask", profiles)
+ self._useforce_list = self._parse_profile_files_to_tuple_of_tuples("use.force", profiles)
+ self._pusemask_list = self._parse_profile_files_to_tuple_of_dicts("package.use.mask", profiles)
+ self._pkgprofileuse = self._parse_profile_files_to_tuple_of_dicts("package.use", profiles, juststrings=True)
+ self._puseforce_list = self._parse_profile_files_to_tuple_of_dicts("package.use.force", profiles)
+
+ self._pusedict = self._parse_user_files_to_extatomdict("package.use", abs_user_config, user_config)
+
+ self.repositories = repositories
+
+ def _parse_file_to_tuple(self, file_name):
+ ret = []
+ lines = grabfile(file_name, recursive=1)
+ eapi = read_corresponding_eapi_file(file_name)
+ useflag_re = _get_useflag_re(eapi)
+ for prefixed_useflag in lines:
+ if prefixed_useflag[:1] == "-":
+ useflag = prefixed_useflag[1:]
+ else:
+ useflag = prefixed_useflag
+ if useflag_re.match(useflag) is None:
+ writemsg(_("--- Invalid USE flag in '%s': '%s'\n") %
+ (file_name, prefixed_useflag), noiselevel=-1)
+ else:
+ ret.append(prefixed_useflag)
+ return tuple(ret)
+
+ def _parse_file_to_dict(self, file_name, juststrings=False):
+ ret = {}
+ location_dict = {}
+ file_dict = grabdict_package(file_name, recursive=1, verify_eapi=True)
+ eapi = read_corresponding_eapi_file(file_name)
+ useflag_re = _get_useflag_re(eapi)
+ for k, v in file_dict.items():
+ useflags = []
+ for prefixed_useflag in v:
+ if prefixed_useflag[:1] == "-":
+ useflag = prefixed_useflag[1:]
+ else:
+ useflag = prefixed_useflag
+ if useflag_re.match(useflag) is None:
+ writemsg(_("--- Invalid USE flag for '%s' in '%s': '%s'\n") %
+ (k, file_name, prefixed_useflag), noiselevel=-1)
+ else:
+ useflags.append(prefixed_useflag)
+ location_dict.setdefault(k, []).extend(useflags)
+ for k, v in location_dict.items():
+ if juststrings:
+ v = " ".join(v)
+ else:
+ v = tuple(v)
+ ret.setdefault(k.cp, {})[k] = v
+ return ret
+
+ def _parse_user_files_to_extatomdict(self, file_name, location, user_config):
+ ret = ExtendedAtomDict(dict)
+ if user_config:
+ pusedict = grabdict_package(
+ os.path.join(location, file_name), recursive=1, allow_wildcard=True, allow_repo=True, verify_eapi=False)
+ for k, v in pusedict.items():
+ ret.setdefault(k.cp, {})[k] = tuple(v)
+
+ return ret
+
+ def _parse_repository_files_to_dict_of_tuples(self, file_name, repositories):
+ ret = {}
+ for repo in repositories.repos_with_profiles():
+ ret[repo.name] = self._parse_file_to_tuple(os.path.join(repo.location, "profiles", file_name))
+ return ret
+
+ def _parse_repository_files_to_dict_of_dicts(self, file_name, repositories):
+ ret = {}
+ for repo in repositories.repos_with_profiles():
+ ret[repo.name] = self._parse_file_to_dict(os.path.join(repo.location, "profiles", file_name))
+ return ret
+
+ def _parse_profile_files_to_tuple_of_tuples(self, file_name, locations):
+ return tuple(self._parse_file_to_tuple(os.path.join(profile, file_name)) for profile in locations)
+
+ def _parse_profile_files_to_tuple_of_dicts(self, file_name, locations, juststrings=False):
+ return tuple(self._parse_file_to_dict(os.path.join(profile, file_name), juststrings) for profile in locations)
+
+ def getUseMask(self, pkg=None):
+ if pkg is None:
+ return frozenset(stack_lists(
+ self._usemask_list, incremental=True))
+
+ cp = getattr(pkg, "cp", None)
+ if cp is None:
+ cp = cpv_getkey(remove_slot(pkg))
+ usemask = []
+ if hasattr(pkg, "repo") and pkg.repo != Package.UNKNOWN_REPO:
+ repos = []
+ try:
+ repos.extend(repo.name for repo in
+ self.repositories[pkg.repo].masters)
+ except KeyError:
+ pass
+ repos.append(pkg.repo)
+ for repo in repos:
+ usemask.append(self._repo_usemask_dict.get(repo, {}))
+ cpdict = self._repo_pusemask_dict.get(repo, {}).get(cp)
+ if cpdict:
+ pkg_usemask = ordered_by_atom_specificity(cpdict, pkg)
+ if pkg_usemask:
+ usemask.extend(pkg_usemask)
+ for i, pusemask_dict in enumerate(self._pusemask_list):
+ if self._usemask_list[i]:
+ usemask.append(self._usemask_list[i])
+ cpdict = pusemask_dict.get(cp)
+ if cpdict:
+ pkg_usemask = ordered_by_atom_specificity(cpdict, pkg)
+ if pkg_usemask:
+ usemask.extend(pkg_usemask)
+ return frozenset(stack_lists(usemask, incremental=True))
+
+ def getUseForce(self, pkg=None):
+ if pkg is None:
+ return frozenset(stack_lists(
+ self._useforce_list, incremental=True))
+
+ cp = getattr(pkg, "cp", None)
+ if cp is None:
+ cp = cpv_getkey(remove_slot(pkg))
+ useforce = []
+ if hasattr(pkg, "repo") and pkg.repo != Package.UNKNOWN_REPO:
+ repos = []
+ try:
+ repos.extend(repo.name for repo in
+ self.repositories[pkg.repo].masters)
+ except KeyError:
+ pass
+ repos.append(pkg.repo)
+ for repo in repos:
+ useforce.append(self._repo_useforce_dict.get(repo, {}))
+ cpdict = self._repo_puseforce_dict.get(repo, {}).get(cp)
+ if cpdict:
+ pkg_useforce = ordered_by_atom_specificity(cpdict, pkg)
+ if pkg_useforce:
+ useforce.extend(pkg_useforce)
+ for i, puseforce_dict in enumerate(self._puseforce_list):
+ if self._useforce_list[i]:
+ useforce.append(self._useforce_list[i])
+ cpdict = puseforce_dict.get(cp)
+ if cpdict:
+ pkg_useforce = ordered_by_atom_specificity(cpdict, pkg)
+ if pkg_useforce:
+ useforce.extend(pkg_useforce)
+ return frozenset(stack_lists(useforce, incremental=True))
+
+ def getPUSE(self, pkg):
+ cp = getattr(pkg, "cp", None)
+ if cp is None:
+ cp = cpv_getkey(remove_slot(pkg))
+ ret = ""
+ cpdict = self._pusedict.get(cp)
+ if cpdict:
+ puse_matches = ordered_by_atom_specificity(cpdict, pkg)
+ if puse_matches:
+ puse_list = []
+ for x in puse_matches:
+ puse_list.extend(x)
+ ret = " ".join(puse_list)
+ return ret
+
+ def extract_global_USE_changes(self, old=""):
+ ret = old
+ cpdict = self._pusedict.get("*/*")
+ if cpdict is not None:
+ v = cpdict.pop("*/*", None)
+ if v is not None:
+ ret = " ".join(v)
+ if old:
+ ret = old + " " + ret
+ if not cpdict:
+ #No tokens left for the */* atom, remove it from _pusedict.
+ del self._pusedict["*/*"]
+ return ret
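
getPUSE() above flattens the token lists returned by
ordered_by_atom_specificity(), least specific first, so that under
incremental stacking the more specific tokens win. A minimal standalone
sketch with made-up flags:

    def get_puse(matches_by_specificity):
        # Mirror of getPUSE(): flatten the matched token lists in
        # ascending specificity order and join into one string.
        puse = []
        for tokens in matches_by_specificity:
            puse.extend(tokens)
        return " ".join(puse)

    # e.g. a generic atom enables ssl, a more specific one disables it:
    print(get_puse([("ssl",), ("-ssl", "sqlite")]))  # ssl -ssl sqlite
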
diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/VirtualsManager.py b/portage_with_autodep/pym/portage/package/ebuild/_config/VirtualsManager.py
new file mode 100644
index 0000000..c4d1e36
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_config/VirtualsManager.py
@@ -0,0 +1,233 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'VirtualsManager',
+)
+
+from copy import deepcopy
+
+from portage import os
+from portage.dep import Atom
+from portage.exception import InvalidAtom
+from portage.localization import _
+from portage.util import grabdict, stack_dictlist, writemsg
+from portage.versions import cpv_getkey
+
+class VirtualsManager(object):
+
+ def __init__(self, *args, **kwargs):
+ if kwargs.get("_copy"):
+ return
+
+ assert len(args) == 1, "VirtualsManager.__init__ takes one positional argument"
+ assert not kwargs, "unknown keyword argument(s) '%s' passed to VirtualsManager.__init__" % \
+ ", ".join(kwargs)
+
+ profiles = args[0]
+ self._virtuals = None
+ self._dirVirtuals = None
+ self._virts_p = None
+
+ # Virtuals obtained from the vartree
+ self._treeVirtuals = None
+ # Virtuals added by the depgraph via self.add_depgraph_virtuals().
+ self._depgraphVirtuals = {}
+
+ #Initialise _dirVirtuals.
+ self._read_dirVirtuals(profiles)
+
+ #We could initialise _treeVirtuals here, but some consumers want to
+ #pass their own vartree.
+
+ def _read_dirVirtuals(self, profiles):
+ """
+ Read the 'virtuals' file in all profiles.
+ """
+ virtuals_list = []
+ for x in profiles:
+ virtuals_file = os.path.join(x, "virtuals")
+ virtuals_dict = grabdict(virtuals_file)
+ atoms_dict = {}
+ for k, v in virtuals_dict.items():
+ try:
+ virt_atom = Atom(k)
+ except InvalidAtom:
+ virt_atom = None
+ else:
+ if virt_atom.blocker or \
+ str(virt_atom) != str(virt_atom.cp):
+ virt_atom = None
+ if virt_atom is None:
+ writemsg(_("--- Invalid virtuals atom in %s: %s\n") % \
+ (virtuals_file, k), noiselevel=-1)
+ continue
+ providers = []
+ for atom in v:
+ atom_orig = atom
+ if atom[:1] == '-':
+ # allow incrementals
+ atom = atom[1:]
+ try:
+ atom = Atom(atom)
+ except InvalidAtom:
+ atom = None
+ else:
+ if atom.blocker:
+ atom = None
+ if atom is None:
+ writemsg(_("--- Invalid atom in %s: %s\n") % \
+ (virtuals_file, atom_orig), noiselevel=-1)
+ else:
+ if atom_orig == str(atom):
+ # normal atom, so return as Atom instance
+ providers.append(atom)
+ else:
+ # atom has special prefix, so return as string
+ providers.append(atom_orig)
+ if providers:
+ atoms_dict[virt_atom] = providers
+ if atoms_dict:
+ virtuals_list.append(atoms_dict)
+
+ self._dirVirtuals = stack_dictlist(virtuals_list, incremental=True)
+
+ for virt in self._dirVirtuals:
+ # Preference for virtuals decreases from left to right.
+ self._dirVirtuals[virt].reverse()
+
+ def __deepcopy__(self, memo=None):
+ if memo is None:
+ memo = {}
+ result = VirtualsManager(_copy=True)
+ memo[id(self)] = result
+
+ # immutable attributes (internal policy ensures lack of mutation)
+ # _treeVirtuals is initialised by _populate_treeVirtuals().
+ # Before that it's 'None'.
+ result._treeVirtuals = self._treeVirtuals
+ memo[id(self._treeVirtuals)] = self._treeVirtuals
+ # _dirVirtuals is initialised by __init__.
+ result._dirVirtuals = self._dirVirtuals
+ memo[id(self._dirVirtuals)] = self._dirVirtuals
+
+ # mutable attributes (change when add_depgraph_virtuals() is called)
+ result._virtuals = deepcopy(self._virtuals, memo)
+ result._depgraphVirtuals = deepcopy(self._depgraphVirtuals, memo)
+ result._virts_p = deepcopy(self._virts_p, memo)
+
+ return result
+
+ def _compile_virtuals(self):
+ """Stack installed and profile virtuals. Preference for virtuals
+ decreases from left to right.
+ Order of preference:
+ 1. installed and in profile
+ 2. installed only
+ 3. profile only
+ """
+
+ assert self._treeVirtuals is not None, "_populate_treeVirtuals() must be called before " + \
+ "any query about virtuals"
+
+ # Virtuals by profile+tree preferences.
+ ptVirtuals = {}
+
+ for virt, installed_list in self._treeVirtuals.items():
+ profile_list = self._dirVirtuals.get(virt, None)
+ if not profile_list:
+ continue
+ for cp in installed_list:
+ if cp in profile_list:
+ ptVirtuals.setdefault(virt, [])
+ ptVirtuals[virt].append(cp)
+
+ virtuals = stack_dictlist([ptVirtuals, self._treeVirtuals,
+ self._dirVirtuals, self._depgraphVirtuals])
+ self._virtuals = virtuals
+ self._virts_p = None
+
+ def getvirtuals(self):
+ """
+ Computes self._virtuals if necessary and returns it.
+ self._virtuals is only computed on the first call.
+ """
+ if self._virtuals is None:
+ self._compile_virtuals()
+
+ return self._virtuals
+
+ def _populate_treeVirtuals(self, vartree):
+ """
+ Initialize _treeVirtuals from the given vartree.
+ It must not have been initialized already, otherwise
+ our assumptions about immutability don't hold.
+ """
+ assert self._treeVirtuals is None, "treeVirtuals must not be reinitialized"
+
+ self._treeVirtuals = {}
+
+ for provide, cpv_list in vartree.get_all_provides().items():
+ try:
+ provide = Atom(provide)
+ except InvalidAtom:
+ continue
+ self._treeVirtuals[provide.cp] = \
+ [Atom(cpv_getkey(cpv)) for cpv in cpv_list]
+
+ def populate_treeVirtuals_if_needed(self, vartree):
+ """
+ Initialize _treeVirtuals if it hasn't been done already.
+ This is a hack for consumers that already have a populated vartree.
+ """
+ if self._treeVirtuals is not None:
+ return
+
+ self._populate_treeVirtuals(vartree)
+
+ def add_depgraph_virtuals(self, mycpv, virts):
+ """This updates the preferences for old-style virtuals,
+ affecting the behavior of dep_expand() and dep_check()
+ calls. It can change dbapi.match() behavior since that
+ calls dep_expand(). However, dbapi instances have
+ internal match caches that are not invalidated when
+ preferences are updated here. This can potentially
+ lead to some inconsistency (relevant to bug #1343)."""
+
+ #Ensure that self._virtuals is populated.
+ if self._virtuals is None:
+ self.getvirtuals()
+
+ modified = False
+ cp = Atom(cpv_getkey(mycpv))
+ for virt in virts:
+ try:
+ virt = Atom(virt).cp
+ except InvalidAtom:
+ continue
+ providers = self._virtuals.get(virt)
+ if providers and cp in providers:
+ continue
+ providers = self._depgraphVirtuals.get(virt)
+ if providers is None:
+ providers = []
+ self._depgraphVirtuals[virt] = providers
+ if cp not in providers:
+ providers.append(cp)
+ modified = True
+
+ if modified:
+ self._compile_virtuals()
+
+ def get_virts_p(self):
+ if self._virts_p is not None:
+ return self._virts_p
+
+ virts = self.getvirtuals()
+ virts_p = {}
+ for x in virts:
+ vkeysplit = x.split("/")
+ if vkeysplit[1] not in virts_p:
+ virts_p[vkeysplit[1]] = virts[x]
+ self._virts_p = virts_p
+ return virts_p
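
The preference order documented in _compile_virtuals() can be sketched
without stack_dictlist(). This is a toy model, not the portage
implementation, and the provider names are illustrative.

    def compile_virtuals(tree_virtuals, dir_virtuals):
        # Providers both installed and listed in a profile come first,
        # then installed-only, then profile-only; first occurrence wins.
        virtuals = {}
        for virt in set(tree_virtuals) | set(dir_virtuals):
            installed = tree_virtuals.get(virt, [])
            profile = dir_virtuals.get(virt, [])
            both = [cp for cp in installed if cp in profile]
            merged = []
            for cp in both + installed + profile:
                if cp not in merged:
                    merged.append(cp)
            virtuals[virt] = merged
        return virtuals

    print(compile_virtuals(
        {"virtual/jdk": ["dev-java/openjdk"]},
        {"virtual/jdk": ["dev-java/icedtea", "dev-java/openjdk"]}))
    # {'virtual/jdk': ['dev-java/openjdk', 'dev-java/icedtea']}
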
diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/__init__.py b/portage_with_autodep/pym/portage/package/ebuild/_config/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_config/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/env_var_validation.py b/portage_with_autodep/pym/portage/package/ebuild/_config/env_var_validation.py
new file mode 100644
index 0000000..d3db545
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_config/env_var_validation.py
@@ -0,0 +1,23 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.process import find_binary
+from portage.util import shlex_split
+
+def validate_cmd_var(v):
+ """
+ Validate an environment variable value to see if it
+ contains an executable command as the first token.
+ Returns (valid, token_list) where 'valid' is a boolean and 'token_list'
+ is the (possibly empty) list of tokens split by shlex.
+ """
+ invalid = False
+ v_split = shlex_split(v)
+ if not v_split:
+ invalid = True
+ elif os.path.isabs(v_split[0]):
+ invalid = not os.access(v_split[0], os.X_OK)
+ elif find_binary(v_split[0]) is None:
+ invalid = True
+ return (not invalid, v_split)
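
Usage is straightforward (assuming the portage tree above is importable);
whether 'valid' comes back True in the sketch below depends on the host
actually having bzip2 in PATH.

    from portage.package.ebuild._config.env_var_validation import \
        validate_cmd_var

    valid, tokens = validate_cmd_var("bzip2 -9")
    # tokens == ['bzip2', '-9']; valid is True only if the first token
    # is an executable absolute path or resolvable via PATH.
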
diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/features_set.py b/portage_with_autodep/pym/portage/package/ebuild/_config/features_set.py
new file mode 100644
index 0000000..62236fd
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_config/features_set.py
@@ -0,0 +1,128 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'features_set',
+)
+
+import logging
+
+from portage.const import SUPPORTED_FEATURES
+from portage.localization import _
+from portage.output import colorize
+from portage.util import writemsg_level
+
+class features_set(object):
+ """
+ Provides relevant set operations needed for access and modification of
+ config.features. The FEATURES variable is automatically synchronized
+ upon modification.
+
+ Modifications result in a permanent override that will cause the change
+ to propagate to the incremental stacking mechanism in config.regenerate().
+ This eliminates the need to call config.backup_changes() when FEATURES
+ is modified, since any overrides are guaranteed to persist despite calls
+ to config.reset().
+ """
+
+ def __init__(self, settings):
+ self._settings = settings
+ self._features = set()
+
+ def __contains__(self, k):
+ return k in self._features
+
+ def __iter__(self):
+ return iter(self._features)
+
+ def _sync_env_var(self):
+ self._settings['FEATURES'] = ' '.join(sorted(self._features))
+
+ def add(self, k):
+ self._settings.modifying()
+ self._settings._features_overrides.append(k)
+ if k not in self._features:
+ self._features.add(k)
+ self._sync_env_var()
+
+ def update(self, values):
+ self._settings.modifying()
+ values = list(values)
+ self._settings._features_overrides.extend(values)
+ need_sync = False
+ for k in values:
+ if k in self._features:
+ continue
+ self._features.add(k)
+ need_sync = True
+ if need_sync:
+ self._sync_env_var()
+
+ def difference_update(self, values):
+ self._settings.modifying()
+ values = list(values)
+ self._settings._features_overrides.extend('-' + k for k in values)
+ remove_us = self._features.intersection(values)
+ if remove_us:
+ self._features.difference_update(values)
+ self._sync_env_var()
+
+ def remove(self, k):
+ """
+ This never raises KeyError, since it records a permanent override
+ that will prevent the given flag from ever being added again by
+ incremental stacking in config.regenerate().
+ """
+ self.discard(k)
+
+ def discard(self, k):
+ self._settings.modifying()
+ self._settings._features_overrides.append('-' + k)
+ if k in self._features:
+ self._features.remove(k)
+ self._sync_env_var()
+
+ def _validate(self):
+ """
+ Implements unknown-features-warn and unknown-features-filter.
+ """
+ if 'unknown-features-warn' in self._features:
+ unknown_features = \
+ self._features.difference(SUPPORTED_FEATURES)
+ if unknown_features:
+ unknown_features = unknown_features.difference(
+ self._settings._unknown_features)
+ if unknown_features:
+ self._settings._unknown_features.update(unknown_features)
+ writemsg_level(colorize("BAD",
+ _("FEATURES variable contains unknown value(s): %s") % \
+ ", ".join(sorted(unknown_features))) \
+ + "\n", level=logging.WARNING, noiselevel=-1)
+
+ if 'unknown-features-filter' in self._features:
+ unknown_features = \
+ self._features.difference(SUPPORTED_FEATURES)
+ if unknown_features:
+ self.difference_update(unknown_features)
+ self._prune_overrides()
+
+ def _prune_overrides(self):
+ """
+ If there are lots of invalid package.env FEATURES settings
+ then unknown-features-filter can make _features_overrides
+ grow larger and larger, so prune it. This performs incremental
+ stacking with preservation of negative values since they need
+ to persist for future config.regenerate() calls.
+ """
+ overrides_set = set(self._settings._features_overrides)
+ positive = set()
+ negative = set()
+ for x in self._settings._features_overrides:
+ if x[:1] == '-':
+ positive.discard(x[1:])
+ negative.add(x[1:])
+ else:
+ positive.add(x)
+ negative.discard(x)
+ self._settings._features_overrides[:] = \
+ list(positive) + list('-' + x for x in negative)
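
A minimal sketch of the override bookkeeping, using a dict-based stand-in
that provides only the attributes features_set actually touches:

    from portage.package.ebuild._config.features_set import features_set

    class StubSettings(dict):
        # Just enough of config for features_set: the overrides list,
        # the unknown-features memo, and a no-op modifying() hook.
        def __init__(self):
            dict.__init__(self)
            self._features_overrides = []
            self._unknown_features = set()
        def modifying(self):
            pass

    settings = StubSettings()
    features = features_set(settings)
    features.update(["sandbox", "ccache"])
    features.discard("ccache")
    print(settings["FEATURES"])          # sandbox
    print(settings._features_overrides)  # ['sandbox', 'ccache', '-ccache']
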
diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/helper.py b/portage_with_autodep/pym/portage/package/ebuild/_config/helper.py
new file mode 100644
index 0000000..4f46781
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_config/helper.py
@@ -0,0 +1,64 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'ordered_by_atom_specificity', 'prune_incremental',
+)
+
+from _emerge.Package import Package
+from portage.dep import best_match_to_list, _repo_separator
+
+def ordered_by_atom_specificity(cpdict, pkg, repo=None):
+ """
+ Return a list of matched values from the given cpdict,
+ in ascending order by atom specificity. The rationale
+ for this order is that package.* config files are
+ typically written in ChangeLog-like fashion, so it's
+ most friendly if the order in which the atoms are written
+ does not matter. Therefore, settings from more specific
+ atoms override those of less specific atoms. Without
+ this behavior, settings from relatively unspecific atoms
+ would (somewhat confusingly) override the settings of
+ more specific atoms, requiring people to adjust the
+ order in which atoms are listed in the config file to
+ achieve the desired results (and thus corrupting the
+ ChangeLog-like ordering of the file).
+ """
+ if repo and repo != Package.UNKNOWN_REPO:
+ pkg = pkg + _repo_separator + repo
+
+ results = []
+ keys = list(cpdict)
+
+ while keys:
+ bestmatch = best_match_to_list(pkg, keys)
+ if bestmatch:
+ keys.remove(bestmatch)
+ results.append(cpdict[bestmatch])
+ else:
+ break
+
+ if results:
+ # reverse, so the most specific atoms come last
+ results.reverse()
+
+ return results
+
+def prune_incremental(split):
+ """
+ Prune off any parts of an incremental variable that are
+ made irrelevant by the last occurring * or -*. This
+ could be more aggressive but that might be confusing
+ and the point is just to reduce noise a bit.
+ """
+ for i, x in enumerate(reversed(split)):
+ if x == '*':
+ split = split[-i-1:]
+ break
+ elif x == '-*':
+ if i == 0:
+ split = []
+ else:
+ split = split[-i:]
+ break
+ return split
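
prune_incremental() is pure and easy to exercise directly
(ordered_by_atom_specificity() needs real Atom objects, so it is not shown):

    from portage.package.ebuild._config.helper import prune_incremental

    print(prune_incremental(["a", "-*", "b", "c"]))  # ['b', 'c']
    print(prune_incremental(["a", "*", "b"]))        # ['*', 'b']
    print(prune_incremental(["a", "b", "-*"]))       # []
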
diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/special_env_vars.py b/portage_with_autodep/pym/portage/package/ebuild/_config/special_env_vars.py
new file mode 100644
index 0000000..6d42809
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_config/special_env_vars.py
@@ -0,0 +1,185 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'case_insensitive_vars', 'default_globals', 'env_blacklist', \
+ 'environ_filter', 'environ_whitelist', 'environ_whitelist_re',
+)
+
+import re
+
+# Blacklisted variables are internal variables that are never allowed
+# to enter the config instance from the external environment or
+# configuration files.
+env_blacklist = frozenset((
+ "A", "AA", "CATEGORY", "DEPEND", "DESCRIPTION", "EAPI",
+ "EBUILD_FORCE_TEST", "EBUILD_PHASE", "EBUILD_SKIP_MANIFEST",
+ "ED", "EMERGE_FROM", "EPREFIX", "EROOT",
+ "HOMEPAGE", "INHERITED", "IUSE",
+ "KEYWORDS", "LICENSE", "MERGE_TYPE",
+ "PDEPEND", "PF", "PKGUSE", "PORTAGE_BACKGROUND",
+ "PORTAGE_BACKGROUND_UNMERGE", "PORTAGE_BUILDIR_LOCKED",
+ "PORTAGE_BUILT_USE", "PORTAGE_CONFIGROOT", "PORTAGE_IUSE",
+ "PORTAGE_NONFATAL", "PORTAGE_REPO_NAME", "PORTAGE_SANDBOX_COMPAT_LEVEL",
+ "PORTAGE_USE", "PROPERTIES", "PROVIDE", "RDEPEND", "RESTRICT",
+ "ROOT", "SLOT", "SRC_URI"
+))
+
+environ_whitelist = []
+
+# Whitelisted variables are always allowed to enter the ebuild
+# environment. Generally, this only includes special portage
+# variables. Ebuilds can unset variables that are not whitelisted
+# and rely on them remaining unset for future phases, without them
+# leaking back in from various locations (bug #189417). It's very
+# important to set our special BASH_ENV variable in the ebuild
+# environment in order to prevent sandbox from sourcing /etc/profile
+ # in its bashrc (causing major leakage).
+environ_whitelist += [
+ "ACCEPT_LICENSE", "BASH_ENV", "BUILD_PREFIX", "D",
+ "DISTDIR", "DOC_SYMLINKS_DIR", "EAPI", "EBUILD",
+ "EBUILD_FORCE_TEST",
+ "EBUILD_PHASE", "ECLASSDIR", "ECLASS_DEPTH", "ED",
+ "EMERGE_FROM", "EPREFIX", "EROOT",
+ "FEATURES", "FILESDIR", "HOME", "MERGE_TYPE", "NOCOLOR", "PATH",
+ "PKGDIR",
+ "PKGUSE", "PKG_LOGDIR", "PKG_TMPDIR",
+ "PORTAGE_ACTUAL_DISTDIR", "PORTAGE_ARCHLIST",
+ "PORTAGE_BASHRC", "PM_EBUILD_HOOK_DIR",
+ "PORTAGE_BINPKG_FILE", "PORTAGE_BINPKG_TAR_OPTS",
+ "PORTAGE_BINPKG_TMPFILE",
+ "PORTAGE_BIN_PATH",
+ "PORTAGE_BUILDDIR", "PORTAGE_BUNZIP2_COMMAND", "PORTAGE_BZIP2_COMMAND",
+ "PORTAGE_COLORMAP",
+ "PORTAGE_CONFIGROOT", "PORTAGE_DEBUG", "PORTAGE_DEPCACHEDIR",
+ "PORTAGE_EBUILD_EXIT_FILE", "PORTAGE_FEATURES",
+ "PORTAGE_GID", "PORTAGE_GRPNAME",
+ "PORTAGE_INST_GID", "PORTAGE_INST_UID",
+ "PORTAGE_IPC_DAEMON", "PORTAGE_IUSE",
+ "PORTAGE_LOG_FILE",
+ "PORTAGE_PYM_PATH", "PORTAGE_PYTHON", "PORTAGE_QUIET",
+ "PORTAGE_REPO_NAME", "PORTAGE_RESTRICT",
+ "PORTAGE_SANDBOX_COMPAT_LEVEL", "PORTAGE_SIGPIPE_STATUS",
+ "PORTAGE_TMPDIR", "PORTAGE_UPDATE_ENV", "PORTAGE_USERNAME",
+ "PORTAGE_VERBOSE", "PORTAGE_WORKDIR_MODE",
+ "PORTDIR", "PORTDIR_OVERLAY", "PREROOTPATH", "PROFILE_PATHS",
+ "REPLACING_VERSIONS", "REPLACED_BY_VERSION",
+ "ROOT", "ROOTPATH", "T", "TMP", "TMPDIR",
+ "USE_EXPAND", "USE_ORDER", "WORKDIR",
+ "XARGS",
+]
+
+# user config variables
+environ_whitelist += [
+ "DOC_SYMLINKS_DIR", "INSTALL_MASK", "PKG_INSTALL_MASK"
+]
+
+environ_whitelist += [
+ "A", "AA", "CATEGORY", "P", "PF", "PN", "PR", "PV", "PVR"
+]
+
+# misc variables inherited from the calling environment
+environ_whitelist += [
+ "COLORTERM", "DISPLAY", "EDITOR", "LESS",
+ "LESSOPEN", "LOGNAME", "LS_COLORS", "PAGER",
+ "TERM", "TERMCAP", "USER",
+ 'ftp_proxy', 'http_proxy', 'no_proxy',
+]
+
+# tempdir settings
+environ_whitelist += [
+ "TMPDIR", "TEMP", "TMP",
+]
+
+# localization settings
+environ_whitelist += [
+ "LANG", "LC_COLLATE", "LC_CTYPE", "LC_MESSAGES",
+ "LC_MONETARY", "LC_NUMERIC", "LC_TIME", "LC_PAPER",
+ "LC_ALL",
+]
+
+# other variables inherited from the calling environment
+environ_whitelist += [
+ "CVS_RSH", "ECHANGELOG_USER",
+ "GPG_AGENT_INFO", "LOG_SOCKET",
+ "SSH_AGENT_PID", "SSH_AUTH_SOCK"
+ "STY", "WINDOW", "XAUTHORITY",
+]
+
+environ_whitelist = frozenset(environ_whitelist)
+
+environ_whitelist_re = re.compile(r'^(CCACHE_|DISTCC_).*')
+
+# Filter selected variables in the config.environ() method so that
+# they don't needlessly propagate down into the ebuild environment.
+environ_filter = []
+
+# Exclude anything that could be extremely long here (like SRC_URI)
+# since that could cause execve() calls to fail with E2BIG errors. For
+# example, see bug #262647.
+environ_filter += [
+ 'DEPEND', 'RDEPEND', 'PDEPEND', 'SRC_URI',
+]
+
+# misc variables inherited from the calling environment
+environ_filter += [
+ "INFOPATH", "MANPATH", "USER",
+]
+
+# variables that break bash
+environ_filter += [
+ "HISTFILE", "POSIXLY_CORRECT",
+]
+
+# portage config variables and variables set directly by portage
+environ_filter += [
+ "ACCEPT_CHOSTS", "ACCEPT_KEYWORDS", "ACCEPT_PROPERTIES", "AUTOCLEAN",
+ "CLEAN_DELAY", "COLLISION_IGNORE", "CONFIG_PROTECT",
+ "CONFIG_PROTECT_MASK", "EGENCACHE_DEFAULT_OPTS", "EMERGE_DEFAULT_OPTS",
+ "EMERGE_LOG_DIR",
+ "EMERGE_WARNING_DELAY",
+ "FETCHCOMMAND", "FETCHCOMMAND_FTP",
+ "FETCHCOMMAND_HTTP", "FETCHCOMMAND_HTTPS",
+ "FETCHCOMMAND_RSYNC", "FETCHCOMMAND_SFTP",
+ "GENTOO_MIRRORS", "NOCONFMEM", "O",
+ "PORTAGE_BACKGROUND", "PORTAGE_BACKGROUND_UNMERGE",
+ "PORTAGE_BINHOST_CHUNKSIZE", "PORTAGE_BUILDIR_LOCKED", "PORTAGE_CALLER",
+ "PORTAGE_ELOG_CLASSES",
+ "PORTAGE_ELOG_MAILFROM", "PORTAGE_ELOG_MAILSUBJECT",
+ "PORTAGE_ELOG_MAILURI", "PORTAGE_ELOG_SYSTEM",
+ "PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS", "PORTAGE_FETCH_RESUME_MIN_SIZE",
+ "PORTAGE_GPG_DIR",
+ "PORTAGE_GPG_KEY", "PORTAGE_GPG_SIGNING_COMMAND",
+ "PORTAGE_IONICE_COMMAND",
+ "PORTAGE_PACKAGE_EMPTY_ABORT",
+ "PORTAGE_REPO_DUPLICATE_WARN",
+ "PORTAGE_RO_DISTDIRS",
+ "PORTAGE_RSYNC_EXTRA_OPTS", "PORTAGE_RSYNC_OPTS",
+ "PORTAGE_RSYNC_RETRIES", "PORTAGE_SYNC_STALE",
+ "PORTAGE_USE", "PORT_LOGDIR",
+ "QUICKPKG_DEFAULT_OPTS",
+ "RESUMECOMMAND", "RESUMECOMMAND_FTP",
+ "RESUMECOMMAND_HTTP", "RESUMECOMMAND_HTTPS",
+ "RESUMECOMMAND_RSYNC", "RESUMECOMMAND_SFTP",
+ "SYNC", "USE_EXPAND_HIDDEN", "USE_ORDER",
+]
+
+environ_filter = frozenset(environ_filter)
+
+# Variables that are not allowed to have per-repo or per-package
+# settings.
+global_only_vars = frozenset([
+ "CONFIG_PROTECT",
+])
+
+default_globals = {
+ 'ACCEPT_LICENSE': '* -@EULA',
+ 'ACCEPT_PROPERTIES': '*',
+ 'PORTAGE_BZIP2_COMMAND': 'bzip2',
+}
+
+validate_commands = ('PORTAGE_BZIP2_COMMAND', 'PORTAGE_BUNZIP2_COMMAND',)
+
+# To enhance usability, make some vars case insensitive
+# by forcing them to lower case.
+case_insensitive_vars = ('AUTOCLEAN', 'NOCOLOR',)
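
A toy predicate showing how the whitelist, the filter and the
CCACHE_/DISTCC_ pattern fit together. This is an illustration only, not
the actual config.environ() logic, which applies further per-phase rules.

    from portage.package.ebuild._config import special_env_vars

    def keep_in_ebuild_env(var):
        # Filtered names are dropped; otherwise a name passes if it is
        # whitelisted or matches the CCACHE_/DISTCC_ pattern.
        if var in special_env_vars.environ_filter:
            return False
        return (var in special_env_vars.environ_whitelist or
            special_env_vars.environ_whitelist_re.match(var) is not None)

    print(keep_in_ebuild_env("DISTDIR"))              # True
    print(keep_in_ebuild_env("CCACHE_DIR"))           # True
    print(keep_in_ebuild_env("EMERGE_DEFAULT_OPTS"))  # False
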
diff --git a/portage_with_autodep/pym/portage/package/ebuild/_ipc/ExitCommand.py b/portage_with_autodep/pym/portage/package/ebuild/_ipc/ExitCommand.py
new file mode 100644
index 0000000..f14050b
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_ipc/ExitCommand.py
@@ -0,0 +1,27 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.package.ebuild._ipc.IpcCommand import IpcCommand
+
+class ExitCommand(IpcCommand):
+
+ __slots__ = ('exitcode', 'reply_hook',)
+
+ def __init__(self):
+ IpcCommand.__init__(self)
+ self.reply_hook = None
+ self.exitcode = None
+
+ def __call__(self, argv):
+
+ if self.exitcode is not None:
+ # Ignore all but the first call, since if die is called
+ # then we certainly want to honor that exitcode, even if
+ # the ebuild process manages to send a second exit
+ # command.
+ self.reply_hook = None
+ else:
+ self.exitcode = int(argv[1])
+
+ # (stdout, stderr, returncode)
+ return ('', '', 0)
diff --git a/portage_with_autodep/pym/portage/package/ebuild/_ipc/IpcCommand.py b/portage_with_autodep/pym/portage/package/ebuild/_ipc/IpcCommand.py
new file mode 100644
index 0000000..efb27f0
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_ipc/IpcCommand.py
@@ -0,0 +1,9 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+class IpcCommand(object):
+
+ __slots__ = ()
+
+ def __call__(self, argv):
+ raise NotImplementedError(self)
diff --git a/portage_with_autodep/pym/portage/package/ebuild/_ipc/QueryCommand.py b/portage_with_autodep/pym/portage/package/ebuild/_ipc/QueryCommand.py
new file mode 100644
index 0000000..fb6e61e
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_ipc/QueryCommand.py
@@ -0,0 +1,98 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import io
+
+import portage
+from portage import os
+from portage import _unicode_decode
+from portage.dep import Atom
+from portage.elog import messages as elog_messages
+from portage.exception import InvalidAtom
+from portage.package.ebuild._ipc.IpcCommand import IpcCommand
+from portage.util import normalize_path
+from portage.versions import best
+
+class QueryCommand(IpcCommand):
+
+ __slots__ = ('phase', 'settings',)
+
+ _db = None
+
+ def __init__(self, settings, phase):
+ IpcCommand.__init__(self)
+ self.settings = settings
+ self.phase = phase
+
+ def __call__(self, argv):
+ """
+ @return: tuple of (stdout, stderr, returncode)
+ """
+
+ cmd, root, atom_str = argv
+
+ try:
+ atom = Atom(atom_str)
+ except InvalidAtom:
+ return ('', 'invalid atom: %s\n' % atom_str, 2)
+
+ warnings = []
+ try:
+ atom = Atom(atom_str, eapi=self.settings.get('EAPI'))
+ except InvalidAtom as e:
+ warnings.append(_unicode_decode("QA Notice: %s: %s") % (cmd, e))
+
+ use = self.settings.get('PORTAGE_BUILT_USE')
+ if use is None:
+ use = self.settings['PORTAGE_USE']
+
+ use = frozenset(use.split())
+ atom = atom.evaluate_conditionals(use)
+
+ db = self._db
+ if db is None:
+ db = portage.db
+
+ warnings_str = ''
+ if warnings:
+ warnings_str = self._elog('eqawarn', warnings)
+
+ root = normalize_path(root).rstrip(os.path.sep) + os.path.sep
+ if root not in db:
+ return ('', 'invalid ROOT: %s\n' % root, 2)
+
+ vardb = db[root]["vartree"].dbapi
+
+ if cmd == 'has_version':
+ if vardb.match(atom):
+ returncode = 0
+ else:
+ returncode = 1
+ return ('', warnings_str, returncode)
+ elif cmd == 'best_version':
+ m = best(vardb.match(atom))
+ return ('%s\n' % m, warnings_str, 0)
+ else:
+ return ('', 'invalid command: %s\n' % cmd, 2)
+
+ def _elog(self, elog_funcname, lines):
+ """
+ This returns a string, to be returned via ipc and displayed at the
+ appropriate place in the build output. We wouldn't want to open the
+ log here since it is already opened by AbstractEbuildProcess and we
+ don't want to corrupt it, especially if it is being written with
+ compression.
+ """
+ out = io.StringIO()
+ phase = self.phase
+ elog_func = getattr(elog_messages, elog_funcname)
+ global_havecolor = portage.output.havecolor
+ try:
+ portage.output.havecolor = \
+ self.settings.get('NOCOLOR', 'false').lower() in ('no', 'false')
+ for line in lines:
+ elog_func(line, phase=phase, key=self.settings.mycpv, out=out)
+ finally:
+ portage.output.havecolor = global_havecolor
+ msg = out.getvalue()
+ return msg
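
The (stdout, stderr, returncode) contract maps directly onto the
has_version/best_version helpers that ebuilds invoke over the IPC pipe. A
hedged usage sketch, assuming 'qc' is a QueryCommand wired to a populated
vartree; the atom is illustrative.

    # stdout, stderr, rc = qc(['has_version', '/', 'dev-lang/python'])
    #   rc == 0: an installed package matches the atom
    #   rc == 1: no installed package matches
    #   rc == 2: invalid atom, unknown ROOT, or unknown command
    #
    # stdout, stderr, rc = qc(['best_version', '/', 'dev-lang/python'])
    #   stdout holds the highest matching cpv plus a trailing newline;
    #   stderr carries any QA warnings produced while parsing the atom.
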
diff --git a/portage_with_autodep/pym/portage/package/ebuild/_ipc/__init__.py b/portage_with_autodep/pym/portage/package/ebuild/_ipc/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_ipc/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/portage_with_autodep/pym/portage/package/ebuild/_spawn_nofetch.py b/portage_with_autodep/pym/portage/package/ebuild/_spawn_nofetch.py
new file mode 100644
index 0000000..befdc89
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_spawn_nofetch.py
@@ -0,0 +1,82 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import shutil
+import tempfile
+
+from portage import os
+from portage.const import EBUILD_PHASES
+from portage.elog import elog_process
+from portage.package.ebuild.config import config
+from portage.package.ebuild.doebuild import doebuild_environment
+from portage.package.ebuild.prepare_build_dirs import prepare_build_dirs
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.PollScheduler import PollScheduler
+
+def spawn_nofetch(portdb, ebuild_path, settings=None):
+ """
+ This spawns pkg_nofetch if appropriate. The settings parameter
+ is useful only if setcpv has already been called in order
+ to cache metadata. It will be cloned internally, in order to
+ prevent any changes from interfering with the calling code.
+ If settings is None then a suitable config instance will be
+ acquired from the given portdbapi instance.
+
+ A private PORTAGE_BUILDDIR will be created and cleaned up, in
+ order to avoid any interference with any other processes.
+ If PORTAGE_TMPDIR is writable, that will be used, otherwise
+ the default directory for the tempfile module will be used.
+
+ We only call the pkg_nofetch phase if either RESTRICT=fetch
+ is set or the package has explicitly overridden the default
+ pkg_nofetch implementation. This allows specialized messages
+ to be displayed for problematic packages even though they do
+ not set RESTRICT=fetch (bug #336499).
+
+ This function does nothing if the PORTAGE_PARALLEL_FETCHONLY
+ variable is set in the config instance.
+ """
+
+ if settings is None:
+ settings = config(clone=portdb.settings)
+ else:
+ settings = config(clone=settings)
+
+ if 'PORTAGE_PARALLEL_FETCHONLY' in settings:
+ return
+
+ # We must create our private PORTAGE_TMPDIR before calling
+ # doebuild_environment(), since lots of variables such
+ # as PORTAGE_BUILDDIR refer to paths inside PORTAGE_TMPDIR.
+ portage_tmpdir = settings.get('PORTAGE_TMPDIR')
+ if not portage_tmpdir or not os.access(portage_tmpdir, os.W_OK):
+ portage_tmpdir = None
+ private_tmpdir = tempfile.mkdtemp(dir=portage_tmpdir)
+ settings['PORTAGE_TMPDIR'] = private_tmpdir
+ settings.backup_changes('PORTAGE_TMPDIR')
+ # private temp dir was just created, so it's not locked yet
+ settings.pop('PORTAGE_BUILDIR_LOCKED', None)
+
+ try:
+ doebuild_environment(ebuild_path, 'nofetch',
+ settings=settings, db=portdb)
+ restrict = settings['PORTAGE_RESTRICT'].split()
+ defined_phases = settings['DEFINED_PHASES'].split()
+ if not defined_phases:
+ # When DEFINED_PHASES is undefined, assume all
+ # phases are defined.
+ defined_phases = EBUILD_PHASES
+
+ if 'fetch' not in restrict and \
+ 'nofetch' not in defined_phases:
+ return
+
+ prepare_build_dirs(settings=settings)
+ ebuild_phase = EbuildPhase(background=False,
+ phase='nofetch', scheduler=PollScheduler().sched_iface,
+ settings=settings)
+ ebuild_phase.start()
+ ebuild_phase.wait()
+ elog_process(settings.mycpv, settings)
+ finally:
+ shutil.rmtree(private_tmpdir)
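
A hedged usage sketch, assuming an already initialized portage environment;
the package atom is hypothetical.

    import portage
    from portage.package.ebuild._spawn_nofetch import spawn_nofetch

    portdb = portage.db[portage.root]['porttree'].dbapi
    ebuild_path = portdb.findname('sys-apps/foo-1.0')  # hypothetical cpv
    if ebuild_path is not None:
        spawn_nofetch(portdb, ebuild_path)
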
diff --git a/portage_with_autodep/pym/portage/package/ebuild/config.py b/portage_with_autodep/pym/portage/package/ebuild/config.py
new file mode 100644
index 0000000..a8c6ad6
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/config.py
@@ -0,0 +1,2224 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = [
+ 'autouse', 'best_from_dict', 'check_config_instance', 'config',
+]
+
+import copy
+from itertools import chain
+import logging
+import re
+import sys
+import warnings
+
+from _emerge.Package import Package
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.data:portage_gid',
+)
+from portage import bsd_chflags, \
+ load_mod, os, selinux, _unicode_decode
+from portage.const import CACHE_PATH, \
+ DEPCACHE_PATH, INCREMENTALS, MAKE_CONF_FILE, \
+ MODULES_FILE_PATH, PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, \
+ PRIVATE_PATH, PROFILE_PATH, USER_CONFIG_PATH, \
+ USER_VIRTUALS_FILE
+from portage.const import _SANDBOX_COMPAT_LEVEL
+from portage.dbapi import dbapi
+from portage.dbapi.porttree import portdbapi
+from portage.dbapi.vartree import vartree
+from portage.dep import Atom, isvalidatom, match_from_list, use_reduce, _repo_separator, _slot_separator
+from portage.eapi import eapi_exports_AA, eapi_exports_merge_type, \
+ eapi_supports_prefix, eapi_exports_replace_vars
+from portage.env.loaders import KeyValuePairFileLoader
+from portage.exception import InvalidDependString, PortageException
+from portage.localization import _
+from portage.output import colorize
+from portage.process import fakeroot_capable, sandbox_capable
+from portage.repository.config import load_repository_config
+from portage.util import ensure_dirs, getconfig, grabdict, \
+ grabdict_package, grabfile, grabfile_package, LazyItemsDict, \
+ normalize_path, shlex_split, stack_dictlist, stack_dicts, stack_lists, \
+ writemsg, writemsg_level
+from portage.versions import catpkgsplit, catsplit, cpv_getkey
+
+from portage.package.ebuild._config import special_env_vars
+from portage.package.ebuild._config.env_var_validation import validate_cmd_var
+from portage.package.ebuild._config.features_set import features_set
+from portage.package.ebuild._config.KeywordsManager import KeywordsManager
+from portage.package.ebuild._config.LicenseManager import LicenseManager
+from portage.package.ebuild._config.UseManager import UseManager
+from portage.package.ebuild._config.LocationsManager import LocationsManager
+from portage.package.ebuild._config.MaskManager import MaskManager
+from portage.package.ebuild._config.VirtualsManager import VirtualsManager
+from portage.package.ebuild._config.helper import ordered_by_atom_specificity, prune_incremental
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+def autouse(myvartree, use_cache=1, mysettings=None):
+ warnings.warn("portage.autouse() is deprecated",
+ DeprecationWarning, stacklevel=2)
+ return ""
+
+def check_config_instance(test):
+ if not isinstance(test, config):
+ raise TypeError("Invalid type for config object: %s (should be %s)" % (test.__class__, config))
+
+def best_from_dict(key, top_dict, key_order, EmptyOnError=1, FullCopy=1, AllowEmpty=1):
+ for x in key_order:
+ if x in top_dict and key in top_dict[x]:
+ if FullCopy:
+ return copy.deepcopy(top_dict[x][key])
+ else:
+ return top_dict[x][key]
+ if EmptyOnError:
+ return ""
+ else:
+ raise KeyError("Key not found in list; '%s'" % key)
+
+def _lazy_iuse_regex(iuse_implicit):
+ """
+ The PORTAGE_IUSE value is lazily evaluated since re.escape() is slow
+ and the value is only used when an ebuild phase needs to be executed
+ (it's used only to generate QA notices).
+ """
+ # Escape anything except ".*" which is supposed to pass through from
+ # _get_implicit_iuse().
+ regex = sorted(re.escape(x) for x in iuse_implicit)
+ regex = "^(%s)$" % "|".join(regex)
+ regex = regex.replace("\\.\\*", ".*")
+ return regex
+
+class _iuse_implicit_match_cache(object):
+
+ def __init__(self, settings):
+ self._iuse_implicit_re = re.compile("^(%s)$" % \
+ "|".join(settings._get_implicit_iuse()))
+ self._cache = {}
+
+ def __call__(self, flag):
+ """
+ Returns True if the flag is matched, False otherwise.
+ """
+ try:
+ return self._cache[flag]
+ except KeyError:
+ m = self._iuse_implicit_re.match(flag) is not None
+ self._cache[flag] = m
+ return m
+
+class config(object):
+ """
+ This class encompasses the main portage configuration. Data is pulled from
+ ROOT/PORTDIR/profiles/, from ROOT/etc/make.profile incrementally through all
+ parent profiles, as well as from ROOT/PORTAGE_CONFIGROOT/* for user-specified
+ overrides.
+
+ Generally, if you need data like USE flags, FEATURES, environment variables,
+ virtuals, etc., you look in here.
+ """
+
+ _setcpv_aux_keys = ('DEFINED_PHASES', 'DEPEND', 'EAPI',
+ 'INHERITED', 'IUSE', 'REQUIRED_USE', 'KEYWORDS', 'LICENSE', 'PDEPEND',
+ 'PROPERTIES', 'PROVIDE', 'RDEPEND', 'SLOT',
+ 'repository', 'RESTRICT',)
+
+ _case_insensitive_vars = special_env_vars.case_insensitive_vars
+ _default_globals = special_env_vars.default_globals
+ _env_blacklist = special_env_vars.env_blacklist
+ _environ_filter = special_env_vars.environ_filter
+ _environ_whitelist = special_env_vars.environ_whitelist
+ _environ_whitelist_re = special_env_vars.environ_whitelist_re
+ _global_only_vars = special_env_vars.global_only_vars
+
+ def __init__(self, clone=None, mycpv=None, config_profile_path=None,
+ config_incrementals=None, config_root=None, target_root=None,
+ _eprefix=None, local_config=True, env=None, _unmatched_removal=False):
+ """
+ @param clone: If provided, init will use deepcopy to copy by value the instance.
+ @type clone: Instance of config class.
+ @param mycpv: CPV to load up (see setcpv); this is the same as calling init with mycpv=None
+ and then calling instance.setcpv(mycpv).
+ @type mycpv: String
+ @param config_profile_path: Configurable path to the profile (usually PROFILE_PATH from portage.const)
+ @type config_profile_path: String
+ @param config_incrementals: List of incremental variables
+ (defaults to portage.const.INCREMENTALS)
+ @type config_incrementals: List
+ @param config_root: path to read local config from (defaults to "/", see PORTAGE_CONFIGROOT)
+ @type config_root: String
+ @param target_root: __init__ override of $ROOT env variable.
+ @type target_root: String
+ @param _eprefix: set the EPREFIX variable (private, used by internal tests)
+ @type _eprefix: String
+ @param local_config: Enables loading of local config (/etc/portage); used mostly by repoman to
+ ignore local config (keywording and unmasking)
+ @type local_config: Boolean
+ @param env: The calling environment which is used to override settings.
+ Defaults to os.environ if unspecified.
+ @type env: dict
+ @param _unmatched_removal: Enabled by repoman when the
+ --unmatched-removal option is given.
+ @type _unmatched_removal: Boolean
+ """
+
+ # rename local _eprefix variable for convenience
+ eprefix = _eprefix
+ del _eprefix
+
+ # When initializing the global portage.settings instance, avoid
+ # raising exceptions whenever possible since exceptions thrown
+ # from 'import portage' or 'import portage.exceptions' statements
+ # can practically render the api unusable for api consumers.
+ tolerant = hasattr(portage, '_initializing_globals')
+ self._tolerant = tolerant
+
+ self.locked = 0
+ self.mycpv = None
+ self._setcpv_args_hash = None
+ self.puse = ""
+ self._penv = []
+ self.modifiedkeys = []
+ self.uvlist = []
+ self._accept_chost_re = None
+ self._accept_properties = None
+ self._features_overrides = []
+ self._make_defaults = None
+
+ # _unknown_features records unknown features that
+ # have triggered warning messages, and ensures that
+ # the same warning isn't shown twice.
+ self._unknown_features = set()
+
+ self.local_config = local_config
+
+ if clone:
+ # For immutable attributes, use shallow copy for
+ # speed and memory conservation.
+ self._tolerant = clone._tolerant
+ self.categories = clone.categories
+ self.depcachedir = clone.depcachedir
+ self.incrementals = clone.incrementals
+ self.module_priority = clone.module_priority
+ self.profile_path = clone.profile_path
+ self.profiles = clone.profiles
+ self.packages = clone.packages
+ self.repositories = clone.repositories
+ self._iuse_implicit_match = clone._iuse_implicit_match
+ self._non_user_variables = clone._non_user_variables
+ self._repo_make_defaults = clone._repo_make_defaults
+ self.usemask = clone.usemask
+ self.useforce = clone.useforce
+ self.puse = clone.puse
+ self.user_profile_dir = clone.user_profile_dir
+ self.local_config = clone.local_config
+ self.make_defaults_use = clone.make_defaults_use
+ self.mycpv = clone.mycpv
+ self._setcpv_args_hash = clone._setcpv_args_hash
+
+ # immutable attributes (internal policy ensures lack of mutation)
+ self._keywords_manager = clone._keywords_manager
+ self._use_manager = clone._use_manager
+ self._mask_manager = clone._mask_manager
+
+ # shared mutable attributes
+ self._unknown_features = clone._unknown_features
+
+ self.modules = copy.deepcopy(clone.modules)
+ self._penv = copy.deepcopy(clone._penv)
+
+ self.configdict = copy.deepcopy(clone.configdict)
+ self.configlist = [
+ self.configdict['env.d'],
+ self.configdict['repo'],
+ self.configdict['pkginternal'],
+ self.configdict['globals'],
+ self.configdict['defaults'],
+ self.configdict['conf'],
+ self.configdict['pkg'],
+ self.configdict['env'],
+ ]
+ self.lookuplist = self.configlist[:]
+ self.lookuplist.reverse()
+ self._use_expand_dict = copy.deepcopy(clone._use_expand_dict)
+ self.backupenv = self.configdict["backupenv"]
+ self.prevmaskdict = copy.deepcopy(clone.prevmaskdict)
+ self.pprovideddict = copy.deepcopy(clone.pprovideddict)
+ self.features = features_set(self)
+ self.features._features = copy.deepcopy(clone.features._features)
+ self._features_overrides = copy.deepcopy(clone._features_overrides)
+
+			# Strictly speaking, _license_manager is not immutable. Users need to
+			# ensure that extract_global_changes() is called right after __init__
+			# (if at all). It also has the mutable member _undef_lic_groups, which
+			# is used to track undefined license groups so that an error message
+			# is not displayed for the same group again and again. Because of
+			# this, it's useful to share it between all LicenseManager instances.
+ self._license_manager = clone._license_manager
+
+ self._virtuals_manager = copy.deepcopy(clone._virtuals_manager)
+
+ self._accept_properties = copy.deepcopy(clone._accept_properties)
+ self._ppropertiesdict = copy.deepcopy(clone._ppropertiesdict)
+ self._penvdict = copy.deepcopy(clone._penvdict)
+ self._expand_map = copy.deepcopy(clone._expand_map)
+
+ else:
+ locations_manager = LocationsManager(config_root=config_root,
+ config_profile_path=config_profile_path, eprefix=eprefix,
+ local_config=local_config, target_root=target_root)
+
+ eprefix = locations_manager.eprefix
+ config_root = locations_manager.config_root
+ self.profiles = locations_manager.profiles
+ self.profile_path = locations_manager.profile_path
+ self.user_profile_dir = locations_manager.user_profile_dir
+ abs_user_config = locations_manager.abs_user_config
+
+ make_conf = getconfig(
+ os.path.join(config_root, MAKE_CONF_FILE),
+ tolerant=tolerant, allow_sourcing=True) or {}
+
+ make_conf.update(getconfig(
+ os.path.join(abs_user_config, 'make.conf'),
+ tolerant=tolerant, allow_sourcing=True,
+ expand=make_conf) or {})
+
+ # Allow ROOT setting to come from make.conf if it's not overridden
+ # by the constructor argument (from the calling environment).
+ locations_manager.set_root_override(make_conf.get("ROOT"))
+ target_root = locations_manager.target_root
+ eroot = locations_manager.eroot
+ self.global_config_path = locations_manager.global_config_path
+
+ if config_incrementals is None:
+ self.incrementals = INCREMENTALS
+ else:
+ self.incrementals = config_incrementals
+ if not isinstance(self.incrementals, frozenset):
+ self.incrementals = frozenset(self.incrementals)
+
+ self.module_priority = ("user", "default")
+ self.modules = {}
+ modules_loader = KeyValuePairFileLoader(
+ os.path.join(config_root, MODULES_FILE_PATH), None, None)
+ modules_dict, modules_errors = modules_loader.load()
+ self.modules["user"] = modules_dict
+ if self.modules["user"] is None:
+ self.modules["user"] = {}
+ self.modules["default"] = {
+ "portdbapi.metadbmodule": "portage.cache.metadata.database",
+ "portdbapi.auxdbmodule": "portage.cache.flat_hash.database",
+ }
+
+ self.configlist=[]
+
+ # back up our incremental variables:
+ self.configdict={}
+ self._use_expand_dict = {}
+			# configlist will contain: [ env.d, repo, pkginternal, globals, defaults, conf, pkg, env ]
+ self.configlist.append({})
+ self.configdict["env.d"] = self.configlist[-1]
+
+ self.configlist.append({})
+ self.configdict["repo"] = self.configlist[-1]
+
+ self.configlist.append({})
+ self.configdict["pkginternal"] = self.configlist[-1]
+
+ self.packages_list = [grabfile_package(os.path.join(x, "packages"), verify_eapi=True) for x in self.profiles]
+ self.packages = tuple(stack_lists(self.packages_list, incremental=1))
+ del self.packages_list
+ #self.packages = grab_stacked("packages", self.profiles, grabfile, incremental_lines=1)
+
+			# prevmaskdict
+ self.prevmaskdict={}
+ for x in self.packages:
+ # Negative atoms are filtered by the above stack_lists() call.
+ if not isinstance(x, Atom):
+ x = Atom(x.lstrip('*'))
+ self.prevmaskdict.setdefault(x.cp, []).append(x)
+
+ # The expand_map is used for variable substitution
+ # in getconfig() calls, and the getconfig() calls
+ # update expand_map with the value of each variable
+ # assignment that occurs. Variable substitution occurs
+ # in the following order, which corresponds to the
+ # order of appearance in self.lookuplist:
+ #
+ # * env.d
+ # * make.globals
+ # * make.defaults
+ # * make.conf
+ #
+ # Notably absent is "env", since we want to avoid any
+ # interaction with the calling environment that might
+ # lead to unexpected results.
+ expand_map = {}
+ self._expand_map = expand_map
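+			# Illustrative sketch (comments only) of the substitution order
+			# described above: if make.defaults sets USE="foo" and make.conf
+			# contains
+			#
+			#     USE="${USE} bar"
+			#
+			# then getconfig() expands ${USE} from expand_map, so the "conf"
+			# layer ends up with USE="foo bar".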
+
+ env_d = getconfig(os.path.join(eroot, "etc", "profile.env"),
+ expand=expand_map)
+ # env_d will be None if profile.env doesn't exist.
+ if env_d:
+ self.configdict["env.d"].update(env_d)
+ expand_map.update(env_d)
+
+ # backupenv is used for calculating incremental variables.
+ if env is None:
+ env = os.environ
+
+ # Avoid potential UnicodeDecodeError exceptions later.
+ env_unicode = dict((_unicode_decode(k), _unicode_decode(v))
+ for k, v in env.items())
+
+ self.backupenv = env_unicode
+
+ if env_d:
+ # Remove duplicate values so they don't override updated
+ # profile.env values later (profile.env is reloaded in each
+ # call to self.regenerate).
+ for k, v in env_d.items():
+ try:
+ if self.backupenv[k] == v:
+ del self.backupenv[k]
+ except KeyError:
+ pass
+ del k, v
+
+ self.configdict["env"] = LazyItemsDict(self.backupenv)
+
+ for x in (self.global_config_path,):
+ self.mygcfg = getconfig(os.path.join(x, "make.globals"),
+ expand=expand_map)
+ if self.mygcfg:
+ break
+
+ if self.mygcfg is None:
+ self.mygcfg = {}
+
+ for k, v in self._default_globals.items():
+ self.mygcfg.setdefault(k, v)
+
+ self.configlist.append(self.mygcfg)
+ self.configdict["globals"]=self.configlist[-1]
+
+ self.make_defaults_use = []
+ self.mygcfg = {}
+ if self.profiles:
+ mygcfg_dlists = [getconfig(os.path.join(x, "make.defaults"),
+ expand=expand_map) for x in self.profiles]
+ self._make_defaults = mygcfg_dlists
+ self.mygcfg = stack_dicts(mygcfg_dlists,
+ incrementals=self.incrementals)
+ if self.mygcfg is None:
+ self.mygcfg = {}
+ self.configlist.append(self.mygcfg)
+ self.configdict["defaults"]=self.configlist[-1]
+
+ self.mygcfg = getconfig(
+ os.path.join(config_root, MAKE_CONF_FILE),
+ tolerant=tolerant, allow_sourcing=True,
+ expand=expand_map) or {}
+
+ self.mygcfg.update(getconfig(
+ os.path.join(abs_user_config, 'make.conf'),
+ tolerant=tolerant, allow_sourcing=True,
+ expand=expand_map) or {})
+
+ # Don't allow the user to override certain variables in make.conf
+ profile_only_variables = self.configdict["defaults"].get(
+ "PROFILE_ONLY_VARIABLES", "").split()
+ profile_only_variables = stack_lists([profile_only_variables])
+ non_user_variables = set()
+ non_user_variables.update(profile_only_variables)
+ non_user_variables.update(self._env_blacklist)
+ non_user_variables.update(self._global_only_vars)
+ non_user_variables = frozenset(non_user_variables)
+ self._non_user_variables = non_user_variables
+
+ for k in profile_only_variables:
+ self.mygcfg.pop(k, None)
+
+ self.configlist.append(self.mygcfg)
+ self.configdict["conf"]=self.configlist[-1]
+
+ self.configlist.append(LazyItemsDict())
+ self.configdict["pkg"]=self.configlist[-1]
+
+ self.configdict["backupenv"] = self.backupenv
+
+ # Don't allow the user to override certain variables in the env
+ for k in profile_only_variables:
+ self.backupenv.pop(k, None)
+
+ self.configlist.append(self.configdict["env"])
+
+ # make lookuplist for loading package.*
+ self.lookuplist=self.configlist[:]
+ self.lookuplist.reverse()
+
+ # Blacklist vars that could interfere with portage internals.
+ for blacklisted in self._env_blacklist:
+ for cfg in self.lookuplist:
+ cfg.pop(blacklisted, None)
+ self.backupenv.pop(blacklisted, None)
+ del blacklisted, cfg
+
+ self["PORTAGE_CONFIGROOT"] = config_root
+ self.backup_changes("PORTAGE_CONFIGROOT")
+ self["ROOT"] = target_root
+ self.backup_changes("ROOT")
+
+ self["EPREFIX"] = eprefix
+ self.backup_changes("EPREFIX")
+ self["EROOT"] = eroot
+ self.backup_changes("EROOT")
+
+ self["PORTAGE_SANDBOX_COMPAT_LEVEL"] = _SANDBOX_COMPAT_LEVEL
+ self.backup_changes("PORTAGE_SANDBOX_COMPAT_LEVEL")
+
+ self._ppropertiesdict = portage.dep.ExtendedAtomDict(dict)
+ self._penvdict = portage.dep.ExtendedAtomDict(dict)
+
+			# Load repositories
+ self.repositories = load_repository_config(self)
+
+			# fill the PORTDIR and PORTDIR_OVERLAY variables for compatibility
+ main_repo = self.repositories.mainRepo()
+ if main_repo is not None:
+ main_repo = main_repo.user_location
+ self["PORTDIR"] = main_repo
+ self.backup_changes("PORTDIR")
+
+ # repoman controls PORTDIR_OVERLAY via the environment, so no
+ # special cases are needed here.
+ portdir_overlay = list(self.repositories.repoUserLocationList())
+ if portdir_overlay and portdir_overlay[0] == self["PORTDIR"]:
+ portdir_overlay = portdir_overlay[1:]
+
+ new_ov = []
+ if portdir_overlay:
+ whitespace_re = re.compile(r"\s")
+ for ov in portdir_overlay:
+ ov = normalize_path(ov)
+ if os.path.isdir(ov):
+ if whitespace_re.search(ov) is not None:
+ ov = portage._shell_quote(ov)
+ new_ov.append(ov)
+ else:
+ writemsg(_("!!! Invalid PORTDIR_OVERLAY"
+ " (not a dir): '%s'\n") % ov, noiselevel=-1)
+
+ self["PORTDIR_OVERLAY"] = " ".join(new_ov)
+ self.backup_changes("PORTDIR_OVERLAY")
+
+ locations_manager.set_port_dirs(self["PORTDIR"], self["PORTDIR_OVERLAY"])
+
+ self._repo_make_defaults = {}
+ for repo in self.repositories.repos_with_profiles():
+ d = getconfig(os.path.join(repo.location, "profiles", "make.defaults"),
+ expand=self.configdict["globals"].copy()) or {}
+ if d:
+ for k in chain(self._env_blacklist,
+ profile_only_variables, self._global_only_vars):
+ d.pop(k, None)
+ self._repo_make_defaults[repo.name] = d
+
+ #Read package.keywords and package.accept_keywords.
+ self._keywords_manager = KeywordsManager(self.profiles, abs_user_config, \
+ local_config, global_accept_keywords=self.configdict["defaults"].get("ACCEPT_KEYWORDS", ""))
+
+ #Read all USE related files from profiles and optionally from user config.
+ self._use_manager = UseManager(self.repositories, self.profiles, abs_user_config, user_config=local_config)
+ #Initialize all USE related variables we track ourselves.
+ self.usemask = self._use_manager.getUseMask()
+ self.useforce = self._use_manager.getUseForce()
+ self.configdict["conf"]["USE"] = \
+ self._use_manager.extract_global_USE_changes( \
+ self.configdict["conf"].get("USE", ""))
+
+			# Read license_groups from profiles and, optionally, license_groups and package.license from user config
+ self._license_manager = LicenseManager(locations_manager.profile_locations, \
+ abs_user_config, user_config=local_config)
+ #Extract '*/*' entries from package.license
+ self.configdict["conf"]["ACCEPT_LICENSE"] = \
+ self._license_manager.extract_global_changes( \
+ self.configdict["conf"].get("ACCEPT_LICENSE", ""))
+
+ #Read package.mask and package.unmask from profiles and optionally from user config
+ self._mask_manager = MaskManager(self.repositories, self.profiles,
+ abs_user_config, user_config=local_config,
+ strict_umatched_removal=_unmatched_removal)
+
+ self._virtuals_manager = VirtualsManager(self.profiles)
+
+ if local_config:
+ #package.properties
+ propdict = grabdict_package(os.path.join(
+ abs_user_config, "package.properties"), recursive=1, allow_wildcard=True, \
+ allow_repo=True, verify_eapi=False)
+ v = propdict.pop("*/*", None)
+ if v is not None:
+ if "ACCEPT_PROPERTIES" in self.configdict["conf"]:
+ self.configdict["conf"]["ACCEPT_PROPERTIES"] += " " + " ".join(v)
+ else:
+ self.configdict["conf"]["ACCEPT_PROPERTIES"] = " ".join(v)
+ for k, v in propdict.items():
+ self._ppropertiesdict.setdefault(k.cp, {})[k] = v
+
+ #package.env
+ penvdict = grabdict_package(os.path.join(
+ abs_user_config, "package.env"), recursive=1, allow_wildcard=True, \
+ allow_repo=True, verify_eapi=False)
+ v = penvdict.pop("*/*", None)
+ if v is not None:
+ global_wildcard_conf = {}
+ self._grab_pkg_env(v, global_wildcard_conf)
+ incrementals = self.incrementals
+ conf_configdict = self.configdict["conf"]
+ for k, v in global_wildcard_conf.items():
+ if k in incrementals:
+ if k in conf_configdict:
+ conf_configdict[k] = \
+ conf_configdict[k] + " " + v
+ else:
+ conf_configdict[k] = v
+ else:
+ conf_configdict[k] = v
+ expand_map[k] = v
+
+ for k, v in penvdict.items():
+ self._penvdict.setdefault(k.cp, {})[k] = v
+
+			# categories are now read from an external file
+ self.categories = [grabfile(os.path.join(x, "categories")) \
+ for x in locations_manager.profile_and_user_locations]
+ category_re = dbapi._category_re
+ self.categories = tuple(sorted(
+ x for x in stack_lists(self.categories, incremental=1)
+ if category_re.match(x) is not None))
+
+ archlist = [grabfile(os.path.join(x, "arch.list")) \
+ for x in locations_manager.profile_and_user_locations]
+ archlist = stack_lists(archlist, incremental=1)
+ self.configdict["conf"]["PORTAGE_ARCHLIST"] = " ".join(archlist)
+
+ pkgprovidedlines = [grabfile(os.path.join(x, "package.provided"), recursive=1) for x in self.profiles]
+ pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1)
+ has_invalid_data = False
+ for x in range(len(pkgprovidedlines)-1, -1, -1):
+ myline = pkgprovidedlines[x]
+ if not isvalidatom("=" + myline):
+ writemsg(_("Invalid package name in package.provided: %s\n") % \
+ myline, noiselevel=-1)
+ has_invalid_data = True
+ del pkgprovidedlines[x]
+ continue
+ cpvr = catpkgsplit(pkgprovidedlines[x])
+ if not cpvr or cpvr[0] == "null":
+ writemsg(_("Invalid package name in package.provided: ")+pkgprovidedlines[x]+"\n",
+ noiselevel=-1)
+ has_invalid_data = True
+ del pkgprovidedlines[x]
+ continue
+ if cpvr[0] == "virtual":
+ writemsg(_("Virtual package in package.provided: %s\n") % \
+ myline, noiselevel=-1)
+ has_invalid_data = True
+ del pkgprovidedlines[x]
+ continue
+ if has_invalid_data:
+ writemsg(_("See portage(5) for correct package.provided usage.\n"),
+ noiselevel=-1)
+ self.pprovideddict = {}
+ for x in pkgprovidedlines:
+ x_split = catpkgsplit(x)
+ if x_split is None:
+ continue
+ mycatpkg = cpv_getkey(x)
+ if mycatpkg in self.pprovideddict:
+ self.pprovideddict[mycatpkg].append(x)
+ else:
+ self.pprovideddict[mycatpkg]=[x]
+
+ # reasonable defaults; this is important as without USE_ORDER,
+ # USE will always be "" (nothing set)!
+ if "USE_ORDER" not in self:
+ self.backupenv["USE_ORDER"] = "env:pkg:conf:defaults:pkginternal:repo:env.d"
+
+ self["PORTAGE_GID"] = str(portage_gid)
+ self.backup_changes("PORTAGE_GID")
+
+ self.depcachedir = DEPCACHE_PATH
+ if eprefix:
+ # See comments about make.globals and EPREFIX
+ # above. DEPCACHE_PATH is similar.
+ if target_root == "/":
+ # case (1) above
+ self.depcachedir = os.path.join(eprefix,
+ DEPCACHE_PATH.lstrip(os.sep))
+ else:
+ # case (2) above
+ # For now, just assume DEPCACHE_PATH is relative
+ # to EPREFIX.
+ # TODO: Pass in more info to the constructor,
+ # so we know the host system configuration.
+ self.depcachedir = os.path.join(eprefix,
+ DEPCACHE_PATH.lstrip(os.sep))
+
+ if self.get("PORTAGE_DEPCACHEDIR", None):
+ self.depcachedir = self["PORTAGE_DEPCACHEDIR"]
+ self["PORTAGE_DEPCACHEDIR"] = self.depcachedir
+ self.backup_changes("PORTAGE_DEPCACHEDIR")
+
+ if "CBUILD" not in self and "CHOST" in self:
+ self["CBUILD"] = self["CHOST"]
+ self.backup_changes("CBUILD")
+
+ self["PORTAGE_BIN_PATH"] = PORTAGE_BIN_PATH
+ self.backup_changes("PORTAGE_BIN_PATH")
+ self["PORTAGE_PYM_PATH"] = PORTAGE_PYM_PATH
+ self.backup_changes("PORTAGE_PYM_PATH")
+
+ for var in ("PORTAGE_INST_UID", "PORTAGE_INST_GID"):
+ try:
+ self[var] = str(int(self.get(var, "0")))
+ except ValueError:
+ writemsg(_("!!! %s='%s' is not a valid integer. "
+ "Falling back to '0'.\n") % (var, self[var]),
+ noiselevel=-1)
+ self[var] = "0"
+ self.backup_changes(var)
+
+ # initialize self.features
+ self.regenerate()
+
+ if bsd_chflags:
+ self.features.add('chflags')
+
+ if 'parse-eapi-ebuild-head' in self.features:
+ portage._validate_cache_for_unsupported_eapis = False
+
+ self._iuse_implicit_match = _iuse_implicit_match_cache(self)
+
+ self._validate_commands()
+
+ for k in self._case_insensitive_vars:
+ if k in self:
+ self[k] = self[k].lower()
+ self.backup_changes(k)
+
+ if mycpv:
+ self.setcpv(mycpv)
+
+ def _validate_commands(self):
+ for k in special_env_vars.validate_commands:
+ v = self.get(k)
+ if v is not None:
+ valid, v_split = validate_cmd_var(v)
+
+ if not valid:
+ if v_split:
+ writemsg_level(_("%s setting is invalid: '%s'\n") % \
+ (k, v), level=logging.ERROR, noiselevel=-1)
+
+ # before deleting the invalid setting, backup
+ # the default value if available
+ v = self.configdict['globals'].get(k)
+ if v is not None:
+ default_valid, v_split = validate_cmd_var(v)
+ if not default_valid:
+ if v_split:
+ writemsg_level(
+ _("%s setting from make.globals" + \
+ " is invalid: '%s'\n") % \
+ (k, v), level=logging.ERROR, noiselevel=-1)
+ # make.globals seems corrupt, so try for
+ # a hardcoded default instead
+ v = self._default_globals.get(k)
+
+ # delete all settings for this key,
+ # including the invalid one
+ del self[k]
+ self.backupenv.pop(k, None)
+ if v:
+ # restore validated default
+ self.configdict['globals'][k] = v
+
+ def _init_dirs(self):
+ """
+ Create a few directories that are critical to portage operation
+ """
+ if not os.access(self["EROOT"], os.W_OK):
+ return
+
+ # gid, mode, mask, preserve_perms
+ dir_mode_map = {
+ "tmp" : ( -1, 0o1777, 0, True),
+ "var/tmp" : ( -1, 0o1777, 0, True),
+ PRIVATE_PATH : (portage_gid, 0o2750, 0o2, False),
+ CACHE_PATH : (portage_gid, 0o755, 0o2, False)
+ }
+
+ for mypath, (gid, mode, modemask, preserve_perms) \
+ in dir_mode_map.items():
+ mydir = os.path.join(self["EROOT"], mypath)
+ if preserve_perms and os.path.isdir(mydir):
+ # Only adjust permissions on some directories if
+ # they don't exist yet. This gives freedom to the
+ # user to adjust permissions to suit their taste.
+ continue
+ try:
+ ensure_dirs(mydir, gid=gid, mode=mode, mask=modemask)
+ except PortageException as e:
+ writemsg(_("!!! Directory initialization failed: '%s'\n") % mydir,
+ noiselevel=-1)
+ writemsg("!!! %s\n" % str(e),
+ noiselevel=-1)
+
+ @property
+ def pkeywordsdict(self):
+ result = self._keywords_manager.pkeywordsdict.copy()
+ for k, v in result.items():
+ result[k] = v.copy()
+ return result
+
+ @property
+ def pmaskdict(self):
+ return self._mask_manager._pmaskdict.copy()
+
+ @property
+ def punmaskdict(self):
+ return self._mask_manager._punmaskdict.copy()
+
+ def expandLicenseTokens(self, tokens):
+ """ Take a token from ACCEPT_LICENSE or package.license and expand it
+ if it's a group token (indicated by @) or just return it if it's not a
+ group. If a group is negated then negate all group elements."""
+ return self._license_manager.expandLicenseTokens(tokens)
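+	# Illustrative sketch (comments only), assuming a license group named
+	# @FSF-APPROVED is defined in license_groups:
+	#
+	#     settings.expandLicenseTokens(["@FSF-APPROVED"])
+	#         -> ["GPL-2", "GPL-3", ...]   (the group's members)
+	#     settings.expandLicenseTokens(["-@FSF-APPROVED"])
+	#         -> ["-GPL-2", "-GPL-3", ...] (negation distributes)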
+
+ def validate(self):
+ """Validate miscellaneous settings and display warnings if necessary.
+ (This code was previously in the global scope of portage.py)"""
+
+ groups = self["ACCEPT_KEYWORDS"].split()
+ archlist = self.archlist()
+ if not archlist:
+ writemsg(_("--- 'profiles/arch.list' is empty or "
+ "not available. Empty portage tree?\n"), noiselevel=1)
+ else:
+ for group in groups:
+ if group not in archlist and \
+ not (group.startswith("-") and group[1:] in archlist) and \
+ group not in ("*", "~*", "**"):
+ writemsg(_("!!! INVALID ACCEPT_KEYWORDS: %s\n") % str(group),
+ noiselevel=-1)
+
+ abs_profile_path = os.path.join(self["PORTAGE_CONFIGROOT"],
+ PROFILE_PATH)
+ if (not self.profile_path or \
+ not os.path.exists(os.path.join(self.profile_path, "parent"))) and \
+ os.path.exists(os.path.join(self["PORTDIR"], "profiles")):
+ writemsg(_("\n\n!!! %s is not a symlink and will probably prevent most merges.\n") % abs_profile_path,
+ noiselevel=-1)
+ writemsg(_("!!! It should point into a profile within %s/profiles/\n") % self["PORTDIR"])
+ writemsg(_("!!! (You can safely ignore this message when syncing. It's harmless.)\n\n\n"))
+
+ abs_user_virtuals = os.path.join(self["PORTAGE_CONFIGROOT"],
+ USER_VIRTUALS_FILE)
+ if os.path.exists(abs_user_virtuals):
+ writemsg("\n!!! /etc/portage/virtuals is deprecated in favor of\n")
+ writemsg("!!! /etc/portage/profile/virtuals. Please move it to\n")
+ writemsg("!!! this new location.\n\n")
+
+ if not sandbox_capable and \
+ ("sandbox" in self.features or "usersandbox" in self.features):
+ if self.profile_path is not None and \
+ os.path.realpath(self.profile_path) == \
+ os.path.realpath(os.path.join(
+ self["PORTAGE_CONFIGROOT"], PROFILE_PATH)):
+ # Don't show this warning when running repoman and the
+ # sandbox feature came from a profile that doesn't belong
+ # to the user.
+ writemsg(colorize("BAD", _("!!! Problem with sandbox"
+ " binary. Disabling...\n\n")), noiselevel=-1)
+
+ if "fakeroot" in self.features and \
+ not fakeroot_capable:
+ writemsg(_("!!! FEATURES=fakeroot is enabled, but the "
+ "fakeroot binary is not installed.\n"), noiselevel=-1)
+
+ def load_best_module(self,property_string):
+ best_mod = best_from_dict(property_string,self.modules,self.module_priority)
+ mod = None
+ try:
+ mod = load_mod(best_mod)
+ except ImportError:
+ if not best_mod.startswith("cache."):
+ raise
+ else:
+ best_mod = "portage." + best_mod
+ try:
+ mod = load_mod(best_mod)
+ except ImportError:
+ raise
+ return mod
+
+ def lock(self):
+ self.locked = 1
+
+ def unlock(self):
+ self.locked = 0
+
+ def modifying(self):
+ if self.locked:
+ raise Exception(_("Configuration is locked."))
+
+ def backup_changes(self,key=None):
+ self.modifying()
+ if key and key in self.configdict["env"]:
+ self.backupenv[key] = copy.deepcopy(self.configdict["env"][key])
+ else:
+ raise KeyError(_("No such key defined in environment: %s") % key)
+
+ def reset(self, keeping_pkg=0, use_cache=None):
+ """
+ Restore environment from self.backupenv, call self.regenerate()
+		@param keeping_pkg: Whether to keep the setcpv() data or delete it.
+		@type keeping_pkg: Boolean
+		@rtype: None
+ """
+
+ if use_cache is not None:
+ warnings.warn("The use_cache parameter for config.reset() is deprecated and without effect.",
+ DeprecationWarning, stacklevel=2)
+
+ self.modifying()
+ self.configdict["env"].clear()
+ self.configdict["env"].update(self.backupenv)
+
+ self.modifiedkeys = []
+ if not keeping_pkg:
+ self.mycpv = None
+ self._setcpv_args_hash = None
+ self.puse = ""
+ del self._penv[:]
+ self.configdict["pkg"].clear()
+ self.configdict["pkginternal"].clear()
+ self.configdict["repo"].clear()
+ self.configdict["defaults"]["USE"] = \
+ " ".join(self.make_defaults_use)
+ self.usemask = self._use_manager.getUseMask()
+ self.useforce = self._use_manager.getUseForce()
+ self.regenerate()
+
+ class _lazy_vars(object):
+
+ __slots__ = ('built_use', 'settings', 'values')
+
+ def __init__(self, built_use, settings):
+ self.built_use = built_use
+ self.settings = settings
+ self.values = None
+
+ def __getitem__(self, k):
+ if self.values is None:
+ self.values = self._init_values()
+ return self.values[k]
+
+ def _init_values(self):
+ values = {}
+ settings = self.settings
+ use = self.built_use
+ if use is None:
+ use = frozenset(settings['PORTAGE_USE'].split())
+
+ values['ACCEPT_LICENSE'] = settings._license_manager.get_prunned_accept_license( \
+ settings.mycpv, use, settings['LICENSE'], settings['SLOT'], settings.get('PORTAGE_REPO_NAME'))
+ values['PORTAGE_RESTRICT'] = self._restrict(use, settings)
+ return values
+
+ def _restrict(self, use, settings):
+ try:
+ restrict = set(use_reduce(settings['RESTRICT'], uselist=use, flat=True))
+ except InvalidDependString:
+ restrict = set()
+ return ' '.join(sorted(restrict))
+
+ class _lazy_use_expand(object):
+ """
+		Lazily evaluate USE_EXPAND variables since they are only needed when
+		an ebuild shell is spawned. Variable values are made consistent with
+		the previously calculated USE settings.
+ """
+
+ def __init__(self, use, usemask, iuse_implicit,
+ use_expand_split, use_expand_dict):
+ self._use = use
+ self._usemask = usemask
+ self._iuse_implicit = iuse_implicit
+ self._use_expand_split = use_expand_split
+ self._use_expand_dict = use_expand_dict
+
+ def __getitem__(self, key):
+ prefix = key.lower() + '_'
+ prefix_len = len(prefix)
+ expand_flags = set( x[prefix_len:] for x in self._use \
+ if x[:prefix_len] == prefix )
+ var_split = self._use_expand_dict.get(key, '').split()
+ # Preserve the order of var_split because it can matter for things
+ # like LINGUAS.
+ var_split = [ x for x in var_split if x in expand_flags ]
+ var_split.extend(expand_flags.difference(var_split))
+ has_wildcard = '*' in expand_flags
+ if has_wildcard:
+ var_split = [ x for x in var_split if x != "*" ]
+ has_iuse = set()
+ for x in self._iuse_implicit:
+ if x[:prefix_len] == prefix:
+ has_iuse.add(x[prefix_len:])
+ if has_wildcard:
+ # * means to enable everything in IUSE that's not masked
+ if has_iuse:
+ usemask = self._usemask
+ for suffix in has_iuse:
+ x = prefix + suffix
+ if x not in usemask:
+ if suffix not in expand_flags:
+ var_split.append(suffix)
+ else:
+ # If there is a wildcard and no matching flags in IUSE then
+ # LINGUAS should be unset so that all .mo files are
+ # installed.
+ var_split = []
+ # Make the flags unique and filter them according to IUSE.
+ # Also, continue to preserve order for things like LINGUAS
+ # and filter any duplicates that variable may contain.
+ filtered_var_split = []
+ remaining = has_iuse.intersection(var_split)
+ for x in var_split:
+ if x in remaining:
+ remaining.remove(x)
+ filtered_var_split.append(x)
+ var_split = filtered_var_split
+
+ if var_split:
+ value = ' '.join(var_split)
+ else:
+ # Don't export empty USE_EXPAND vars unless the user config
+ # exports them as empty. This is required for vars such as
+ # LINGUAS, where unset and empty have different meanings.
+ if has_wildcard:
+ # ebuild.sh will see this and unset the variable so
+ # that things like LINGUAS work properly
+ value = '*'
+ else:
+ if has_iuse:
+ value = ''
+ else:
+ # It's not in IUSE, so just allow the variable content
+ # to pass through if it is defined somewhere. This
+ # allows packages that support LINGUAS but don't
+ # declare it in IUSE to use the variable outside of the
+ # USE_EXPAND context.
+ value = None
+
+ return value
+
+ def setcpv(self, mycpv, use_cache=None, mydb=None):
+ """
+		Load a particular CPV into the config; this lets us see the
+		default USE flags for a particular ebuild as well as the USE
+		flags from package.use.
+
+ @param mycpv: A cpv to load
+ @type mycpv: string
+ @param mydb: a dbapi instance that supports aux_get with the IUSE key.
+ @type mydb: dbapi or derivative.
+ @rtype: None
+ """
+
+ if use_cache is not None:
+ warnings.warn("The use_cache parameter for config.setcpv() is deprecated and without effect.",
+ DeprecationWarning, stacklevel=2)
+
+ self.modifying()
+
+ pkg = None
+ built_use = None
+ explicit_iuse = None
+ if not isinstance(mycpv, basestring):
+ pkg = mycpv
+ mycpv = pkg.cpv
+ mydb = pkg.metadata
+ explicit_iuse = pkg.iuse.all
+ args_hash = (mycpv, id(pkg))
+ if pkg.built:
+ built_use = pkg.use.enabled
+ else:
+ args_hash = (mycpv, id(mydb))
+
+ if args_hash == self._setcpv_args_hash:
+ return
+ self._setcpv_args_hash = args_hash
+
+ has_changed = False
+ self.mycpv = mycpv
+ cat, pf = catsplit(mycpv)
+ cp = cpv_getkey(mycpv)
+ cpv_slot = self.mycpv
+ pkginternaluse = ""
+ iuse = ""
+ pkg_configdict = self.configdict["pkg"]
+ previous_iuse = pkg_configdict.get("IUSE")
+ previous_features = pkg_configdict.get("FEATURES")
+
+ aux_keys = self._setcpv_aux_keys
+
+ # Discard any existing metadata and package.env settings from
+ # the previous package instance.
+ pkg_configdict.clear()
+
+ pkg_configdict["CATEGORY"] = cat
+ pkg_configdict["PF"] = pf
+ repository = None
+ if mydb:
+ if not hasattr(mydb, "aux_get"):
+ for k in aux_keys:
+ if k in mydb:
+ # Make these lazy, since __getitem__ triggers
+ # evaluation of USE conditionals which can't
+ # occur until PORTAGE_USE is calculated below.
+ pkg_configdict.addLazySingleton(k,
+ mydb.__getitem__, k)
+ else:
+				# When calling dbapi.aux_get(), grab USE for built/installed
+				# packages since we want to save it in PORTAGE_BUILT_USE for
+				# evaluating conditional USE deps in atoms passed via IPC to
+				# helpers like has_version and best_version.
+ aux_keys = list(aux_keys)
+ aux_keys.append('USE')
+ for k, v in zip(aux_keys, mydb.aux_get(self.mycpv, aux_keys)):
+ pkg_configdict[k] = v
+ built_use = frozenset(pkg_configdict.pop('USE').split())
+ if not built_use:
+ # Empty USE means this dbapi instance does not contain
+ # built packages.
+ built_use = None
+
+ repository = pkg_configdict.pop("repository", None)
+ if repository is not None:
+ pkg_configdict["PORTAGE_REPO_NAME"] = repository
+ slot = pkg_configdict["SLOT"]
+ iuse = pkg_configdict["IUSE"]
+ if pkg is None:
+ cpv_slot = "%s:%s" % (self.mycpv, slot)
+ else:
+ cpv_slot = pkg
+ pkginternaluse = []
+ for x in iuse.split():
+ if x.startswith("+"):
+ pkginternaluse.append(x[1:])
+ elif x.startswith("-"):
+ pkginternaluse.append(x)
+ pkginternaluse = " ".join(pkginternaluse)
+ if pkginternaluse != self.configdict["pkginternal"].get("USE", ""):
+ self.configdict["pkginternal"]["USE"] = pkginternaluse
+ has_changed = True
+
+ repo_env = []
+ if repository and repository != Package.UNKNOWN_REPO:
+ repos = []
+ try:
+ repos.extend(repo.name for repo in
+ self.repositories[repository].masters)
+ except KeyError:
+ pass
+ repos.append(repository)
+ for repo in repos:
+ d = self._repo_make_defaults.get(repo)
+ if d is None:
+ d = {}
+ else:
+ # make a copy, since we might modify it with
+ # package.use settings
+ d = d.copy()
+ cpdict = self._use_manager._repo_puse_dict.get(repo, {}).get(cp)
+ if cpdict:
+ repo_puse = ordered_by_atom_specificity(cpdict, cpv_slot)
+ if repo_puse:
+ for x in repo_puse:
+ d["USE"] = d.get("USE", "") + " " + " ".join(x)
+ if d:
+ repo_env.append(d)
+
+ if repo_env or self.configdict["repo"]:
+ self.configdict["repo"].clear()
+ self.configdict["repo"].update(stack_dicts(repo_env,
+ incrementals=self.incrementals))
+ has_changed = True
+
+ defaults = []
+ for i, pkgprofileuse_dict in enumerate(self._use_manager._pkgprofileuse):
+ if self.make_defaults_use[i]:
+ defaults.append(self.make_defaults_use[i])
+ cpdict = pkgprofileuse_dict.get(cp)
+ if cpdict:
+ pkg_defaults = ordered_by_atom_specificity(cpdict, cpv_slot)
+ if pkg_defaults:
+ defaults.extend(pkg_defaults)
+ defaults = " ".join(defaults)
+ if defaults != self.configdict["defaults"].get("USE",""):
+ self.configdict["defaults"]["USE"] = defaults
+ has_changed = True
+
+ useforce = self._use_manager.getUseForce(cpv_slot)
+ if useforce != self.useforce:
+ self.useforce = useforce
+ has_changed = True
+
+ usemask = self._use_manager.getUseMask(cpv_slot)
+ if usemask != self.usemask:
+ self.usemask = usemask
+ has_changed = True
+
+ oldpuse = self.puse
+ self.puse = self._use_manager.getPUSE(cpv_slot)
+ if oldpuse != self.puse:
+ has_changed = True
+ self.configdict["pkg"]["PKGUSE"] = self.puse[:] # For saving to PUSE file
+ self.configdict["pkg"]["USE"] = self.puse[:] # this gets appended to USE
+
+ if previous_features:
+ # The package from the previous setcpv call had package.env
+ # settings which modified FEATURES. Therefore, trigger a
+ # regenerate() call in order to ensure that self.features
+ # is accurate.
+ has_changed = True
+
+ self._penv = []
+ cpdict = self._penvdict.get(cp)
+ if cpdict:
+ penv_matches = ordered_by_atom_specificity(cpdict, cpv_slot)
+ if penv_matches:
+ for x in penv_matches:
+ self._penv.extend(x)
+
+ protected_pkg_keys = set(pkg_configdict)
+ protected_pkg_keys.discard('USE')
+
+ # If there are _any_ package.env settings for this package
+ # then it automatically triggers config.reset(), in order
+ # to account for possible incremental interaction between
+ # package.use, package.env, and overrides from the calling
+ # environment (configdict['env']).
+ if self._penv:
+ has_changed = True
+ # USE is special because package.use settings override
+ # it. Discard any package.use settings here and they'll
+ # be added back later.
+ pkg_configdict.pop('USE', None)
+ self._grab_pkg_env(self._penv, pkg_configdict,
+ protected_keys=protected_pkg_keys)
+
+ # Now add package.use settings, which override USE from
+ # package.env
+ if self.puse:
+ if 'USE' in pkg_configdict:
+ pkg_configdict['USE'] = \
+ pkg_configdict['USE'] + " " + self.puse
+ else:
+ pkg_configdict['USE'] = self.puse
+
+ if has_changed:
+ self.reset(keeping_pkg=1)
+
+ env_configdict = self.configdict['env']
+
+ # Ensure that "pkg" values are always preferred over "env" values.
+ # This must occur _after_ the above reset() call, since reset()
+ # copies values from self.backupenv.
+ for k in protected_pkg_keys:
+ env_configdict.pop(k, None)
+
+ lazy_vars = self._lazy_vars(built_use, self)
+ env_configdict.addLazySingleton('ACCEPT_LICENSE',
+ lazy_vars.__getitem__, 'ACCEPT_LICENSE')
+ env_configdict.addLazySingleton('PORTAGE_RESTRICT',
+ lazy_vars.__getitem__, 'PORTAGE_RESTRICT')
+
+ if built_use is not None:
+ pkg_configdict['PORTAGE_BUILT_USE'] = ' '.join(built_use)
+
+ # If reset() has not been called, it's safe to return
+ # early if IUSE has not changed.
+ if not has_changed and previous_iuse == iuse:
+ return
+
+ # Filter out USE flags that aren't part of IUSE. This has to
+ # be done for every setcpv() call since practically every
+ # package has different IUSE.
+ use = set(self["USE"].split())
+ if explicit_iuse is None:
+ explicit_iuse = frozenset(x.lstrip("+-") for x in iuse.split())
+ iuse_implicit_match = self._iuse_implicit_match
+ portage_iuse = self._get_implicit_iuse()
+ portage_iuse.update(explicit_iuse)
+
+ # PORTAGE_IUSE is not always needed so it's lazily evaluated.
+ self.configdict["env"].addLazySingleton(
+ "PORTAGE_IUSE", _lazy_iuse_regex, portage_iuse)
+
+ ebuild_force_test = self.get("EBUILD_FORCE_TEST") == "1"
+ if ebuild_force_test and \
+ not hasattr(self, "_ebuild_force_test_msg_shown"):
+ self._ebuild_force_test_msg_shown = True
+ writemsg(_("Forcing test.\n"), noiselevel=-1)
+ if "test" in self.features:
+ if "test" in self.usemask and not ebuild_force_test:
+ # "test" is in IUSE and USE=test is masked, so execution
+ # of src_test() probably is not reliable. Therefore,
+ # temporarily disable FEATURES=test just for this package.
+ self["FEATURES"] = " ".join(x for x in self.features \
+ if x != "test")
+ use.discard("test")
+ else:
+ use.add("test")
+ if ebuild_force_test and "test" in self.usemask:
+ self.usemask = \
+ frozenset(x for x in self.usemask if x != "test")
+
+ # Allow _* flags from USE_EXPAND wildcards to pass through here.
+ use.difference_update([x for x in use \
+ if (x not in explicit_iuse and \
+ not iuse_implicit_match(x)) and x[-2:] != '_*'])
+
+ # Use the calculated USE flags to regenerate the USE_EXPAND flags so
+ # that they are consistent. For optimal performance, use slice
+ # comparison instead of startswith().
+ use_expand_split = set(x.lower() for \
+ x in self.get('USE_EXPAND', '').split())
+ lazy_use_expand = self._lazy_use_expand(use, self.usemask,
+ portage_iuse, use_expand_split, self._use_expand_dict)
+
+ use_expand_iuses = {}
+ for x in portage_iuse:
+ x_split = x.split('_')
+ if len(x_split) == 1:
+ continue
+ for i in range(len(x_split) - 1):
+ k = '_'.join(x_split[:i+1])
+ if k in use_expand_split:
+ v = use_expand_iuses.get(k)
+ if v is None:
+ v = set()
+ use_expand_iuses[k] = v
+ v.add(x)
+ break
+
+ # If it's not in IUSE, variable content is allowed
+ # to pass through if it is defined somewhere. This
+ # allows packages that support LINGUAS but don't
+ # declare it in IUSE to use the variable outside of the
+ # USE_EXPAND context.
+ for k, use_expand_iuse in use_expand_iuses.items():
+ if k + '_*' in use:
+ use.update( x for x in use_expand_iuse if x not in usemask )
+ k = k.upper()
+ self.configdict['env'].addLazySingleton(k,
+ lazy_use_expand.__getitem__, k)
+
+ # Filtered for the ebuild environment. Store this in a separate
+ # attribute since we still want to be able to see global USE
+ # settings for things like emerge --info.
+
+ self.configdict["env"]["PORTAGE_USE"] = \
+ " ".join(sorted(x for x in use if x[-2:] != '_*'))
+
+ def _grab_pkg_env(self, penv, container, protected_keys=None):
+ if protected_keys is None:
+ protected_keys = ()
+ abs_user_config = os.path.join(
+ self['PORTAGE_CONFIGROOT'], USER_CONFIG_PATH)
+ non_user_variables = self._non_user_variables
+ # Make a copy since we don't want per-package settings
+ # to pollute the global expand_map.
+ expand_map = self._expand_map.copy()
+ incrementals = self.incrementals
+ for envname in penv:
+ penvfile = os.path.join(abs_user_config, "env", envname)
+ penvconfig = getconfig(penvfile, tolerant=self._tolerant,
+ allow_sourcing=True, expand=expand_map)
+ if penvconfig is None:
+ writemsg("!!! %s references non-existent file: %s\n" % \
+ (os.path.join(abs_user_config, 'package.env'), penvfile),
+ noiselevel=-1)
+ else:
+ for k, v in penvconfig.items():
+ if k in protected_keys or \
+ k in non_user_variables:
+ writemsg("!!! Illegal variable " + \
+ "'%s' assigned in '%s'\n" % \
+ (k, penvfile), noiselevel=-1)
+ elif k in incrementals:
+ if k in container:
+ container[k] = container[k] + " " + v
+ else:
+ container[k] = v
+ else:
+ container[k] = v
+
+ def _get_implicit_iuse(self):
+ """
+ Some flags are considered to
+ be implicit members of IUSE:
+ * Flags derived from ARCH
+ * Flags derived from USE_EXPAND_HIDDEN variables
+ * Masked flags, such as those from {,package}use.mask
+ * Forced flags, such as those from {,package}use.force
+ * build and bootstrap flags used by bootstrap.sh
+ """
+ iuse_implicit = set()
+ # Flags derived from ARCH.
+ arch = self.configdict["defaults"].get("ARCH")
+ if arch:
+ iuse_implicit.add(arch)
+ iuse_implicit.update(self.get("PORTAGE_ARCHLIST", "").split())
+
+ # Flags derived from USE_EXPAND_HIDDEN variables
+ # such as ELIBC, KERNEL, and USERLAND.
+ use_expand_hidden = self.get("USE_EXPAND_HIDDEN", "").split()
+ for x in use_expand_hidden:
+ iuse_implicit.add(x.lower() + "_.*")
+
+ # Flags that have been masked or forced.
+ iuse_implicit.update(self.usemask)
+ iuse_implicit.update(self.useforce)
+
+ # build and bootstrap flags used by bootstrap.sh
+ iuse_implicit.add("build")
+ iuse_implicit.add("bootstrap")
+
+ # Controlled by FEATURES=test. Make this implicit, so handling
+ # of FEATURES=test is consistent regardless of explicit IUSE.
+ # Users may use use.mask/package.use.mask to control
+ # FEATURES=test for all ebuilds, regardless of explicit IUSE.
+ iuse_implicit.add("test")
+
+ return iuse_implicit
+
+ def _getUseMask(self, pkg):
+ return self._use_manager.getUseMask(pkg)
+
+ def _getUseForce(self, pkg):
+ return self._use_manager.getUseForce(pkg)
+
+ def _getMaskAtom(self, cpv, metadata):
+ """
+ Take a package and return a matching package.mask atom, or None if no
+ such atom exists or it has been cancelled by package.unmask. PROVIDE
+ is not checked, so atoms will not be found for old-style virtuals.
+
+ @param cpv: The package name
+ @type cpv: String
+ @param metadata: A dictionary of raw package metadata
+ @type metadata: dict
+ @rtype: String
+ @return: A matching atom string or None if one is not found.
+ """
+ return self._mask_manager.getMaskAtom(cpv, metadata["SLOT"], metadata.get('repository'))
+
+ def _getRawMaskAtom(self, cpv, metadata):
+ """
+		Take a package and return a matching package.mask atom, or None if no
+		such atom exists. Unlike _getMaskAtom, this does not take package.unmask
+		into account. PROVIDE is not checked, so atoms will not be found for
+		old-style virtuals.
+
+ @param cpv: The package name
+ @type cpv: String
+ @param metadata: A dictionary of raw package metadata
+ @type metadata: dict
+ @rtype: String
+ @return: A matching atom string or None if one is not found.
+ """
+ return self._mask_manager.getRawMaskAtom(cpv, metadata["SLOT"], metadata.get('repository'))
+
+
+ def _getProfileMaskAtom(self, cpv, metadata):
+ """
+ Take a package and return a matching profile atom, or None if no
+ such atom exists. Note that a profile atom may or may not have a "*"
+ prefix. PROVIDE is not checked, so atoms will not be found for
+ old-style virtuals.
+
+ @param cpv: The package name
+ @type cpv: String
+ @param metadata: A dictionary of raw package metadata
+ @type metadata: dict
+ @rtype: String
+ @return: A matching profile atom string or None if one is not found.
+ """
+
+ cp = cpv_getkey(cpv)
+ profile_atoms = self.prevmaskdict.get(cp)
+ if profile_atoms:
+ pkg = "".join((cpv, _slot_separator, metadata["SLOT"]))
+ repo = metadata.get("repository")
+ if repo and repo != Package.UNKNOWN_REPO:
+ pkg = "".join((pkg, _repo_separator, repo))
+ pkg_list = [pkg]
+ for x in profile_atoms:
+ if match_from_list(x, pkg_list):
+ continue
+ return x
+ return None
+
+ def _getKeywords(self, cpv, metadata):
+ return self._keywords_manager.getKeywords(cpv, metadata["SLOT"], \
+ metadata.get("KEYWORDS", ""), metadata.get("repository"))
+
+ def _getMissingKeywords(self, cpv, metadata):
+ """
+ Take a package and return a list of any KEYWORDS that the user may
+ need to accept for the given package. If the KEYWORDS are empty
+		and the ** keyword has not been accepted, the returned list will
+ contain ** alone (in order to distinguish from the case of "none
+ missing").
+
+ @param cpv: The package name (for package.keywords support)
+ @type cpv: String
+ @param metadata: A dictionary of raw package metadata
+ @type metadata: dict
+ @rtype: List
+ @return: A list of KEYWORDS that have not been accepted.
+ """
+
+		# Hack: Need to check the env directly here, since otherwise stacking
+		# doesn't work properly because negative values are lost in the config
+		# object (bug #139600)
+ backuped_accept_keywords = self.configdict["backupenv"].get("ACCEPT_KEYWORDS", "")
+ global_accept_keywords = self["ACCEPT_KEYWORDS"]
+
+ return self._keywords_manager.getMissingKeywords(cpv, metadata["SLOT"], \
+ metadata.get("KEYWORDS", ""), metadata.get('repository'), \
+ global_accept_keywords, backuped_accept_keywords)
+
+ def _getRawMissingKeywords(self, cpv, metadata):
+ """
+ Take a package and return a list of any KEYWORDS that the user may
+ need to accept for the given package. If the KEYWORDS are empty,
+ the returned list will contain ** alone (in order to distinguish
+ from the case of "none missing"). This DOES NOT apply any user config
+ package.accept_keywords acceptance.
+
+ @param cpv: The package name (for package.keywords support)
+ @type cpv: String
+ @param metadata: A dictionary of raw package metadata
+ @type metadata: dict
+ @rtype: List
+ @return: lists of KEYWORDS that have not been accepted
+ and the keywords it looked for.
+ """
+ return self._keywords_manager.getRawMissingKeywords(cpv, metadata["SLOT"], \
+ metadata.get("KEYWORDS", ""), metadata.get('repository'), \
+ self.get("ACCEPT_KEYWORDS", ""))
+
+ def _getPKeywords(self, cpv, metadata):
+ global_accept_keywords = self.get("ACCEPT_KEYWORDS", "")
+
+ return self._keywords_manager.getPKeywords(cpv, metadata["SLOT"], \
+ metadata.get('repository'), global_accept_keywords)
+
+ def _getMissingLicenses(self, cpv, metadata):
+ """
+ Take a LICENSE string and return a list of any licenses that the user
+ may need to accept for the given package. The returned list will not
+ contain any licenses that have already been accepted. This method
+ can throw an InvalidDependString exception.
+
+ @param cpv: The package name (for package.license support)
+ @type cpv: String
+ @param metadata: A dictionary of raw package metadata
+ @type metadata: dict
+ @rtype: List
+ @return: A list of licenses that have not been accepted.
+ """
+ return self._license_manager.getMissingLicenses( \
+ cpv, metadata["USE"], metadata["LICENSE"], metadata["SLOT"], metadata.get('repository'))
+
+ def _getMissingProperties(self, cpv, metadata):
+ """
+ Take a PROPERTIES string and return a list of any properties the user
+ may need to accept for the given package. The returned list will not
+ contain any properties that have already been accepted. This method
+ can throw an InvalidDependString exception.
+
+ @param cpv: The package name (for package.properties support)
+ @type cpv: String
+ @param metadata: A dictionary of raw package metadata
+ @type metadata: dict
+ @rtype: List
+ @return: A list of properties that have not been accepted.
+ """
+ accept_properties = self._accept_properties
+ cp = cpv_getkey(cpv)
+ cpdict = self._ppropertiesdict.get(cp)
+ if cpdict:
+ cpv_slot = "%s:%s" % (cpv, metadata["SLOT"])
+ pproperties_list = ordered_by_atom_specificity(cpdict, cpv_slot, repo=metadata.get('repository'))
+ if pproperties_list:
+ accept_properties = list(self._accept_properties)
+ for x in pproperties_list:
+ accept_properties.extend(x)
+
+ properties_str = metadata.get("PROPERTIES", "")
+ properties = set(use_reduce(properties_str, matchall=1, flat=True))
+ properties.discard('||')
+
+ acceptable_properties = set()
+ for x in accept_properties:
+ if x == '*':
+ acceptable_properties.update(properties)
+ elif x == '-*':
+ acceptable_properties.clear()
+ elif x[:1] == '-':
+ acceptable_properties.discard(x[1:])
+ else:
+ acceptable_properties.add(x)
+
+ if "?" in properties_str:
+ use = metadata["USE"].split()
+ else:
+ use = []
+
+ properties_struct = use_reduce(properties_str, uselist=use, opconvert=True)
+ return self._getMaskedProperties(properties_struct, acceptable_properties)
+
+ def _getMaskedProperties(self, properties_struct, acceptable_properties):
+ if not properties_struct:
+ return []
+ if properties_struct[0] == "||":
+ ret = []
+ for element in properties_struct[1:]:
+ if isinstance(element, list):
+ if element:
+ tmp = self._getMaskedProperties(
+ element, acceptable_properties)
+ if not tmp:
+ return []
+ ret.extend(tmp)
+ else:
+ if element in acceptable_properties:
+					return []
+ ret.append(element)
+ # Return all masked properties, since we don't know which combination
+ # (if any) the user will decide to unmask
+ return ret
+
+ ret = []
+ for element in properties_struct:
+ if isinstance(element, list):
+ if element:
+ ret.extend(self._getMaskedProperties(element,
+ acceptable_properties))
+ else:
+ if element not in acceptable_properties:
+ ret.append(element)
+ return ret
+
+ def _accept_chost(self, cpv, metadata):
+ """
+ @return True if pkg CHOST is accepted, False otherwise.
+ """
+ if self._accept_chost_re is None:
+ accept_chost = self.get("ACCEPT_CHOSTS", "").split()
+ if not accept_chost:
+ chost = self.get("CHOST")
+ if chost:
+ accept_chost.append(chost)
+ if not accept_chost:
+ self._accept_chost_re = re.compile(".*")
+ elif len(accept_chost) == 1:
+ try:
+ self._accept_chost_re = re.compile(r'^%s$' % accept_chost[0])
+ except re.error as e:
+ writemsg(_("!!! Invalid ACCEPT_CHOSTS value: '%s': %s\n") % \
+ (accept_chost[0], e), noiselevel=-1)
+ self._accept_chost_re = re.compile("^$")
+ else:
+ try:
+ self._accept_chost_re = re.compile(
+ r'^(%s)$' % "|".join(accept_chost))
+ except re.error as e:
+ writemsg(_("!!! Invalid ACCEPT_CHOSTS value: '%s': %s\n") % \
+ (" ".join(accept_chost), e), noiselevel=-1)
+ self._accept_chost_re = re.compile("^$")
+
+ pkg_chost = metadata.get('CHOST', '')
+ return not pkg_chost or \
+ self._accept_chost_re.match(pkg_chost) is not None
+
+ def setinst(self, mycpv, mydbapi):
+ """This updates the preferences for old-style virtuals,
+ affecting the behavior of dep_expand() and dep_check()
+ calls. It can change dbapi.match() behavior since that
+ calls dep_expand(). However, dbapi instances have
+ internal match caches that are not invalidated when
+ preferences are updated here. This can potentially
+ lead to some inconsistency (relevant to bug #1343)."""
+ self.modifying()
+
+ # Grab the virtuals this package provides and add them into the tree virtuals.
+ if not hasattr(mydbapi, "aux_get"):
+ provides = mydbapi["PROVIDE"]
+ else:
+ provides = mydbapi.aux_get(mycpv, ["PROVIDE"])[0]
+ if not provides:
+ return
+ if isinstance(mydbapi, portdbapi):
+ self.setcpv(mycpv, mydb=mydbapi)
+ myuse = self["PORTAGE_USE"]
+ elif not hasattr(mydbapi, "aux_get"):
+ myuse = mydbapi["USE"]
+ else:
+ myuse = mydbapi.aux_get(mycpv, ["USE"])[0]
+ virts = use_reduce(provides, uselist=myuse.split(), flat=True)
+
+ # Ensure that we don't trigger the _treeVirtuals
+ # assertion in VirtualsManager._compile_virtuals().
+ self.getvirtuals()
+ self._virtuals_manager.add_depgraph_virtuals(mycpv, virts)
+
+ def reload(self):
+ """Reload things like /etc/profile.env that can change during runtime."""
+ env_d_filename = os.path.join(self["EROOT"], "etc", "profile.env")
+ self.configdict["env.d"].clear()
+ env_d = getconfig(env_d_filename, expand=False)
+ if env_d:
+ # env_d will be None if profile.env doesn't exist.
+ self.configdict["env.d"].update(env_d)
+
+ def regenerate(self, useonly=0, use_cache=None):
+ """
+ Regenerate settings
+		This involves regenerating valid USE flags, re-expanding USE_EXPAND flags,
+		re-stacking USE flags (-flag and -*), as well as any other INCREMENTAL
+		variables. This also updates the env.d configdict; useful in case an ebuild
+		changes the environment.
+
+		If FEATURES has already been stacked, it is not stacked twice.
+
+ @param useonly: Only regenerate USE flags (not any other incrementals)
+ @type useonly: Boolean
+ @rtype: None
+ """
+
+ if use_cache is not None:
+ warnings.warn("The use_cache parameter for config.regenerate() is deprecated and without effect.",
+ DeprecationWarning, stacklevel=2)
+
+ self.modifying()
+
+ if useonly:
+ myincrementals=["USE"]
+ else:
+ myincrementals = self.incrementals
+ myincrementals = set(myincrementals)
+
+ # Process USE last because it depends on USE_EXPAND which is also
+ # an incremental!
+ myincrementals.discard("USE")
+
+ mydbs = self.configlist[:-1]
+ mydbs.append(self.backupenv)
+
+		# ACCEPT_LICENSE is a lazily evaluated incremental, so that * can be
+		# used to match all licenses without ever having to explicitly expand
+		# it to all licenses.
+ if self.local_config:
+ mysplit = []
+ for curdb in mydbs:
+ mysplit.extend(curdb.get('ACCEPT_LICENSE', '').split())
+ mysplit = prune_incremental(mysplit)
+ accept_license_str = ' '.join(mysplit)
+ self.configlist[-1]['ACCEPT_LICENSE'] = accept_license_str
+ self._license_manager.set_accept_license_str(accept_license_str)
+ else:
+ # repoman will accept any license
+ self._license_manager.set_accept_license_str("*")
+
+ # ACCEPT_PROPERTIES works like ACCEPT_LICENSE, without groups
+ if self.local_config:
+ mysplit = []
+ for curdb in mydbs:
+ mysplit.extend(curdb.get('ACCEPT_PROPERTIES', '').split())
+ mysplit = prune_incremental(mysplit)
+ self.configlist[-1]['ACCEPT_PROPERTIES'] = ' '.join(mysplit)
+ if tuple(mysplit) != self._accept_properties:
+ self._accept_properties = tuple(mysplit)
+ else:
+ # repoman will accept any property
+ self._accept_properties = ('*',)
+
+ increment_lists = {}
+ for k in myincrementals:
+ incremental_list = []
+ increment_lists[k] = incremental_list
+ for curdb in mydbs:
+ v = curdb.get(k)
+ if v is not None:
+ incremental_list.append(v.split())
+
+ if 'FEATURES' in increment_lists:
+ increment_lists['FEATURES'].append(self._features_overrides)
+
+ myflags = set()
+ for mykey, incremental_list in increment_lists.items():
+
+ myflags.clear()
+ for mysplit in incremental_list:
+
+ for x in mysplit:
+ if x=="-*":
+ # "-*" is a special "minus" var that means "unset all settings".
+ # so USE="-* gnome" will have *just* gnome enabled.
+ myflags.clear()
+ continue
+
+ if x[0]=="+":
+ # Not legal. People assume too much. Complain.
+ writemsg(colorize("BAD",
+ _("%s values should not start with a '+': %s") % (mykey,x)) \
+ + "\n", noiselevel=-1)
+ x=x[1:]
+ if not x:
+ continue
+
+ if (x[0]=="-"):
+ myflags.discard(x[1:])
+ continue
+
+ # We got here, so add it now.
+ myflags.add(x)
+
+ #store setting in last element of configlist, the original environment:
+ if myflags or mykey in self:
+ self.configlist[-1][mykey] = " ".join(sorted(myflags))
+
+ # Do the USE calculation last because it depends on USE_EXPAND.
+ use_expand = self.get("USE_EXPAND", "").split()
+ use_expand_dict = self._use_expand_dict
+ use_expand_dict.clear()
+ for k in use_expand:
+ v = self.get(k)
+ if v is not None:
+ use_expand_dict[k] = v
+
+		# In order to best accommodate the long-standing practice of
+		# setting default USE_EXPAND variables in the profile's
+		# make.defaults, we translate these variables into their
+		# equivalent USE flags so that useful incremental behavior
+		# is enabled (for sub-profiles).
+ configdict_defaults = self.configdict['defaults']
+ if self._make_defaults is not None:
+ for i, cfg in enumerate(self._make_defaults):
+ if not cfg:
+ self.make_defaults_use.append("")
+ continue
+ use = cfg.get("USE", "")
+ expand_use = []
+ for k in use_expand_dict:
+ v = cfg.get(k)
+ if v is None:
+ continue
+ prefix = k.lower() + '_'
+ if k in myincrementals:
+ for x in v.split():
+ if x[:1] == '-':
+ expand_use.append('-' + prefix + x[1:])
+ else:
+ expand_use.append(prefix + x)
+ else:
+ for x in v.split():
+ expand_use.append(prefix + x)
+ if expand_use:
+ expand_use.append(use)
+ use = ' '.join(expand_use)
+ self.make_defaults_use.append(use)
+ self.make_defaults_use = tuple(self.make_defaults_use)
+ configdict_defaults['USE'] = ' '.join(
+ stack_lists([x.split() for x in self.make_defaults_use]))
+ # Set to None so this code only runs once.
+ self._make_defaults = None
+
+ if not self.uvlist:
+ for x in self["USE_ORDER"].split(":"):
+ if x in self.configdict:
+ self.uvlist.append(self.configdict[x])
+ self.uvlist.reverse()
+
+ # For optimal performance, use slice
+ # comparison instead of startswith().
+ iuse = self.configdict["pkg"].get("IUSE")
+ if iuse is not None:
+ iuse = [x.lstrip("+-") for x in iuse.split()]
+ myflags = set()
+ for curdb in self.uvlist:
+ cur_use_expand = [x for x in use_expand if x in curdb]
+ mysplit = curdb.get("USE", "").split()
+ if not mysplit and not cur_use_expand:
+ continue
+ for x in mysplit:
+ if x == "-*":
+ myflags.clear()
+ continue
+
+ if x[0] == "+":
+ writemsg(colorize("BAD", _("USE flags should not start "
+ "with a '+': %s\n") % x), noiselevel=-1)
+ x = x[1:]
+ if not x:
+ continue
+
+ if x[0] == "-":
+ if x[-2:] == '_*':
+ prefix = x[1:-1]
+ prefix_len = len(prefix)
+ myflags.difference_update(
+ [y for y in myflags if \
+ y[:prefix_len] == prefix])
+ myflags.discard(x[1:])
+ continue
+
+ if iuse is not None and x[-2:] == '_*':
+ # Expand wildcards here, so that cases like
+ # USE="linguas_* -linguas_en_US" work correctly.
+ prefix = x[:-1]
+ prefix_len = len(prefix)
+ has_iuse = False
+ for y in iuse:
+ if y[:prefix_len] == prefix:
+ has_iuse = True
+ myflags.add(y)
+ if not has_iuse:
+ # There are no matching IUSE, so allow the
+ # wildcard to pass through. This allows
+ # linguas_* to trigger unset LINGUAS in
+ # cases when no linguas_ flags are in IUSE.
+ myflags.add(x)
+ else:
+ myflags.add(x)
+
+ if curdb is configdict_defaults:
+ # USE_EXPAND flags from make.defaults are handled
+ # earlier, in order to provide useful incremental
+ # behavior (for sub-profiles).
+ continue
+
+ for var in cur_use_expand:
+ var_lower = var.lower()
+ is_not_incremental = var not in myincrementals
+ if is_not_incremental:
+ prefix = var_lower + "_"
+ prefix_len = len(prefix)
+ for x in list(myflags):
+ if x[:prefix_len] == prefix:
+ myflags.remove(x)
+ for x in curdb[var].split():
+ if x[0] == "+":
+ if is_not_incremental:
+ writemsg(colorize("BAD", _("Invalid '+' "
+ "operator in non-incremental variable "
+ "'%s': '%s'\n") % (var, x)), noiselevel=-1)
+ continue
+ else:
+ writemsg(colorize("BAD", _("Invalid '+' "
+ "operator in incremental variable "
+ "'%s': '%s'\n") % (var, x)), noiselevel=-1)
+ x = x[1:]
+ if x[0] == "-":
+ if is_not_incremental:
+ writemsg(colorize("BAD", _("Invalid '-' "
+ "operator in non-incremental variable "
+ "'%s': '%s'\n") % (var, x)), noiselevel=-1)
+ continue
+ myflags.discard(var_lower + "_" + x[1:])
+ continue
+ myflags.add(var_lower + "_" + x)
+
+ if hasattr(self, "features"):
+ self.features._features.clear()
+ else:
+ self.features = features_set(self)
+ self.features._features.update(self.get('FEATURES', '').split())
+ self.features._sync_env_var()
+ self.features._validate()
+
+ myflags.update(self.useforce)
+ arch = self.configdict["defaults"].get("ARCH")
+ if arch:
+ myflags.add(arch)
+
+ myflags.difference_update(self.usemask)
+ self.configlist[-1]["USE"]= " ".join(sorted(myflags))
+
+ if self.mycpv is None:
+ # Generate global USE_EXPAND variables settings that are
+ # consistent with USE, for display by emerge --info. For
+ # package instances, these are instead generated via
+ # setcpv().
+ for k in use_expand:
+ prefix = k.lower() + '_'
+ prefix_len = len(prefix)
+ expand_flags = set( x[prefix_len:] for x in myflags \
+ if x[:prefix_len] == prefix )
+ var_split = use_expand_dict.get(k, '').split()
+ var_split = [ x for x in var_split if x in expand_flags ]
+ var_split.extend(sorted(expand_flags.difference(var_split)))
+ if var_split:
+ self.configlist[-1][k] = ' '.join(var_split)
+ elif k in self:
+ self.configlist[-1][k] = ''
+
+ @property
+ def virts_p(self):
+ warnings.warn("portage config.virts_p attribute " + \
+ "is deprecated, use config.get_virts_p()",
+ DeprecationWarning, stacklevel=2)
+ return self.get_virts_p()
+
+ @property
+ def virtuals(self):
+ warnings.warn("portage config.virtuals attribute " + \
+ "is deprecated, use config.getvirtuals()",
+ DeprecationWarning, stacklevel=2)
+ return self.getvirtuals()
+
+ def get_virts_p(self):
+ # Ensure that we don't trigger the _treeVirtuals
+ # assertion in VirtualsManager._compile_virtuals().
+ self.getvirtuals()
+ return self._virtuals_manager.get_virts_p()
+
+ def getvirtuals(self):
+ if self._virtuals_manager._treeVirtuals is None:
+ # Hack around the fact that VirtualsManager needs a vartree
+ # and vartree needs a config instance.
+ # This code should be part of VirtualsManager.getvirtuals().
+ if self.local_config:
+ temp_vartree = vartree(settings=self)
+ self._virtuals_manager._populate_treeVirtuals(temp_vartree)
+ else:
+ self._virtuals_manager._treeVirtuals = {}
+
+ return self._virtuals_manager.getvirtuals()
+
+ def _populate_treeVirtuals_if_needed(self, vartree):
+ """Reduce the provides into a list by CP."""
+ if self._virtuals_manager._treeVirtuals is None:
+ if self.local_config:
+ self._virtuals_manager._populate_treeVirtuals(vartree)
+ else:
+ self._virtuals_manager._treeVirtuals = {}
+
+ def __delitem__(self,mykey):
+ self.modifying()
+ for x in self.lookuplist:
+ if x is not None:
+ if mykey in x:
+ del x[mykey]
+
+ def __getitem__(self,mykey):
+ for d in self.lookuplist:
+ if mykey in d:
+ return d[mykey]
+ return '' # for backward compat, don't raise KeyError
+
+ def get(self, k, x=None):
+ for d in self.lookuplist:
+ if k in d:
+ return d[k]
+ return x
+
+ def pop(self, key, *args):
+ if len(args) > 1:
+ raise TypeError(
+ "pop expected at most 2 arguments, got " + \
+ repr(1 + len(args)))
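+ # Use this config instance itself as a sentinel value, so that a
+ # missing key can be distinguished from any value a dict could hold.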
+ v = self
+ for d in reversed(self.lookuplist):
+ v = d.pop(key, v)
+ if v is self:
+ if args:
+ return args[0]
+ raise KeyError(key)
+ return v
+
+ def __contains__(self, mykey):
+ """Called to implement membership test operators (in and not in)."""
+ for d in self.lookuplist:
+ if mykey in d:
+ return True
+ return False
+
+ def setdefault(self, k, x=None):
+ v = self.get(k)
+ if v is not None:
+ return v
+ else:
+ self[k] = x
+ return x
+
+ def keys(self):
+ return list(self)
+
+ def __iter__(self):
+ keys = set()
+ for d in self.lookuplist:
+ keys.update(d)
+ return iter(keys)
+
+ def iterkeys(self):
+ return iter(self)
+
+ def iteritems(self):
+ for k in self:
+ yield (k, self[k])
+
+ def items(self):
+ return list(self.iteritems())
+
+ def __setitem__(self,mykey,myvalue):
+ "set a value; will be thrown away at reset() time"
+ if not isinstance(myvalue, basestring):
+ raise ValueError("Invalid type being used as a value: '%s': '%s'" % (str(mykey),str(myvalue)))
+
+ # Avoid potential UnicodeDecodeError exceptions later.
+ mykey = _unicode_decode(mykey)
+ myvalue = _unicode_decode(myvalue)
+
+ self.modifying()
+ self.modifiedkeys.append(mykey)
+ self.configdict["env"][mykey]=myvalue
+
+ def environ(self):
+ "return our locally-maintained environment"
+ mydict={}
+ environ_filter = self._environ_filter
+
+ eapi = self.get('EAPI')
+ phase = self.get('EBUILD_PHASE')
+ filter_calling_env = False
+ if self.mycpv is not None and \
+ phase not in ('clean', 'cleanrm', 'depend', 'fetch'):
+ temp_dir = self.get('T')
+ if temp_dir is not None and \
+ os.path.exists(os.path.join(temp_dir, 'environment')):
+ filter_calling_env = True
+
+ environ_whitelist = self._environ_whitelist
+ for x in self:
+ if x in environ_filter:
+ continue
+ myvalue = self[x]
+ if not isinstance(myvalue, basestring):
+ writemsg(_("!!! Non-string value in config: %s=%s\n") % \
+ (x, myvalue), noiselevel=-1)
+ continue
+ if filter_calling_env and \
+ x not in environ_whitelist and \
+ not self._environ_whitelist_re.match(x):
+ # Do not allow anything to leak into the ebuild
+ # environment unless it is explicitly whitelisted.
+ # This ensures that variables unset by the ebuild
+ # remain unset (bug #189417).
+ continue
+ mydict[x] = myvalue
+ if "HOME" not in mydict and "BUILD_PREFIX" in mydict:
+ writemsg("*** HOME not set. Setting to "+mydict["BUILD_PREFIX"]+"\n")
+ mydict["HOME"]=mydict["BUILD_PREFIX"][:]
+
+ if filter_calling_env:
+ if phase:
+ whitelist = []
+ if "rpm" == phase:
+ whitelist.append("RPMDIR")
+ for k in whitelist:
+ v = self.get(k)
+ if v is not None:
+ mydict[k] = v
+
+ # At some point we may want to stop exporting FEATURES to the ebuild
+ # environment, in order to prevent ebuilds from abusing it. In
+ # preparation for that, export it as PORTAGE_FEATURES so that bashrc
+ # users will be able to migrate any FEATURES conditional code to
+ # use this alternative variable.
+ mydict["PORTAGE_FEATURES"] = self["FEATURES"]
+
+ # Filtered by IUSE and implicit IUSE.
+ mydict["USE"] = self.get("PORTAGE_USE", "")
+
+ # Don't export AA to the ebuild environment in EAPIs that forbid it
+ if not eapi_exports_AA(eapi):
+ mydict.pop("AA", None)
+
+ if not eapi_exports_merge_type(eapi):
+ mydict.pop("MERGE_TYPE", None)
+
+ # Prefix variables are supported starting with EAPI 3.
+ if phase == 'depend' or eapi is None or not eapi_supports_prefix(eapi):
+ mydict.pop("ED", None)
+ mydict.pop("EPREFIX", None)
+ mydict.pop("EROOT", None)
+
+ if phase == 'depend':
+ mydict.pop('FILESDIR', None)
+
+ if phase not in ("pretend", "setup", "preinst", "postinst") or \
+ not eapi_exports_replace_vars(eapi):
+ mydict.pop("REPLACING_VERSIONS", None)
+
+ if phase not in ("prerm", "postrm") or \
+ not eapi_exports_replace_vars(eapi):
+ mydict.pop("REPLACED_BY_VERSION", None)
+
+ return mydict
+
+ def thirdpartymirrors(self):
+ if getattr(self, "_thirdpartymirrors", None) is None:
+ profileroots = [os.path.join(self["PORTDIR"], "profiles")]
+ for x in shlex_split(self.get("PORTDIR_OVERLAY", "")):
+ profileroots.insert(0, os.path.join(x, "profiles"))
+ thirdparty_lists = [grabdict(os.path.join(x, "thirdpartymirrors")) for x in profileroots]
+ self._thirdpartymirrors = stack_dictlist(thirdparty_lists, incremental=True)
+ return self._thirdpartymirrors
+
+ def archlist(self):
+ _archlist = []
+ for myarch in self["PORTAGE_ARCHLIST"].split():
+ _archlist.append(myarch)
+ _archlist.append("~" + myarch)
+ return _archlist
+
+ def selinux_enabled(self):
+ if getattr(self, "_selinux_enabled", None) is None:
+ self._selinux_enabled = 0
+ if "selinux" in self["USE"].split():
+ if selinux:
+ if selinux.is_selinux_enabled() == 1:
+ self._selinux_enabled = 1
+ else:
+ self._selinux_enabled = 0
+ else:
+ writemsg(_("!!! SELinux module not found. Please verify that it was installed.\n"),
+ noiselevel=-1)
+ self._selinux_enabled = 0
+
+ return self._selinux_enabled
+
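+ # Under Python 3, keys() and items() yield iterators instead of lists.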
+ if sys.hexversion >= 0x3000000:
+ keys = __iter__
+ items = iteritems
diff --git a/portage_with_autodep/pym/portage/package/ebuild/deprecated_profile_check.py b/portage_with_autodep/pym/portage/package/ebuild/deprecated_profile_check.py
new file mode 100644
index 0000000..3fab4da
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/deprecated_profile_check.py
@@ -0,0 +1,42 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['deprecated_profile_check']
+
+import io
+
+from portage import os, _encodings, _unicode_encode
+from portage.const import DEPRECATED_PROFILE_FILE
+from portage.localization import _
+from portage.output import colorize
+from portage.util import writemsg
+
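+# The deprecated marker file's first line names the suggested replacement
+# profile; any remaining lines are printed verbatim as upgrade steps.
+# Illustrative contents (the profile name is hypothetical):
+#
+#     default/linux/x86/10.0
+#     1. emerge --sync
+#     2. eselect profile list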
+def deprecated_profile_check(settings=None):
+ config_root = "/"
+ if settings is not None:
+ config_root = settings["PORTAGE_CONFIGROOT"]
+ deprecated_profile_file = os.path.join(config_root,
+ DEPRECATED_PROFILE_FILE)
+ if not os.access(deprecated_profile_file, os.R_OK):
+ return False
+ dcontent = io.open(_unicode_encode(deprecated_profile_file,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='replace').readlines()
+ writemsg(colorize("BAD", _("\n!!! Your current profile is "
+ "deprecated and not supported anymore.")) + "\n", noiselevel=-1)
+ writemsg(colorize("BAD", _("!!! Use eselect profile to update your "
+ "profile.")) + "\n", noiselevel=-1)
+ if not dcontent:
+ writemsg(colorize("BAD", _("!!! Please refer to the "
+ "Gentoo Upgrading Guide.")) + "\n", noiselevel=-1)
+ return True
+ newprofile = dcontent[0]
+ writemsg(colorize("BAD", _("!!! Please upgrade to the "
+ "following profile if possible:")) + "\n", noiselevel=-1)
+ writemsg(8*" " + colorize("GOOD", newprofile) + "\n", noiselevel=-1)
+ if len(dcontent) > 1:
+ writemsg(_("To upgrade do the following steps:\n"), noiselevel=-1)
+ for myline in dcontent[1:]:
+ writemsg(myline, noiselevel=-1)
+ writemsg("\n\n", noiselevel=-1)
+ return True
diff --git a/portage_with_autodep/pym/portage/package/ebuild/digestcheck.py b/portage_with_autodep/pym/portage/package/ebuild/digestcheck.py
new file mode 100644
index 0000000..1e34b14
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/digestcheck.py
@@ -0,0 +1,167 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['digestcheck']
+
+import warnings
+
+from portage import os, _encodings, _unicode_decode
+from portage.exception import DigestException, FileNotFound
+from portage.localization import _
+from portage.manifest import Manifest
+from portage.output import EOutput
+from portage.util import writemsg
+
+def digestcheck(myfiles, mysettings, strict=False, justmanifest=None, mf=None):
+ """
+ Verifies checksums. Assumes all files have been downloaded.
+ @rtype: int
+ @returns: 1 on success and 0 on failure
+ """
+
+ if justmanifest is not None:
+ warnings.warn("The justmanifest parameter of the " + \
+ "portage.package.ebuild.digestcheck.digestcheck()" + \
+ " function is now unused.",
+ DeprecationWarning, stacklevel=2)
+ justmanifest = None
+
+ if mysettings.get("EBUILD_SKIP_MANIFEST") == "1":
+ return 1
+ allow_missing = "allow-missing-manifests" in mysettings.features
+ pkgdir = mysettings["O"]
+ manifest_path = os.path.join(pkgdir, "Manifest")
+ if not os.path.exists(manifest_path):
+ if allow_missing:
+ return 1
+ writemsg(_("!!! Manifest file not found: '%s'\n") % manifest_path,
+ noiselevel=-1)
+ if strict:
+ return 0
+ else:
+ return 1
+ if mf is None:
+ mf = Manifest(pkgdir, mysettings["DISTDIR"])
+ manifest_empty = True
+ for d in mf.fhashdict.values():
+ if d:
+ manifest_empty = False
+ break
+ if manifest_empty:
+ writemsg(_("!!! Manifest is empty: '%s'\n") % manifest_path,
+ noiselevel=-1)
+ if strict:
+ return 0
+ else:
+ return 1
+ eout = EOutput()
+ eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
+ try:
+ if strict and "PORTAGE_PARALLEL_FETCHONLY" not in mysettings:
+ eout.ebegin(_("checking ebuild checksums ;-)"))
+ mf.checkTypeHashes("EBUILD")
+ eout.eend(0)
+ eout.ebegin(_("checking auxfile checksums ;-)"))
+ mf.checkTypeHashes("AUX")
+ eout.eend(0)
+ eout.ebegin(_("checking miscfile checksums ;-)"))
+ mf.checkTypeHashes("MISC", ignoreMissingFiles=True)
+ eout.eend(0)
+ for f in myfiles:
+ eout.ebegin(_("checking %s ;-)") % f)
+ ftype = mf.findFile(f)
+ if ftype is None:
+ eout.eend(1)
+ writemsg(_("\n!!! Missing digest for '%s'\n") % (f,),
+ noiselevel=-1)
+ return 0
+ mf.checkFileHashes(ftype, f)
+ eout.eend(0)
+ except FileNotFound as e:
+ eout.eend(1)
+ writemsg(_("\n!!! A file listed in the Manifest could not be found: %s\n") % str(e),
+ noiselevel=-1)
+ return 0
+ except DigestException as e:
+ eout.eend(1)
+ writemsg(_("\n!!! Digest verification failed:\n"), noiselevel=-1)
+ writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
+ writemsg(_("!!! Reason: %s\n") % e.value[1], noiselevel=-1)
+ writemsg(_("!!! Got: %s\n") % e.value[2], noiselevel=-1)
+ writemsg(_("!!! Expected: %s\n") % e.value[3], noiselevel=-1)
+ return 0
+ if allow_missing:
+ # In this case we ignore any missing digests that
+ # would otherwise be detected below.
+ return 1
+ # Make sure that all of the ebuilds are actually listed in the Manifest.
+ for f in os.listdir(pkgdir):
+ pf = None
+ if f[-7:] == '.ebuild':
+ pf = f[:-7]
+ if pf is not None and not mf.hasFile("EBUILD", f):
+ writemsg(_("!!! A file is not listed in the Manifest: '%s'\n") % \
+ os.path.join(pkgdir, f), noiselevel=-1)
+ if strict:
+ return 0
+ # epatch will just grab all the patches out of a directory, so we have to
+ # make sure there aren't any foreign files that it might grab.
+ filesdir = os.path.join(pkgdir, "files")
+
+ for parent, dirs, files in os.walk(filesdir):
+ try:
+ parent = _unicode_decode(parent,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ parent = _unicode_decode(parent,
+ encoding=_encodings['fs'], errors='replace')
+ writemsg(_("!!! Path contains invalid "
+ "character(s) for encoding '%s': '%s'") \
+ % (_encodings['fs'], parent), noiselevel=-1)
+ if strict:
+ return 0
+ continue
+ for d in dirs:
+ d_bytes = d
+ try:
+ d = _unicode_decode(d,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ d = _unicode_decode(d,
+ encoding=_encodings['fs'], errors='replace')
+ writemsg(_("!!! Path contains invalid "
+ "character(s) for encoding '%s': '%s'") \
+ % (_encodings['fs'], os.path.join(parent, d)),
+ noiselevel=-1)
+ if strict:
+ return 0
+ dirs.remove(d_bytes)
+ continue
+ if d.startswith(".") or d == "CVS":
+ dirs.remove(d_bytes)
+ for f in files:
+ try:
+ f = _unicode_decode(f,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ f = _unicode_decode(f,
+ encoding=_encodings['fs'], errors='replace')
+ if f.startswith("."):
+ continue
+ f = os.path.join(parent, f)[len(filesdir) + 1:]
+ writemsg(_("!!! File name contains invalid "
+ "character(s) for encoding '%s': '%s'") \
+ % (_encodings['fs'], f), noiselevel=-1)
+ if strict:
+ return 0
+ continue
+ if f.startswith("."):
+ continue
+ f = os.path.join(parent, f)[len(filesdir) + 1:]
+ file_type = mf.findFile(f)
+ if file_type != "AUX" and not f.startswith("digest-"):
+ writemsg(_("!!! A file is not listed in the Manifest: '%s'\n") % \
+ os.path.join(filesdir, f), noiselevel=-1)
+ if strict:
+ return 0
+ return 1
diff --git a/portage_with_autodep/pym/portage/package/ebuild/digestgen.py b/portage_with_autodep/pym/portage/package/ebuild/digestgen.py
new file mode 100644
index 0000000..eb7210e
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/digestgen.py
@@ -0,0 +1,202 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['digestgen']
+
+import errno
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.package.ebuild._spawn_nofetch:spawn_nofetch',
+)
+
+from portage import os
+from portage.const import MANIFEST2_REQUIRED_HASH
+from portage.dbapi.porttree import FetchlistDict
+from portage.dep import use_reduce
+from portage.exception import InvalidDependString, FileNotFound, \
+ PermissionDenied, PortagePackageException
+from portage.localization import _
+from portage.manifest import Manifest
+from portage.output import colorize
+from portage.package.ebuild.fetch import fetch
+from portage.util import writemsg, writemsg_stdout
+from portage.versions import catsplit
+
+def digestgen(myarchives=None, mysettings=None, myportdb=None):
+ """
+ Generates a digest file if missing. Fetches files if necessary.
+ NOTE: myarchives and mysettings used to be positional arguments,
+ so their order must be preserved for backward compatibility.
+ @param mysettings: the ebuild config (mysettings["O"] must correspond
+ to the ebuild's parent directory)
+ @type mysettings: config
+ @param myportdb: a portdbapi instance
+ @type myportdb: portdbapi
+ @rtype: int
+ @returns: 1 on success and 0 on failure
+ """
+ if mysettings is None or myportdb is None:
+ raise TypeError("portage.digestgen(): 'mysettings' and 'myportdb' parameter are required.")
+
+ try:
+ portage._doebuild_manifest_exempt_depend += 1
+ distfiles_map = {}
+ fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
+ for cpv in fetchlist_dict:
+ try:
+ for myfile in fetchlist_dict[cpv]:
+ distfiles_map.setdefault(myfile, []).append(cpv)
+ except InvalidDependString as e:
+ writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ del e
+ return 0
+ mytree = os.path.dirname(os.path.dirname(mysettings["O"]))
+ manifest1_compat = False
+ mf = Manifest(mysettings["O"], mysettings["DISTDIR"],
+ fetchlist_dict=fetchlist_dict, manifest1_compat=manifest1_compat)
+ # Don't require all hashes since that can trigger excessive
+ # fetches when sufficient digests already exist. To ease transition
+ # while Manifest 1 is being removed, only require hashes that will
+ # exist before and after the transition.
+ required_hash_types = set()
+ required_hash_types.add("size")
+ required_hash_types.add(MANIFEST2_REQUIRED_HASH)
+ dist_hashes = mf.fhashdict.get("DIST", {})
+
+ # To avoid accidental regeneration of digests with the incorrect
+ # files (such as partially downloaded files), trigger the fetch
+ # code if the file exists and its size doesn't match the current
+ # manifest entry. If there really is a legitimate reason for the
+ # digest to change, `ebuild --force digest` can be used to avoid
+ # triggering this code (or else the old digests can be manually
+ # removed from the Manifest).
+ missing_files = []
+ for myfile in distfiles_map:
+ myhashes = dist_hashes.get(myfile)
+ if not myhashes:
+ try:
+ st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
+ except OSError:
+ st = None
+ if st is None or st.st_size == 0:
+ missing_files.append(myfile)
+ continue
+ size = myhashes.get("size")
+
+ try:
+ st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ if size == 0:
+ missing_files.append(myfile)
+ continue
+ if required_hash_types.difference(myhashes):
+ missing_files.append(myfile)
+ continue
+ else:
+ if st.st_size == 0 or (size is not None and size != st.st_size):
+ missing_files.append(myfile)
+ continue
+
+ if missing_files:
+ mytree = os.path.realpath(os.path.dirname(
+ os.path.dirname(mysettings["O"])))
+ for myfile in missing_files:
+ uris = set()
+ all_restrict = set()
+ for cpv in distfiles_map[myfile]:
+ uris.update(myportdb.getFetchMap(
+ cpv, mytree=mytree)[myfile])
+ restrict = myportdb.aux_get(cpv, ['RESTRICT'],
+ mytree=mytree)[0]
+ # Here we ignore conditional parts of RESTRICT since
+ # they don't apply unconditionally. Assume such
+ # conditionals only apply on the client side where
+ # digestgen() does not need to be called.
+ all_restrict.update(use_reduce(restrict,
+ flat=True, matchnone=True))
+
+ # fetch() uses CATEGORY and PF to display a message
+ # when fetch restriction is triggered.
+ cat, pf = catsplit(cpv)
+ mysettings["CATEGORY"] = cat
+ mysettings["PF"] = pf
+
+ # fetch() uses PORTAGE_RESTRICT to control fetch
+ # restriction, which is only applied to files that
+ # are not fetchable via a mirror:// URI.
+ mysettings["PORTAGE_RESTRICT"] = " ".join(all_restrict)
+
+ try:
+ st = os.stat(os.path.join(
+ mysettings["DISTDIR"],myfile))
+ except OSError:
+ st = None
+
+ if not fetch({myfile : uris}, mysettings):
+ myebuild = os.path.join(mysettings["O"],
+ catsplit(cpv)[1] + ".ebuild")
+ spawn_nofetch(myportdb, myebuild,
+ settings=mysettings)
+ writemsg(_("!!! Fetch failed for %s, can't update "
+ "Manifest\n") % myfile, noiselevel=-1)
+ if myfile in dist_hashes and \
+ st is not None and st.st_size > 0:
+ # stat result is obtained before calling fetch(),
+ # since fetch may rename the existing file if the
+ # digest does not match.
+ writemsg(_("!!! If you would like to "
+ "forcefully replace the existing "
+ "Manifest entry\n!!! for %s, use "
+ "the following command:\n") % myfile + \
+ "!!! " + colorize("INFORM",
+ "ebuild --force %s manifest" % \
+ os.path.basename(myebuild)) + "\n",
+ noiselevel=-1)
+ return 0
+ writemsg_stdout(_(">>> Creating Manifest for %s\n") % mysettings["O"])
+ try:
+ mf.create(assumeDistHashesSometimes=True,
+ assumeDistHashesAlways=(
+ "assume-digests" in mysettings.features))
+ except FileNotFound as e:
+ writemsg(_("!!! File %s doesn't exist, can't update "
+ "Manifest\n") % e, noiselevel=-1)
+ return 0
+ except PortagePackageException as e:
+ writemsg(("!!! %s\n") % (e,), noiselevel=-1)
+ return 0
+ try:
+ mf.write(sign=False)
+ except PermissionDenied as e:
+ writemsg(_("!!! Permission Denied: %s\n") % (e,), noiselevel=-1)
+ return 0
+ if "assume-digests" not in mysettings.features:
+ distlist = list(mf.fhashdict.get("DIST", {}))
+ distlist.sort()
+ auto_assumed = []
+ for filename in distlist:
+ if not os.path.exists(
+ os.path.join(mysettings["DISTDIR"], filename)):
+ auto_assumed.append(filename)
+ if auto_assumed:
+ mytree = os.path.realpath(
+ os.path.dirname(os.path.dirname(mysettings["O"])))
+ cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
+ pkgs = myportdb.cp_list(cp, mytree=mytree)
+ pkgs.sort()
+ writemsg_stdout(" digest.assumed" + colorize("WARN",
+ str(len(auto_assumed)).rjust(18)) + "\n")
+ for pkg_key in pkgs:
+ fetchlist = myportdb.getFetchMap(pkg_key, mytree=mytree)
+ pv = pkg_key.split("/")[1]
+ for filename in auto_assumed:
+ if filename in fetchlist:
+ writemsg_stdout(
+ " %s::%s\n" % (pv, filename))
+ return 1
+ finally:
+ portage._doebuild_manifest_exempt_depend -= 1
diff --git a/portage_with_autodep/pym/portage/package/ebuild/doebuild.py b/portage_with_autodep/pym/portage/package/ebuild/doebuild.py
new file mode 100644
index 0000000..c76c1ed
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/doebuild.py
@@ -0,0 +1,1791 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['doebuild', 'doebuild_environment', 'spawn', 'spawnebuild']
+
+import gzip
+import errno
+import io
+from itertools import chain
+import logging
+import os as _os
+import re
+import shutil
+import signal
+import stat
+import sys
+import tempfile
+from textwrap import wrap
+import time
+import warnings
+import zlib
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.package.ebuild.config:check_config_instance',
+ 'portage.package.ebuild.digestcheck:digestcheck',
+ 'portage.package.ebuild.digestgen:digestgen',
+ 'portage.package.ebuild.fetch:fetch',
+ 'portage.package.ebuild._spawn_nofetch:spawn_nofetch',
+ 'portage.util.ExtractKernelVersion:ExtractKernelVersion'
+)
+
+from portage import auxdbkeys, bsd_chflags, \
+ eapi_is_supported, merge, os, selinux, \
+ unmerge, _encodings, _parse_eapi_ebuild_head, _os_merge, \
+ _shell_quote, _unicode_decode, _unicode_encode
+from portage.const import EBUILD_SH_ENV_FILE, EBUILD_SH_ENV_DIR, \
+ EBUILD_SH_BINARY, INVALID_ENV_FILE, MISC_SH_BINARY
+from portage.data import portage_gid, portage_uid, secpass, \
+ uid, userpriv_groups
+from portage.dbapi.porttree import _parse_uri_map
+from portage.dep import Atom, check_required_use, \
+ human_readable_required_use, paren_enclose, use_reduce
+from portage.eapi import eapi_exports_KV, eapi_exports_merge_type, \
+ eapi_exports_replace_vars, eapi_has_required_use, \
+ eapi_has_src_prepare_and_src_configure, eapi_has_pkg_pretend
+from portage.elog import elog_process
+from portage.elog.messages import eerror, eqawarn
+from portage.exception import DigestException, FileNotFound, \
+ IncorrectParameter, InvalidDependString, PermissionDenied, \
+ UnsupportedAPIException
+from portage.localization import _
+from portage.manifest import Manifest
+from portage.output import style_to_ansi_code
+from portage.package.ebuild.prepare_build_dirs import prepare_build_dirs
+from portage.util import apply_recursive_permissions, \
+ apply_secpass_permissions, noiselimit, normalize_path, \
+ writemsg, writemsg_stdout, write_atomic
+from portage.util.lafilefixer import rewrite_lafile
+from portage.versions import _pkgsplit
+from _emerge.BinpkgEnvExtractor import BinpkgEnvExtractor
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.EbuildSpawnProcess import EbuildSpawnProcess
+from _emerge.Package import Package
+from _emerge.PollScheduler import PollScheduler
+from _emerge.RootConfig import RootConfig
+
+_unsandboxed_phases = frozenset([
+ "clean", "cleanrm", "config",
+ "help", "info", "postinst",
+ "preinst", "pretend", "postrm",
+ "prerm", "setup"
+])
+
+def _doebuild_spawn(phase, settings, actionmap=None, **kwargs):
+ """
+ All proper ebuild phases which execute ebuild.sh are spawned
+ via this function. No exceptions.
+ """
+
+ if phase in _unsandboxed_phases:
+ kwargs['free'] = True
+
+ if phase == 'depend':
+ kwargs['droppriv'] = 'userpriv' in settings.features
+
+ if actionmap is not None and phase in actionmap:
+ kwargs.update(actionmap[phase]["args"])
+ cmd = actionmap[phase]["cmd"] % phase
+ else:
+ if phase == 'cleanrm':
+ ebuild_sh_arg = 'clean'
+ else:
+ ebuild_sh_arg = phase
+
+ cmd = "%s %s" % (_shell_quote(
+ os.path.join(settings["PORTAGE_BIN_PATH"],
+ os.path.basename(EBUILD_SH_BINARY))),
+ ebuild_sh_arg)
+
+ settings['EBUILD_PHASE'] = phase
+ try:
+ return spawn(cmd, settings, **kwargs)
+ finally:
+ settings.pop('EBUILD_PHASE', None)
+
+def _spawn_phase(phase, settings, actionmap=None, **kwargs):
+ if kwargs.get('returnpid'):
+ return _doebuild_spawn(phase, settings, actionmap=actionmap, **kwargs)
+
+ ebuild_phase = EbuildPhase(actionmap=actionmap, background=False,
+ phase=phase, scheduler=PollScheduler().sched_iface,
+ settings=settings)
+ ebuild_phase.start()
+ ebuild_phase.wait()
+ return ebuild_phase.returncode
+
+def doebuild_environment(myebuild, mydo, myroot=None, settings=None,
+ debug=False, use_cache=None, db=None):
+ """
+ Create and store environment variables in the config instance
+ that's passed in as the "settings" parameter. This will raise
+ UnsupportedAPIException if the given ebuild has an unsupported
+ EAPI. All EAPI dependent code comes last, so that essential
+ variables like PORTAGE_BUILDDIR are still initialized even in
+ cases when UnsupportedAPIException needs to be raised, which
+ can be useful when uninstalling a package that has corrupt
+ EAPI metadata.
+ The myroot and use_cache parameters are unused.
+ """
+ myroot = None
+ use_cache = None
+
+ if settings is None:
+ raise TypeError("settings argument is required")
+
+ if db is None:
+ raise TypeError("db argument is required")
+
+ mysettings = settings
+ mydbapi = db
+ ebuild_path = os.path.abspath(myebuild)
+ pkg_dir = os.path.dirname(ebuild_path)
+ mytree = os.path.dirname(os.path.dirname(pkg_dir))
+
+ if "CATEGORY" in mysettings.configdict["pkg"]:
+ cat = mysettings.configdict["pkg"]["CATEGORY"]
+ else:
+ cat = os.path.basename(normalize_path(os.path.join(pkg_dir, "..")))
+
+ mypv = os.path.basename(ebuild_path)[:-7]
+
+ mycpv = cat+"/"+mypv
+ mysplit = _pkgsplit(mypv)
+ if mysplit is None:
+ raise IncorrectParameter(
+ _("Invalid ebuild path: '%s'") % myebuild)
+
+ # Make a backup of PORTAGE_TMPDIR prior to calling config.reset()
+ # so that the caller can override it.
+ tmpdir = mysettings["PORTAGE_TMPDIR"]
+
+ if mydo == 'depend':
+ if mycpv != mysettings.mycpv:
+ # Don't pass in mydbapi here since the resulting aux_get
+ # call would lead to infinite 'depend' phase recursion.
+ mysettings.setcpv(mycpv)
+ else:
+ # If EAPI isn't in configdict["pkg"], it means that setcpv()
+ # hasn't been called with the mydb argument, so we have to
+ # call it here (portage code always calls setcpv properly,
+ # but api consumers might not).
+ if mycpv != mysettings.mycpv or \
+ "EAPI" not in mysettings.configdict["pkg"]:
+ # Reload env.d variables and reset any previous settings.
+ mysettings.reload()
+ mysettings.reset()
+ mysettings.setcpv(mycpv, mydb=mydbapi)
+
+ # config.reset() might have reverted a change made by the caller,
+ # so restore it to its original value. Sandbox needs canonical
+ # paths, so realpath it.
+ mysettings["PORTAGE_TMPDIR"] = os.path.realpath(tmpdir)
+
+ mysettings.pop("EBUILD_PHASE", None) # remove from backupenv
+ mysettings["EBUILD_PHASE"] = mydo
+
+ # Set requested Python interpreter for Portage helpers.
+ mysettings['PORTAGE_PYTHON'] = portage._python_interpreter
+
+ # This is used by assert_sigpipe_ok(), which is used by the ebuild
+ # unpack() helper. SIGPIPE is typically 13, but it's better not
+ # to assume that.
+ mysettings['PORTAGE_SIGPIPE_STATUS'] = str(128 + signal.SIGPIPE)
+
+ # We are disabling user-specific bashrc files.
+ mysettings["BASH_ENV"] = INVALID_ENV_FILE
+
+ if debug: # Otherwise it overrides emerge's settings.
+ # We have no other way to set debug... debug can't be passed in
+ # due to how it's coded... Don't overwrite this so we can use it.
+ mysettings["PORTAGE_DEBUG"] = "1"
+
+ mysettings["EBUILD"] = ebuild_path
+ mysettings["O"] = pkg_dir
+ mysettings.configdict["pkg"]["CATEGORY"] = cat
+ mysettings["FILESDIR"] = pkg_dir+"/files"
+ mysettings["PF"] = mypv
+
+ if hasattr(mydbapi, '_repo_info'):
+ repo_info = mydbapi._repo_info[mytree]
+ mysettings['PORTDIR'] = repo_info.portdir
+ mysettings['PORTDIR_OVERLAY'] = repo_info.portdir_overlay
+ mysettings.configdict["pkg"]["PORTAGE_REPO_NAME"] = repo_info.name
+
+ mysettings["PORTDIR"] = os.path.realpath(mysettings["PORTDIR"])
+ mysettings["DISTDIR"] = os.path.realpath(mysettings["DISTDIR"])
+ mysettings["RPMDIR"] = os.path.realpath(mysettings["RPMDIR"])
+
+ mysettings["ECLASSDIR"] = mysettings["PORTDIR"]+"/eclass"
+ mysettings["SANDBOX_LOG"] = mycpv.replace("/", "_-_")
+
+ mysettings["PROFILE_PATHS"] = "\n".join(mysettings.profiles)
+ mysettings["P"] = mysplit[0]+"-"+mysplit[1]
+ mysettings["PN"] = mysplit[0]
+ mysettings["PV"] = mysplit[1]
+ mysettings["PR"] = mysplit[2]
+
+ if noiselimit < 0:
+ mysettings["PORTAGE_QUIET"] = "1"
+
+ if mysplit[2] == "r0":
+ mysettings["PVR"]=mysplit[1]
+ else:
+ mysettings["PVR"]=mysplit[1]+"-"+mysplit[2]
+
+ if "PATH" in mysettings:
+ mysplit=mysettings["PATH"].split(":")
+ else:
+ mysplit=[]
+ # Note: PORTAGE_BIN_PATH may differ from the global constant
+ # when portage is reinstalling itself.
+ portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
+ if portage_bin_path not in mysplit:
+ mysettings["PATH"] = portage_bin_path + ":" + mysettings["PATH"]
+
+ # All temporary directories should be subdirectories of
+ # $PORTAGE_TMPDIR/portage, since it's common for /tmp and /var/tmp
+ # to be mounted with the "noexec" option (see bug #346899).
+ mysettings["BUILD_PREFIX"] = mysettings["PORTAGE_TMPDIR"]+"/portage"
+ mysettings["PKG_TMPDIR"] = mysettings["BUILD_PREFIX"]+"/._unmerge_"
+
+ # Package {pre,post}inst and {pre,post}rm may overlap, so they must have separate
+ # locations in order to prevent interference.
+ if mydo in ("unmerge", "prerm", "postrm", "cleanrm"):
+ mysettings["PORTAGE_BUILDDIR"] = os.path.join(
+ mysettings["PKG_TMPDIR"],
+ mysettings["CATEGORY"], mysettings["PF"])
+ else:
+ mysettings["PORTAGE_BUILDDIR"] = os.path.join(
+ mysettings["BUILD_PREFIX"],
+ mysettings["CATEGORY"], mysettings["PF"])
+
+ mysettings["HOME"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "homedir")
+ mysettings["WORKDIR"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "work")
+ mysettings["D"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "image") + os.sep
+ mysettings["T"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "temp")
+
+ # Prefix forward compatibility
+ mysettings["ED"] = mysettings["D"]
+
+ mysettings["PORTAGE_BASHRC"] = os.path.join(
+ mysettings["PORTAGE_CONFIGROOT"], EBUILD_SH_ENV_FILE)
+ mysettings["PM_EBUILD_HOOK_DIR"] = os.path.join(
+ mysettings["PORTAGE_CONFIGROOT"], EBUILD_SH_ENV_DIR)
+
+ # Allow color.map to control colors associated with einfo, ewarn, etc...
+ mycolors = []
+ for c in ("GOOD", "WARN", "BAD", "HILITE", "BRACKET"):
+ mycolors.append("%s=$'%s'" % \
+ (c, style_to_ansi_code(c)))
+ mysettings["PORTAGE_COLORMAP"] = "\n".join(mycolors)
+
+ # All EAPI dependent code comes last, so that essential variables
+ # like PORTAGE_BUILDDIR are still initialized even in cases when
+ # UnsupportedAPIException needs to be raised, which can be useful
+ # when uninstalling a package that has corrupt EAPI metadata.
+ eapi = None
+ if mydo == 'depend' and 'EAPI' not in mysettings.configdict['pkg']:
+ if eapi is None and 'parse-eapi-ebuild-head' in mysettings.features:
+ eapi = _parse_eapi_ebuild_head(
+ io.open(_unicode_encode(ebuild_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='replace'))
+
+ if eapi is not None:
+ if not eapi_is_supported(eapi):
+ raise UnsupportedAPIException(mycpv, eapi)
+ mysettings.configdict['pkg']['EAPI'] = eapi
+
+ if mydo != "depend":
+ # Metadata vars such as EAPI and RESTRICT are
+ # set by the above config.setcpv() call.
+ eapi = mysettings["EAPI"]
+ if not eapi_is_supported(eapi):
+ # can't do anything with this.
+ raise UnsupportedAPIException(mycpv, eapi)
+
+ if hasattr(mydbapi, "getFetchMap") and \
+ ("A" not in mysettings.configdict["pkg"] or \
+ "AA" not in mysettings.configdict["pkg"]):
+ src_uri, = mydbapi.aux_get(mysettings.mycpv,
+ ["SRC_URI"], mytree=mytree)
+ metadata = {
+ "EAPI" : eapi,
+ "SRC_URI" : src_uri,
+ }
+ use = frozenset(mysettings["PORTAGE_USE"].split())
+ try:
+ uri_map = _parse_uri_map(mysettings.mycpv, metadata, use=use)
+ except InvalidDependString:
+ mysettings.configdict["pkg"]["A"] = ""
+ else:
+ mysettings.configdict["pkg"]["A"] = " ".join(uri_map)
+
+ try:
+ uri_map = _parse_uri_map(mysettings.mycpv, metadata)
+ except InvalidDependString:
+ mysettings.configdict["pkg"]["AA"] = ""
+ else:
+ mysettings.configdict["pkg"]["AA"] = " ".join(uri_map)
+
+ if not eapi_exports_KV(eapi):
+ # Discard KV for EAPIs that don't support it. Cache KV is restored
+ # from the backupenv whenever config.reset() is called.
+ mysettings.pop('KV', None)
+ elif mydo != 'depend' and 'KV' not in mysettings and \
+ mydo in ('compile', 'config', 'configure', 'info',
+ 'install', 'nofetch', 'postinst', 'postrm', 'preinst',
+ 'prepare', 'prerm', 'setup', 'test', 'unpack'):
+ mykv, err1 = ExtractKernelVersion(
+ os.path.join(mysettings['EROOT'], "usr/src/linux"))
+ if mykv:
+ # Regular source tree
+ mysettings["KV"] = mykv
+ else:
+ mysettings["KV"] = ""
+ mysettings.backup_changes("KV")
+
+_doebuild_manifest_cache = None
+_doebuild_broken_ebuilds = set()
+_doebuild_broken_manifests = set()
+_doebuild_commands_without_builddir = (
+ 'clean', 'cleanrm', 'depend', 'digest',
+ 'fetch', 'fetchall', 'help', 'manifest'
+)
+
+def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
+ fetchonly=0, cleanup=0, dbkey=None, use_cache=1, fetchall=0, tree=None,
+ mydbapi=None, vartree=None, prev_mtimes=None,
+ fd_pipes=None, returnpid=False):
+ """
+ Wrapper function that invokes specific ebuild phases through the spawning
+ of ebuild.sh
+
+ @param myebuild: name of the ebuild to invoke the phase on (CPV)
+ @type myebuild: String
+ @param mydo: Phase to run
+ @type mydo: String
+ @param myroot: $ROOT (usually '/', see man make.conf)
+ @type myroot: String
+ @param mysettings: Portage Configuration
+ @type mysettings: instance of portage.config
+ @param debug: Turns on various debug information (eg, debug for spawn)
+ @type debug: Boolean
+ @param listonly: Used to wrap fetch(); passed such that fetch only lists files required.
+ @type listonly: Boolean
+ @param fetchonly: Used to wrap fetch(); passed such that files are only fetched (no other actions)
+ @type fetchonly: Boolean
+ @param cleanup: Passed to prepare_build_dirs (TODO: what does it do?)
+ @type cleanup: Boolean
+ @param dbkey: A file path where metadata generated by the 'depend' phase
+ will be written.
+ @type dbkey: String
+ @param use_cache: Enables the cache
+ @type use_cache: Boolean
+ @param fetchall: Used to wrap fetch(), fetches all URIs (even ones invalid due to USE conditionals)
+ @type fetchall: Boolean
+ @param tree: Which tree to use ('vartree','porttree','bintree', etc..), defaults to 'porttree'
+ @type tree: String
+ @param mydbapi: a dbapi instance to pass to various functions; this should be a portdbapi instance.
+ @type mydbapi: portdbapi instance
+ @param vartree: An instance of vartree; used for aux_get calls, defaults to db[myroot]['vartree']
+ @type vartree: vartree instance
+ @param prev_mtimes: A dict of { filename:mtime } keys used by merge() to do config_protection
+ @type prev_mtimes: dictionary
+ @param fd_pipes: A dict of mapping for pipes, { '0': stdin, '1': stdout }
+ for example.
+ @type fd_pipes: Dictionary
+ @param returnpid: Return a list of process IDs for a successful spawn, or
+ an integer value if spawn is unsuccessful. NOTE: This requires that the
+ caller clean up all returned PIDs.
+ @type returnpid: Boolean
+ @rtype: Boolean
+ @returns:
+ 1. 0 for success
+ 2. 1 for error
+
+ Most errors have an accompanying error message.
+
+ listonly and fetchonly are only really necessary for operations involving 'fetch';
+ prev_mtimes is only necessary for merge operations.
+ Other variables may not be strictly required; many have defaults that are set inside of doebuild.
+
+ """
+
+ if not tree:
+ writemsg("Warning: tree not specified to doebuild\n")
+ tree = "porttree"
+
+ # Dependencies chunked out for each phase, so that the ebuild binary
+ # can use them to collapse targets down.
+ actionmap_deps={
+ "pretend" : [],
+ "setup": ["pretend"],
+ "unpack": ["setup"],
+ "prepare": ["unpack"],
+ "configure": ["prepare"],
+ "compile":["configure"],
+ "test": ["compile"],
+ "install":["test"],
+ "rpm": ["install"],
+ "package":["install"],
+ }
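+ # e.g. requesting "compile" implies the chain
+ # pretend -> setup -> unpack -> prepare -> configure -> compile.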
+
+ if mydbapi is None:
+ mydbapi = portage.db[myroot][tree].dbapi
+
+ if vartree is None and mydo in ("merge", "qmerge", "unmerge"):
+ vartree = portage.db[myroot]["vartree"]
+
+ features = mysettings.features
+
+ clean_phases = ("clean", "cleanrm")
+ validcommands = ["help","clean","prerm","postrm","cleanrm","preinst","postinst",
+ "config", "info", "setup", "depend", "pretend",
+ "fetch", "fetchall", "digest",
+ "unpack", "prepare", "configure", "compile", "test",
+ "install", "rpm", "qmerge", "merge",
+ "package","unmerge", "manifest"]
+
+ if mydo not in validcommands:
+ validcommands.sort()
+ writemsg("!!! doebuild: '%s' is not one of the following valid commands:" % mydo,
+ noiselevel=-1)
+ for vcount in range(len(validcommands)):
+ if vcount%6 == 0:
+ writemsg("\n!!! ", noiselevel=-1)
+ writemsg(validcommands[vcount].ljust(11), noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+ return 1
+
+ if returnpid and mydo != 'depend':
+ warnings.warn("portage.doebuild() called " + \
+ "with returnpid parameter enabled. This usage will " + \
+ "not be supported in the future.",
+ DeprecationWarning, stacklevel=2)
+
+ if mydo == "fetchall":
+ fetchall = 1
+ mydo = "fetch"
+
+ parallel_fetchonly = mydo in ("fetch", "fetchall") and \
+ "PORTAGE_PARALLEL_FETCHONLY" in mysettings
+
+ if mydo not in clean_phases and not os.path.exists(myebuild):
+ writemsg("!!! doebuild: %s not found for %s\n" % (myebuild, mydo),
+ noiselevel=-1)
+ return 1
+
+ global _doebuild_manifest_cache
+ mf = None
+ if "strict" in features and \
+ "digest" not in features and \
+ tree == "porttree" and \
+ mydo not in ("digest", "manifest", "help") and \
+ not portage._doebuild_manifest_exempt_depend:
+ # Always verify the ebuild checksums before executing it.
+ global _doebuild_broken_ebuilds
+
+ if myebuild in _doebuild_broken_ebuilds:
+ return 1
+
+ pkgdir = os.path.dirname(myebuild)
+ manifest_path = os.path.join(pkgdir, "Manifest")
+
+ # Avoid checking the same Manifest several times in a row during a
+ # regen with an empty cache.
+ if _doebuild_manifest_cache is None or \
+ _doebuild_manifest_cache.getFullname() != manifest_path:
+ _doebuild_manifest_cache = None
+ if not os.path.exists(manifest_path):
+ out = portage.output.EOutput()
+ out.eerror(_("Manifest not found for '%s'") % (myebuild,))
+ _doebuild_broken_ebuilds.add(myebuild)
+ return 1
+ mf = Manifest(pkgdir, mysettings["DISTDIR"])
+
+ else:
+ mf = _doebuild_manifest_cache
+
+ try:
+ mf.checkFileHashes("EBUILD", os.path.basename(myebuild))
+ except KeyError:
+ out = portage.output.EOutput()
+ out.eerror(_("Missing digest for '%s'") % (myebuild,))
+ _doebuild_broken_ebuilds.add(myebuild)
+ return 1
+ except FileNotFound:
+ out = portage.output.EOutput()
+ out.eerror(_("A file listed in the Manifest "
+ "could not be found: '%s'") % (myebuild,))
+ _doebuild_broken_ebuilds.add(myebuild)
+ return 1
+ except DigestException as e:
+ out = portage.output.EOutput()
+ out.eerror(_("Digest verification failed:"))
+ out.eerror("%s" % e.value[0])
+ out.eerror(_("Reason: %s") % e.value[1])
+ out.eerror(_("Got: %s") % e.value[2])
+ out.eerror(_("Expected: %s") % e.value[3])
+ _doebuild_broken_ebuilds.add(myebuild)
+ return 1
+
+ if mf.getFullname() in _doebuild_broken_manifests:
+ return 1
+
+ if mf is not _doebuild_manifest_cache:
+
+ # Make sure that all of the ebuilds are
+ # actually listed in the Manifest.
+ for f in os.listdir(pkgdir):
+ pf = None
+ if f[-7:] == '.ebuild':
+ pf = f[:-7]
+ if pf is not None and not mf.hasFile("EBUILD", f):
+ f = os.path.join(pkgdir, f)
+ if f not in _doebuild_broken_ebuilds:
+ out = portage.output.EOutput()
+ out.eerror(_("A file is not listed in the "
+ "Manifest: '%s'") % (f,))
+ _doebuild_broken_manifests.add(manifest_path)
+ return 1
+
+ # Only cache it if the above stray files test succeeds.
+ _doebuild_manifest_cache = mf
+
+ logfile=None
+ builddir_lock = None
+ tmpdir = None
+ tmpdir_orig = None
+
+ try:
+ if mydo in ("digest", "manifest", "help"):
+ # Temporarily exempt the depend phase from manifest checks, in case
+ # aux_get calls trigger cache generation.
+ portage._doebuild_manifest_exempt_depend += 1
+
+ # If we don't need much space and we don't need a constant location,
+ # we can temporarily override PORTAGE_TMPDIR with a random temp dir
+ # so that there's no need for locking and it can be used even if the
+ # user isn't in the portage group.
+ if mydo in ("info",):
+ tmpdir = tempfile.mkdtemp()
+ tmpdir_orig = mysettings["PORTAGE_TMPDIR"]
+ mysettings["PORTAGE_TMPDIR"] = tmpdir
+
+ doebuild_environment(myebuild, mydo, myroot, mysettings, debug,
+ use_cache, mydbapi)
+
+ if mydo in clean_phases:
+ builddir_lock = None
+ if not returnpid and \
+ 'PORTAGE_BUILDIR_LOCKED' not in mysettings:
+ builddir_lock = EbuildBuildDir(
+ scheduler=PollScheduler().sched_iface,
+ settings=mysettings)
+ builddir_lock.lock()
+ try:
+ return _spawn_phase(mydo, mysettings,
+ fd_pipes=fd_pipes, returnpid=returnpid)
+ finally:
+ if builddir_lock is not None:
+ builddir_lock.unlock()
+
+ restrict = set(mysettings.get('PORTAGE_RESTRICT', '').split())
+ # get possible slot information from the deps file
+ if mydo == "depend":
+ writemsg("!!! DEBUG: dbkey: %s\n" % str(dbkey), 2)
+ if returnpid:
+ return _spawn_phase(mydo, mysettings,
+ fd_pipes=fd_pipes, returnpid=returnpid)
+ elif isinstance(dbkey, dict):
+ warnings.warn("portage.doebuild() called " + \
+ "with dict dbkey argument. This usage will " + \
+ "not be supported in the future.",
+ DeprecationWarning, stacklevel=2)
+ mysettings["dbkey"] = ""
+ pr, pw = os.pipe()
+ fd_pipes = {
+ 0:sys.stdin.fileno(),
+ 1:sys.stdout.fileno(),
+ 2:sys.stderr.fileno(),
+ 9:pw}
+ mypids = _spawn_phase(mydo, mysettings, returnpid=True,
+ fd_pipes=fd_pipes)
+ os.close(pw) # belongs exclusively to the child process now
+ f = os.fdopen(pr, 'rb', 0)
+ for k, v in zip(auxdbkeys,
+ (_unicode_decode(line).rstrip('\n') for line in f)):
+ dbkey[k] = v
+ f.close()
+ retval = os.waitpid(mypids[0], 0)[1]
+ portage.process.spawned_pids.remove(mypids[0])
+ # If it got a signal, return the signal that was sent, but
+ # shift in order to distinguish it from a return value. (just
+ # like portage.process.spawn() would do).
+ if retval & 0xff:
+ retval = (retval & 0xff) << 8
+ else:
+ # Otherwise, return its exit code.
+ retval = retval >> 8
+ if retval == os.EX_OK and len(dbkey) != len(auxdbkeys):
+ # Don't trust bash's returncode if the
+ # number of lines is incorrect.
+ retval = 1
+ return retval
+ elif dbkey:
+ mysettings["dbkey"] = dbkey
+ else:
+ mysettings["dbkey"] = \
+ os.path.join(mysettings.depcachedir, "aux_db_key_temp")
+
+ return _spawn_phase(mydo, mysettings,
+ fd_pipes=fd_pipes, returnpid=returnpid)
+
+ # Validate dependency metadata here to ensure that ebuilds with invalid
+ # data are never installed via the ebuild command. Don't bother when
+ # returnpid == True since there's no need to do this every time emerge
+ # executes a phase.
+ if tree == "porttree":
+ rval = _validate_deps(mysettings, myroot, mydo, mydbapi)
+ if rval != os.EX_OK:
+ return rval
+
+ # The info phase is special because it uses mkdtemp, so any
+ # user (not necessarily in the portage group) can run it.
+ if mydo not in ('info',) and \
+ mydo not in _doebuild_commands_without_builddir:
+ rval = _check_temp_dir(mysettings)
+ if rval != os.EX_OK:
+ return rval
+
+ if mydo == "unmerge":
+ return unmerge(mysettings["CATEGORY"],
+ mysettings["PF"], myroot, mysettings, vartree=vartree)
+
+ # Build directory creation isn't required for any of these.
+ # In the fetch phase, the directory is needed only for RESTRICT=fetch
+ # in order to satisfy the sane $PWD requirement (from bug #239560)
+ # when pkg_nofetch is spawned.
+ have_build_dirs = False
+ if not parallel_fetchonly and \
+ mydo not in ('digest', 'fetch', 'help', 'manifest'):
+ if not returnpid and \
+ 'PORTAGE_BUILDIR_LOCKED' not in mysettings:
+ builddir_lock = EbuildBuildDir(
+ scheduler=PollScheduler().sched_iface,
+ settings=mysettings)
+ builddir_lock.lock()
+ mystatus = prepare_build_dirs(myroot, mysettings, cleanup)
+ if mystatus:
+ return mystatus
+ have_build_dirs = True
+
+ # emerge handles logging externally
+ if not returnpid:
+ # PORTAGE_LOG_FILE is set by the
+ # above prepare_build_dirs() call.
+ logfile = mysettings.get("PORTAGE_LOG_FILE")
+
+ if have_build_dirs:
+ rval = _prepare_env_file(mysettings)
+ if rval != os.EX_OK:
+ return rval
+
+ if eapi_exports_merge_type(mysettings["EAPI"]) and \
+ "MERGE_TYPE" not in mysettings.configdict["pkg"]:
+ if tree == "porttree":
+ mysettings.configdict["pkg"]["EMERGE_FROM"] = "ebuild"
+ mysettings.configdict["pkg"]["MERGE_TYPE"] = "source"
+ elif tree == "bintree":
+ mysettings.configdict["pkg"]["EMERGE_FROM"] = "binary"
+ mysettings.configdict["pkg"]["MERGE_TYPE"] = "binary"
+
+ # NOTE: It's not possible to set REPLACED_BY_VERSION for prerm
+ # and postrm here, since we don't necessarily know what
+ # versions are being installed. This could be a problem
+ # for API consumers if they don't use dblink.treewalk()
+ # to execute prerm and postrm.
+ if eapi_exports_replace_vars(mysettings["EAPI"]) and \
+ (mydo in ("postinst", "preinst", "pretend", "setup") or \
+ ("noauto" not in features and not returnpid and \
+ (mydo in actionmap_deps or mydo in ("merge", "package", "qmerge")))):
+ if not vartree:
+ writemsg("Warning: vartree not given to doebuild. " + \
+ "Cannot set REPLACING_VERSIONS in pkg_{pretend,setup}\n")
+ else:
+ vardb = vartree.dbapi
+ cpv = mysettings.mycpv
+ cp = portage.versions.cpv_getkey(cpv)
+ slot = mysettings["SLOT"]
+ cpv_slot = cp + ":" + slot
+ mysettings["REPLACING_VERSIONS"] = " ".join(
+ set(portage.versions.cpv_getversion(match) \
+ for match in vardb.match(cpv_slot) + \
+ vardb.match('='+cpv)))
+
+ # if any of these are being called, handle them -- running them out of
+ # the sandbox -- and stop now.
+ if mydo in ("config", "help", "info", "postinst",
+ "preinst", "pretend", "postrm", "prerm"):
+ return _spawn_phase(mydo, mysettings,
+ fd_pipes=fd_pipes, logfile=logfile, returnpid=returnpid)
+
+ mycpv = "/".join((mysettings["CATEGORY"], mysettings["PF"]))
+
+ # Only try and fetch the files if we are going to need them ...
+ # otherwise, if user has FEATURES=noauto and they run `ebuild clean
+ # unpack compile install`, we will try and fetch 4 times :/
+ need_distfiles = tree == "porttree" and \
+ (mydo in ("fetch", "unpack") or \
+ mydo not in ("digest", "manifest") and "noauto" not in features)
+ alist = set(mysettings.configdict["pkg"].get("A", "").split())
+ if need_distfiles:
+
+ src_uri, = mydbapi.aux_get(mysettings.mycpv,
+ ["SRC_URI"], mytree=os.path.dirname(os.path.dirname(
+ os.path.dirname(myebuild))))
+ metadata = {
+ "EAPI" : mysettings["EAPI"],
+ "SRC_URI" : src_uri,
+ }
+ use = frozenset(mysettings["PORTAGE_USE"].split())
+ try:
+ alist = _parse_uri_map(mysettings.mycpv, metadata, use=use)
+ aalist = _parse_uri_map(mysettings.mycpv, metadata)
+ except InvalidDependString as e:
+ writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ writemsg(_("!!! Invalid SRC_URI for '%s'.\n") % mycpv,
+ noiselevel=-1)
+ del e
+ return 1
+
+ if "mirror" in features or fetchall:
+ fetchme = aalist
+ else:
+ fetchme = alist
+
+ dist_digests = None
+ if mf is not None:
+ dist_digests = mf.getTypeDigests("DIST")
+ if not fetch(fetchme, mysettings, listonly=listonly,
+ fetchonly=fetchonly, allow_missing_digests=True,
+ digests=dist_digests):
+ spawn_nofetch(mydbapi, myebuild, settings=mysettings)
+ if listonly:
+ # The convention for listonly mode is to report
+ # success in any case, even though fetch() may
+ # return unsuccessfully in order to trigger the
+ # nofetch phase.
+ return 0
+ return 1
+
+ if mydo == "fetch":
+ # Files are already checked inside fetch(),
+ # so do not check them again.
+ checkme = []
+ else:
+ checkme = alist
+
+ if mydo == "fetch" and listonly:
+ return 0
+
+ try:
+ if mydo == "manifest":
+ mf = None
+ _doebuild_manifest_cache = None
+ return not digestgen(mysettings=mysettings, myportdb=mydbapi)
+ elif mydo == "digest":
+ mf = None
+ _doebuild_manifest_cache = None
+ return not digestgen(mysettings=mysettings, myportdb=mydbapi)
+ elif mydo != 'fetch' and \
+ "digest" in mysettings.features:
+ # Don't do this when called by emerge or when called just
+ # for fetch (especially parallel-fetch) since it's not needed
+ # and it can interfere with parallel tasks.
+ mf = None
+ _doebuild_manifest_cache = None
+ digestgen(mysettings=mysettings, myportdb=mydbapi)
+ except PermissionDenied as e:
+ writemsg(_("!!! Permission Denied: %s\n") % (e,), noiselevel=-1)
+ if mydo in ("digest", "manifest"):
+ return 1
+
+ # See above comment about fetching only when needed
+ if tree == 'porttree' and \
+ not digestcheck(checkme, mysettings, "strict" in features, mf=mf):
+ return 1
+
+ if mydo == "fetch":
+ return 0
+
+ # remove PORTAGE_ACTUAL_DISTDIR once cvs/svn is supported via SRC_URI
+ if tree == 'porttree' and \
+ ((mydo != "setup" and "noauto" not in features) \
+ or mydo in ("install", "unpack")):
+ _prepare_fake_distdir(mysettings, alist)
+
+ # Initial dep checks complete; time to process main commands.
+ actionmap = _spawn_actionmap(mysettings)
+
+ # Merge the deps in so that we again have a 'full' actionmap.
+ # Be glad when this can die.
+ for x in actionmap:
+ if len(actionmap_deps.get(x, [])):
+ actionmap[x]["dep"] = ' '.join(actionmap_deps[x])
+
+ if mydo in actionmap:
+ bintree = None
+ if mydo == "package":
+ # Make sure the package directory exists before executing
+ # this phase. This can raise PermissionDenied if
+ # the current user doesn't have write access to $PKGDIR.
+ if hasattr(portage, 'db'):
+ bintree = portage.db[mysettings["ROOT"]]["bintree"]
+ mysettings["PORTAGE_BINPKG_TMPFILE"] = \
+ bintree.getname(mysettings.mycpv) + \
+ ".%s" % (os.getpid(),)
+ bintree._ensure_dir(os.path.dirname(
+ mysettings["PORTAGE_BINPKG_TMPFILE"]))
+ else:
+ parent_dir = os.path.join(mysettings["PKGDIR"],
+ mysettings["CATEGORY"])
+ portage.util.ensure_dirs(parent_dir)
+ if not os.access(parent_dir, os.W_OK):
+ raise PermissionDenied(
+ "access('%s', os.W_OK)" % parent_dir)
+ retval = spawnebuild(mydo,
+ actionmap, mysettings, debug, logfile=logfile,
+ fd_pipes=fd_pipes, returnpid=returnpid)
+
+ if retval == os.EX_OK:
+ if mydo == "package" and bintree is not None:
+ bintree.inject(mysettings.mycpv,
+ filename=mysettings["PORTAGE_BINPKG_TMPFILE"])
+ elif mydo=="qmerge":
+ # Check to ensure install was run. This *only* pops up when users
+ # forget it and are using ebuild.
+ if not os.path.exists(
+ os.path.join(mysettings["PORTAGE_BUILDDIR"], ".installed")):
+ writemsg(_("!!! mydo=qmerge, but the install phase has not been run\n"),
+ noiselevel=-1)
+ return 1
+ # qmerge is a special phase that implies noclean.
+ if "noclean" not in mysettings.features:
+ mysettings.features.add("noclean")
+ # qmerge is specifically not supposed to do a runtime dep check.
+ retval = merge(
+ mysettings["CATEGORY"], mysettings["PF"], mysettings["D"],
+ os.path.join(mysettings["PORTAGE_BUILDDIR"], "build-info"),
+ myroot, mysettings, myebuild=mysettings["EBUILD"], mytree=tree,
+ mydbapi=mydbapi, vartree=vartree, prev_mtimes=prev_mtimes)
+ elif mydo=="merge":
+ retval = spawnebuild("install", actionmap, mysettings, debug,
+ alwaysdep=1, logfile=logfile, fd_pipes=fd_pipes,
+ returnpid=returnpid)
+ if retval != os.EX_OK:
+ # The merge phase handles this already. Callers don't know how
+ # far this function got, so we have to call elog_process() here
+ # so that it's only called once.
+ elog_process(mysettings.mycpv, mysettings)
+ if retval == os.EX_OK:
+ retval = merge(mysettings["CATEGORY"], mysettings["PF"],
+ mysettings["D"], os.path.join(mysettings["PORTAGE_BUILDDIR"],
+ "build-info"), myroot, mysettings,
+ myebuild=mysettings["EBUILD"], mytree=tree, mydbapi=mydbapi,
+ vartree=vartree, prev_mtimes=prev_mtimes)
+ else:
+ writemsg_stdout(_("!!! Unknown mydo: %s\n") % mydo, noiselevel=-1)
+ return 1
+
+ return retval
+
+ finally:
+
+ if builddir_lock is not None:
+ builddir_lock.unlock()
+ if tmpdir:
+ mysettings["PORTAGE_TMPDIR"] = tmpdir_orig
+ shutil.rmtree(tmpdir)
+
+ mysettings.pop("REPLACING_VERSIONS", None)
+
+ # Make sure that DISTDIR is restored to its normal value before we return!
+ if "PORTAGE_ACTUAL_DISTDIR" in mysettings:
+ mysettings["DISTDIR"] = mysettings["PORTAGE_ACTUAL_DISTDIR"]
+ del mysettings["PORTAGE_ACTUAL_DISTDIR"]
+
+ if logfile and not returnpid:
+ try:
+ if os.stat(logfile).st_size == 0:
+ os.unlink(logfile)
+ except OSError:
+ pass
+
+ if mydo in ("digest", "manifest", "help"):
+ # If necessary, depend phase has been triggered by aux_get calls
+ # and the exemption is no longer needed.
+ portage._doebuild_manifest_exempt_depend -= 1
+
+def _check_temp_dir(settings):
+ if "PORTAGE_TMPDIR" not in settings or \
+ not os.path.isdir(settings["PORTAGE_TMPDIR"]):
+ writemsg(_("The directory specified in your "
+ "PORTAGE_TMPDIR variable, '%s',\n"
+ "does not exist. Please create this directory or "
+ "correct your PORTAGE_TMPDIR setting.\n") % \
+ settings.get("PORTAGE_TMPDIR", ""), noiselevel=-1)
+ return 1
+
+ # Some people use a separate PORTAGE_TMPDIR mount; prefer checking
+ # that location, since the checks below would otherwise be pointless
+ # for them.
+ if os.path.exists(os.path.join(settings["PORTAGE_TMPDIR"], "portage")):
+ checkdir = os.path.join(settings["PORTAGE_TMPDIR"], "portage")
+ else:
+ checkdir = settings["PORTAGE_TMPDIR"]
+
+ if not os.access(checkdir, os.W_OK):
+ writemsg(_("%s is not writable.\n"
+ "Likely cause is that you've mounted it as readonly.\n") % checkdir,
+ noiselevel=-1)
+ return 1
+
+ else:
+ fd = tempfile.NamedTemporaryFile(prefix="exectest-", dir=checkdir)
+ os.chmod(fd.name, 0o755)
+ if not os.access(fd.name, os.X_OK):
+			writemsg(_("Cannot execute files in %s\n"
+ "Likely cause is that you've mounted it with one of the\n"
+ "following mount options: 'noexec', 'user', 'users'\n\n"
+ "Please make sure that portage can execute files in this directory.\n") % checkdir,
+ noiselevel=-1)
+ return 1
+
+ return os.EX_OK
+
+def _prepare_env_file(settings):
+ """
+ Extract environment.bz2 if it exists, but only if the destination
+ environment file doesn't already exist. There are lots of possible
+ states when doebuild() calls this function, and we want to avoid
+ clobbering an existing environment file.
+ """
+
+ env_extractor = BinpkgEnvExtractor(background=False,
+ scheduler=PollScheduler().sched_iface, settings=settings)
+
+ if env_extractor.dest_env_exists():
+ # There are lots of possible states when doebuild()
+ # calls this function, and we want to avoid
+ # clobbering an existing environment file.
+ return os.EX_OK
+
+ if not env_extractor.saved_env_exists():
+ # If the environment.bz2 doesn't exist, then ebuild.sh will
+ # source the ebuild as a fallback.
+ return os.EX_OK
+
+ env_extractor.start()
+ env_extractor.wait()
+ return env_extractor.returncode
+
+def _prepare_fake_distdir(settings, alist):
+ orig_distdir = settings["DISTDIR"]
+ settings["PORTAGE_ACTUAL_DISTDIR"] = orig_distdir
+ edpath = settings["DISTDIR"] = \
+ os.path.join(settings["PORTAGE_BUILDDIR"], "distdir")
+ portage.util.ensure_dirs(edpath, gid=portage_gid, mode=0o755)
+
+ # Remove any unexpected files or directories.
+ for x in os.listdir(edpath):
+ symlink_path = os.path.join(edpath, x)
+ st = os.lstat(symlink_path)
+ if x in alist and stat.S_ISLNK(st.st_mode):
+ continue
+ if stat.S_ISDIR(st.st_mode):
+ shutil.rmtree(symlink_path)
+ else:
+ os.unlink(symlink_path)
+
+ # Check for existing symlinks and recreate if necessary.
+ for x in alist:
+ symlink_path = os.path.join(edpath, x)
+ target = os.path.join(orig_distdir, x)
+ try:
+ link_target = os.readlink(symlink_path)
+ except OSError:
+ os.symlink(target, symlink_path)
+ else:
+ if link_target != target:
+ os.unlink(symlink_path)
+ os.symlink(target, symlink_path)
+
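+# A minimal standalone sketch of the symlink-farm technique used above (names
+# like "real_dir", "farm_dir" and "names" are illustrative, not portage API):
+#
+#     import os
+#     def make_symlink_farm(real_dir, farm_dir, names):
+#         if not os.path.isdir(farm_dir):
+#             os.makedirs(farm_dir)
+#         for name in names:
+#             link = os.path.join(farm_dir, name)
+#             if not os.path.islink(link):
+#                 os.symlink(os.path.join(real_dir, name), link)
+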
+def _spawn_actionmap(settings):
+ features = settings.features
+ restrict = settings["PORTAGE_RESTRICT"].split()
+ nosandbox = (("userpriv" in features) and \
+ ("usersandbox" not in features) and \
+ "userpriv" not in restrict and \
+ "nouserpriv" not in restrict)
+ if nosandbox and ("userpriv" not in features or \
+ "userpriv" in restrict or \
+ "nouserpriv" in restrict):
+ nosandbox = ("sandbox" not in features and \
+ "usersandbox" not in features)
+
+ if "depcheck" in features or "depcheckstrict" in features:
+ nosandbox = True
+
+ if not portage.process.sandbox_capable:
+ nosandbox = True
+
+ sesandbox = settings.selinux_enabled() and \
+ "sesandbox" in features
+
+ droppriv = "userpriv" in features and \
+ "userpriv" not in restrict and \
+ secpass >= 2
+
+ fakeroot = "fakeroot" in features
+
+ portage_bin_path = settings["PORTAGE_BIN_PATH"]
+ ebuild_sh_binary = os.path.join(portage_bin_path,
+ os.path.basename(EBUILD_SH_BINARY))
+ misc_sh_binary = os.path.join(portage_bin_path,
+ os.path.basename(MISC_SH_BINARY))
+ ebuild_sh = _shell_quote(ebuild_sh_binary) + " %s"
+ misc_sh = _shell_quote(misc_sh_binary) + " dyn_%s"
+
+	# args are passed to the spawn function
+ actionmap = {
+"pretend": {"cmd":ebuild_sh, "args":{"droppriv":0, "free":1, "sesandbox":0, "fakeroot":0}},
+"setup": {"cmd":ebuild_sh, "args":{"droppriv":0, "free":1, "sesandbox":0, "fakeroot":0}},
+"unpack": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":0, "sesandbox":sesandbox, "fakeroot":0}},
+"prepare": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":0, "sesandbox":sesandbox, "fakeroot":0}},
+"configure":{"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
+"compile": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
+"test": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
+"install": {"cmd":ebuild_sh, "args":{"droppriv":0, "free":0, "sesandbox":sesandbox, "fakeroot":fakeroot}},
+"rpm": {"cmd":misc_sh, "args":{"droppriv":0, "free":0, "sesandbox":0, "fakeroot":fakeroot}},
+"package": {"cmd":misc_sh, "args":{"droppriv":0, "free":0, "sesandbox":0, "fakeroot":fakeroot}},
+ }
+
+ return actionmap
+
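+# Illustrative sketch (not the literal implementation) of how one phase entry
+# from the actionmap returned above is consumed; "compile" is just an example
+# phase name:
+#
+#     entry = actionmap["compile"]
+#     cmd = entry["cmd"] % "compile"   # e.g. ".../ebuild.sh compile"
+#     rc = spawn(cmd, mysettings, debug=debug, **entry["args"])
+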
+def _validate_deps(mysettings, myroot, mydo, mydbapi):
+
+ invalid_dep_exempt_phases = \
+ set(["clean", "cleanrm", "help", "prerm", "postrm"])
+ all_keys = set(Package.metadata_keys)
+ all_keys.add("SRC_URI")
+ all_keys = tuple(all_keys)
+ metadata = dict(zip(all_keys,
+ mydbapi.aux_get(mysettings.mycpv, all_keys)))
+
+ class FakeTree(object):
+ def __init__(self, mydb):
+ self.dbapi = mydb
+
+ root_config = RootConfig(mysettings, {"porttree":FakeTree(mydbapi)}, None)
+
+ pkg = Package(built=False, cpv=mysettings.mycpv,
+ metadata=metadata, root_config=root_config,
+ type_name="ebuild")
+
+ msgs = []
+ if pkg.invalid:
+ for k, v in pkg.invalid.items():
+ for msg in v:
+ msgs.append(" %s\n" % (msg,))
+
+ if msgs:
+ portage.util.writemsg_level(_("Error(s) in metadata for '%s':\n") % \
+ (mysettings.mycpv,), level=logging.ERROR, noiselevel=-1)
+ for x in msgs:
+ portage.util.writemsg_level(x,
+ level=logging.ERROR, noiselevel=-1)
+ if mydo not in invalid_dep_exempt_phases:
+ return 1
+
+ if not pkg.built and \
+ mydo not in ("digest", "help", "manifest") and \
+ pkg.metadata["REQUIRED_USE"] and \
+ eapi_has_required_use(pkg.metadata["EAPI"]):
+ result = check_required_use(pkg.metadata["REQUIRED_USE"],
+ pkg.use.enabled, pkg.iuse.is_valid_flag)
+ if not result:
+ reduced_noise = result.tounicode()
+ writemsg("\n %s\n" % _("The following REQUIRED_USE flag" + \
+ " constraints are unsatisfied:"), noiselevel=-1)
+ writemsg(" %s\n" % reduced_noise,
+ noiselevel=-1)
+ normalized_required_use = \
+ " ".join(pkg.metadata["REQUIRED_USE"].split())
+ if reduced_noise != normalized_required_use:
+ writemsg("\n %s\n" % _("The above constraints " + \
+ "are a subset of the following complete expression:"),
+ noiselevel=-1)
+ writemsg(" %s\n" % \
+ human_readable_required_use(normalized_required_use),
+ noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+ return 1
+
+ return os.EX_OK
+
+# XXX This is intended to replace getstatusoutput completely.
+# XXX Issue: it cannot block execution; a deadlock condition is possible.
+def spawn(mystring, mysettings, debug=0, free=0, droppriv=0, sesandbox=0, fakeroot=0, **keywords):
+ """
+ Spawn a subprocess with extra portage-specific options.
+	Options include:
+
+	Sandbox: Sandbox means the spawned process will be limited in its ability to
+	read and write files (normally this means it is restricted to ${D}/)
+	SELinux Sandbox: Enables sandboxing under SELinux
+	Reduced Privileges: Drops privileges such that the process runs as portage:portage
+	instead of as root.
+
+ Notes: os.system cannot be used because it messes with signal handling. Instead we
+ use the portage.process spawn* family of functions.
+
+ This function waits for the process to terminate.
+
+ @param mystring: Command to run
+ @type mystring: String
+ @param mysettings: Either a Dict of Key,Value pairs or an instance of portage.config
+ @type mysettings: Dictionary or config instance
+ @param debug: Ignored
+ @type debug: Boolean
+	@param free: Run this process without sandbox confinement
+	@type free: Boolean
+ @param droppriv: Drop to portage:portage when running this command
+ @type droppriv: Boolean
+ @param sesandbox: Enable SELinux Sandboxing (toggles a context switch)
+ @type sesandbox: Boolean
+ @param fakeroot: Run this command with faked root privileges
+ @type fakeroot: Boolean
+ @param keywords: Extra options encoded as a dict, to be passed to spawn
+ @type keywords: Dictionary
+ @rtype: Integer
+	@return: The return code of the spawned process.
+ """
+
+ check_config_instance(mysettings)
+
+ fd_pipes = keywords.get("fd_pipes")
+ if fd_pipes is None:
+ fd_pipes = {
+ 0:sys.stdin.fileno(),
+ 1:sys.stdout.fileno(),
+ 2:sys.stderr.fileno(),
+ }
+	# Output written so far may still be buffered, so flush stdout
+	# before allowing a child process to use it, so that output
+	# always shows in the correct order.
+ stdout_filenos = (sys.stdout.fileno(), sys.stderr.fileno())
+ for fd in fd_pipes.values():
+ if fd in stdout_filenos:
+ sys.stdout.flush()
+ sys.stderr.flush()
+ break
+
+ features = mysettings.features
+ # TODO: Enable fakeroot to be used together with droppriv. The
+ # fake ownership/permissions will have to be converted to real
+ # permissions in the merge phase.
+ fakeroot = fakeroot and uid != 0 and portage.process.fakeroot_capable
+ if droppriv and not uid and portage_gid and portage_uid:
+ keywords.update({"uid":portage_uid,"gid":portage_gid,
+ "groups":userpriv_groups,"umask":0o02})
+ if not free:
+ free=((droppriv and "usersandbox" not in features) or \
+ (not droppriv and "sandbox" not in features and \
+ "usersandbox" not in features and not fakeroot))
+
+ if not free and not (fakeroot or portage.process.sandbox_capable):
+ free = True
+
+ if mysettings.mycpv is not None:
+ keywords["opt_name"] = "[%s]" % mysettings.mycpv
+ else:
+ keywords["opt_name"] = "[%s/%s]" % \
+ (mysettings.get("CATEGORY",""), mysettings.get("PF",""))
+
+ if "depcheck" in features or "depcheckstrict" in features:
+ keywords["opt_name"] += " bash"
+ spawn_func = portage.process.spawn_autodep
+ elif free or "SANDBOX_ACTIVE" in os.environ:
+ keywords["opt_name"] += " bash"
+ spawn_func = portage.process.spawn_bash
+ elif fakeroot:
+ keywords["opt_name"] += " fakeroot"
+ keywords["fakeroot_state"] = os.path.join(mysettings["T"], "fakeroot.state")
+ spawn_func = portage.process.spawn_fakeroot
+ else:
+ keywords["opt_name"] += " sandbox"
+ spawn_func = portage.process.spawn_sandbox
+
+ if sesandbox:
+ spawn_func = selinux.spawn_wrapper(spawn_func,
+ mysettings["PORTAGE_SANDBOX_T"])
+
+ if keywords.get("returnpid"):
+ return spawn_func(mystring, env=mysettings.environ(), **keywords)
+
+ proc = EbuildSpawnProcess(
+ background=False, args=mystring,
+ scheduler=PollScheduler().sched_iface, spawn_func=spawn_func,
+ settings=mysettings, **keywords)
+
+ proc.start()
+ proc.wait()
+
+ return proc.returncode
+
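+# Usage sketch for spawn() above (assumes "mysettings" is a valid
+# portage.config instance; the command line is illustrative):
+#
+#     rc = spawn("emake -j1", mysettings, free=1)  # run unconfined
+#     if rc != os.EX_OK:
+#         writemsg("!!! command failed: %d\n" % rc, noiselevel=-1)
+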
+# parse actionmap to spawn ebuild with the appropriate args
+def spawnebuild(mydo, actionmap, mysettings, debug, alwaysdep=0,
+ logfile=None, fd_pipes=None, returnpid=False):
+
+ if returnpid:
+ warnings.warn("portage.spawnebuild() called " + \
+ "with returnpid parameter enabled. This usage will " + \
+ "not be supported in the future.",
+ DeprecationWarning, stacklevel=2)
+
+ if not returnpid and \
+ (alwaysdep or "noauto" not in mysettings.features):
+ # process dependency first
+ if "dep" in actionmap[mydo]:
+ retval = spawnebuild(actionmap[mydo]["dep"], actionmap,
+ mysettings, debug, alwaysdep=alwaysdep, logfile=logfile,
+ fd_pipes=fd_pipes, returnpid=returnpid)
+ if retval:
+ return retval
+
+ eapi = mysettings["EAPI"]
+
+ if mydo in ("configure", "prepare") and not eapi_has_src_prepare_and_src_configure(eapi):
+ return os.EX_OK
+
+ if mydo == "pretend" and not eapi_has_pkg_pretend(eapi):
+ return os.EX_OK
+
+ return _spawn_phase(mydo, mysettings,
+ actionmap=actionmap, logfile=logfile,
+ fd_pipes=fd_pipes, returnpid=returnpid)
+
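+# Sketch of the recursive dependency walk above, assuming phase entries carry
+# an optional "dep" key naming their prerequisite phase (these keys are
+# attached to the actionmap elsewhere in doebuild(); names are illustrative):
+#
+#     actionmap = {
+#         "unpack":  {"cmd": ebuild_sh, "args": {}},
+#         "compile": {"cmd": ebuild_sh, "args": {}, "dep": "unpack"},
+#     }
+#     # spawnebuild("compile", actionmap, ...) first recurses into
+#     # "unpack", then spawns the "compile" phase itself.
+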
+_post_phase_cmds = {
+
+ "install" : [
+ "install_qa_check",
+ "install_symlink_html_docs"],
+
+ "preinst" : [
+ "preinst_sfperms",
+ "preinst_selinux_labels",
+ "preinst_suid_scan",
+ "preinst_mask"]
+}
+
+def _post_phase_userpriv_perms(mysettings):
+ if "userpriv" in mysettings.features and secpass >= 2:
+		# Privileged phases may have left files that need to be made
+		# writable to a less privileged user.
+ apply_recursive_permissions(mysettings["T"],
+ uid=portage_uid, gid=portage_gid, dirmode=0o70, dirmask=0,
+ filemode=0o60, filemask=0)
+
+def _check_build_log(mysettings, out=None):
+ """
+ Search the content of $PORTAGE_LOG_FILE if it exists
+ and generate the following QA Notices when appropriate:
+
+ * Automake "maintainer mode"
+ * command not found
+ * Unrecognized configure options
+ """
+ logfile = mysettings.get("PORTAGE_LOG_FILE")
+ if logfile is None:
+ return
+ try:
+ f = open(_unicode_encode(logfile, encoding=_encodings['fs'],
+ errors='strict'), mode='rb')
+ except EnvironmentError:
+ return
+
+ if logfile.endswith('.gz'):
+ f = gzip.GzipFile(filename='', mode='rb', fileobj=f)
+
+ am_maintainer_mode = []
+ bash_command_not_found = []
+ bash_command_not_found_re = re.compile(
+ r'(.*): line (\d*): (.*): command not found$')
+ command_not_found_exclude_re = re.compile(r'/configure: line ')
+ helper_missing_file = []
+ helper_missing_file_re = re.compile(
+ r'^!!! (do|new).*: .* does not exist$')
+
+ configure_opts_warn = []
+ configure_opts_warn_re = re.compile(
+ r'^configure: WARNING: [Uu]nrecognized options: ')
+
+ # Exclude output from dev-libs/yaz-3.0.47 which looks like this:
+ #
+ #Configuration:
+ # Automake: ${SHELL} /var/tmp/portage/dev-libs/yaz-3.0.47/work/yaz-3.0.47/config/missing --run automake-1.10
+ am_maintainer_mode_re = re.compile(r'/missing --run ')
+ am_maintainer_mode_exclude_re = \
+ re.compile(r'(/missing --run (autoheader|autotest|help2man|makeinfo)|^\s*Automake:\s)')
+
+ make_jobserver_re = \
+ re.compile(r'g?make\[\d+\]: warning: jobserver unavailable:')
+ make_jobserver = []
+
+ def _eerror(lines):
+ for line in lines:
+ eerror(line, phase="install", key=mysettings.mycpv, out=out)
+
+ try:
+ for line in f:
+ line = _unicode_decode(line)
+ if am_maintainer_mode_re.search(line) is not None and \
+ am_maintainer_mode_exclude_re.search(line) is None:
+ am_maintainer_mode.append(line.rstrip("\n"))
+
+ if bash_command_not_found_re.match(line) is not None and \
+ command_not_found_exclude_re.search(line) is None:
+ bash_command_not_found.append(line.rstrip("\n"))
+
+ if helper_missing_file_re.match(line) is not None:
+ helper_missing_file.append(line.rstrip("\n"))
+
+ if configure_opts_warn_re.match(line) is not None:
+ configure_opts_warn.append(line.rstrip("\n"))
+
+ if make_jobserver_re.match(line) is not None:
+ make_jobserver.append(line.rstrip("\n"))
+
+ except zlib.error as e:
+ _eerror(["portage encountered a zlib error: '%s'" % (e,),
+ "while reading the log file: '%s'" % logfile])
+ finally:
+ f.close()
+
+ def _eqawarn(lines):
+ for line in lines:
+ eqawarn(line, phase="install", key=mysettings.mycpv, out=out)
+ wrap_width = 70
+
+ if am_maintainer_mode:
+ msg = [_("QA Notice: Automake \"maintainer mode\" detected:")]
+ msg.append("")
+ msg.extend("\t" + line for line in am_maintainer_mode)
+ msg.append("")
+ msg.extend(wrap(_(
+ "If you patch Makefile.am, "
+ "configure.in, or configure.ac then you "
+ "should use autotools.eclass and "
+ "eautomake or eautoreconf. Exceptions "
+ "are limited to system packages "
+ "for which it is impossible to run "
+ "autotools during stage building. "
+ "See http://www.gentoo.org/p"
+ "roj/en/qa/autofailure.xml for more information."),
+ wrap_width))
+ _eqawarn(msg)
+
+ if bash_command_not_found:
+ msg = [_("QA Notice: command not found:")]
+ msg.append("")
+ msg.extend("\t" + line for line in bash_command_not_found)
+ _eqawarn(msg)
+
+ if helper_missing_file:
+ msg = [_("QA Notice: file does not exist:")]
+ msg.append("")
+ msg.extend("\t" + line[4:] for line in helper_missing_file)
+ _eqawarn(msg)
+
+ if configure_opts_warn:
+ msg = [_("QA Notice: Unrecognized configure options:")]
+ msg.append("")
+ msg.extend("\t" + line for line in configure_opts_warn)
+ _eqawarn(msg)
+
+ if make_jobserver:
+ msg = [_("QA Notice: make jobserver unavailable:")]
+ msg.append("")
+ msg.extend("\t" + line for line in make_jobserver)
+ _eqawarn(msg)
+
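+# Quick illustration of the "command not found" scan above, using a made-up
+# log line:
+#
+#     line = "/tmp/build.sh: line 12: foomatic: command not found"
+#     m = bash_command_not_found_re.match(line)
+#     # m.groups() == ("/tmp/build.sh", "12", "foomatic")
+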
+def _post_src_install_chost_fix(settings):
+ """
+ It's possible that the ebuild has changed the
+ CHOST variable, so revert it to the initial
+ setting.
+ """
+ if settings.get('CATEGORY') == 'virtual':
+ return
+
+ chost = settings.get('CHOST')
+ if chost:
+ write_atomic(os.path.join(settings['PORTAGE_BUILDDIR'],
+ 'build-info', 'CHOST'), chost + '\n')
+
+_vdb_use_conditional_keys = ('DEPEND', 'LICENSE', 'PDEPEND',
+ 'PROPERTIES', 'PROVIDE', 'RDEPEND', 'RESTRICT',)
+_vdb_use_conditional_atoms = frozenset(['DEPEND', 'PDEPEND', 'RDEPEND'])
+
+def _preinst_bsdflags(mysettings):
+ if bsd_chflags:
+ # Save all the file flags for restoration later.
+ os.system("mtree -c -p %s -k flags > %s" % \
+ (_shell_quote(mysettings["D"]),
+ _shell_quote(os.path.join(mysettings["T"], "bsdflags.mtree"))))
+
+ # Remove all the file flags to avoid EPERM errors.
+ os.system("chflags -R noschg,nouchg,nosappnd,nouappnd %s" % \
+ (_shell_quote(mysettings["D"]),))
+ os.system("chflags -R nosunlnk,nouunlnk %s 2>/dev/null" % \
+ (_shell_quote(mysettings["D"]),))
+
+
+def _postinst_bsdflags(mysettings):
+ if bsd_chflags:
+ # Restore all of the flags saved above.
+ os.system("mtree -e -p %s -U -k flags < %s > /dev/null" % \
+ (_shell_quote(mysettings["ROOT"]),
+ _shell_quote(os.path.join(mysettings["T"], "bsdflags.mtree"))))
+
+def _post_src_install_uid_fix(mysettings, out):
+ """
+ Files in $D with user and group bits that match the "portage"
+ user or group are automatically mapped to PORTAGE_INST_UID and
+ PORTAGE_INST_GID if necessary. The chown system call may clear
+ S_ISUID and S_ISGID bits, so those bits are restored if
+ necessary.
+ """
+
+ os = _os_merge
+
+ inst_uid = int(mysettings["PORTAGE_INST_UID"])
+ inst_gid = int(mysettings["PORTAGE_INST_GID"])
+
+ _preinst_bsdflags(mysettings)
+
+ destdir = mysettings["D"]
+ unicode_errors = []
+
+ while True:
+
+ unicode_error = False
+ size = 0
+ counted_inodes = set()
+ fixlafiles_announced = False
+ fixlafiles = "fixlafiles" in mysettings.features
+
+ for parent, dirs, files in os.walk(destdir):
+ try:
+ parent = _unicode_decode(parent,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeDecodeError:
+ new_parent = _unicode_decode(parent,
+ encoding=_encodings['merge'], errors='replace')
+ new_parent = _unicode_encode(new_parent,
+ encoding=_encodings['merge'], errors='backslashreplace')
+ new_parent = _unicode_decode(new_parent,
+ encoding=_encodings['merge'], errors='replace')
+ os.rename(parent, new_parent)
+ unicode_error = True
+ unicode_errors.append(new_parent[len(destdir):])
+ break
+
+ for fname in chain(dirs, files):
+ try:
+ fname = _unicode_decode(fname,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeDecodeError:
+ fpath = _os.path.join(
+ parent.encode(_encodings['merge']), fname)
+ new_fname = _unicode_decode(fname,
+ encoding=_encodings['merge'], errors='replace')
+ new_fname = _unicode_encode(new_fname,
+ encoding=_encodings['merge'], errors='backslashreplace')
+ new_fname = _unicode_decode(new_fname,
+ encoding=_encodings['merge'], errors='replace')
+ new_fpath = os.path.join(parent, new_fname)
+ os.rename(fpath, new_fpath)
+ unicode_error = True
+ unicode_errors.append(new_fpath[len(destdir):])
+ fname = new_fname
+ fpath = new_fpath
+ else:
+ fpath = os.path.join(parent, fname)
+
+ if fixlafiles and \
+ fname.endswith(".la") and os.path.isfile(fpath):
+ f = open(_unicode_encode(fpath,
+ encoding=_encodings['merge'], errors='strict'),
+ mode='rb')
+ has_lafile_header = b'.la - a libtool library file' \
+ in f.readline()
+ f.seek(0)
+ contents = f.read()
+ f.close()
+ try:
+ needs_update, new_contents = rewrite_lafile(contents)
+ except portage.exception.InvalidData as e:
+ needs_update = False
+ if not fixlafiles_announced:
+ fixlafiles_announced = True
+ writemsg("Fixing .la files\n", fd=out)
+
+ # Suppress warnings if the file does not have the
+ # expected header (bug #340725). Even if the header is
+ # missing, we still call rewrite_lafile() since some
+ # valid libtool archives may not have the header.
+ msg = " %s is not a valid libtool archive, skipping\n" % fpath[len(destdir):]
+ qa_msg = "QA Notice: invalid .la file found: %s, %s" % (fpath[len(destdir):], e)
+ if has_lafile_header:
+ writemsg(msg, fd=out)
+ eqawarn(qa_msg, key=mysettings.mycpv, out=out)
+
+ if needs_update:
+ if not fixlafiles_announced:
+ fixlafiles_announced = True
+ writemsg("Fixing .la files\n", fd=out)
+ writemsg(" %s\n" % fpath[len(destdir):], fd=out)
+ # write_atomic succeeds even in some cases in which
+ # a normal write might fail due to file permission
+ # settings on some operating systems such as HP-UX
+ write_atomic(_unicode_encode(fpath,
+ encoding=_encodings['merge'], errors='strict'),
+ new_contents, mode='wb')
+
+ mystat = os.lstat(fpath)
+ if stat.S_ISREG(mystat.st_mode) and \
+ mystat.st_ino not in counted_inodes:
+ counted_inodes.add(mystat.st_ino)
+ size += mystat.st_size
+ if mystat.st_uid != portage_uid and \
+ mystat.st_gid != portage_gid:
+ continue
+ myuid = -1
+ mygid = -1
+ if mystat.st_uid == portage_uid:
+ myuid = inst_uid
+ if mystat.st_gid == portage_gid:
+ mygid = inst_gid
+ apply_secpass_permissions(
+ _unicode_encode(fpath, encoding=_encodings['merge']),
+ uid=myuid, gid=mygid,
+ mode=mystat.st_mode, stat_cached=mystat,
+ follow_links=False)
+
+ if unicode_error:
+ break
+
+ if not unicode_error:
+ break
+
+ if unicode_errors:
+ for l in _merge_unicode_error(unicode_errors):
+ eerror(l, phase='install', key=mysettings.mycpv, out=out)
+
+ build_info_dir = os.path.join(mysettings['PORTAGE_BUILDDIR'],
+ 'build-info')
+
+ io.open(_unicode_encode(os.path.join(build_info_dir,
+ 'SIZE'), encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='strict').write(_unicode_decode(str(size) + '\n'))
+
+ io.open(_unicode_encode(os.path.join(build_info_dir,
+ 'BUILD_TIME'), encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='strict').write(_unicode_decode("%.0f\n" % (time.time(),)))
+
+ use = frozenset(mysettings['PORTAGE_USE'].split())
+ for k in _vdb_use_conditional_keys:
+ v = mysettings.configdict['pkg'].get(k)
+ filename = os.path.join(build_info_dir, k)
+ if v is None:
+ try:
+ os.unlink(filename)
+ except OSError:
+ pass
+ continue
+
+ if k.endswith('DEPEND'):
+ token_class = Atom
+ else:
+ token_class = None
+
+ v = use_reduce(v, uselist=use, token_class=token_class)
+ v = paren_enclose(v)
+ if not v:
+ try:
+ os.unlink(filename)
+ except OSError:
+ pass
+ continue
+ io.open(_unicode_encode(os.path.join(build_info_dir,
+ k), encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='strict').write(_unicode_decode(v + '\n'))
+
+ _reapply_bsdflags_to_image(mysettings)
+
+def _reapply_bsdflags_to_image(mysettings):
+ """
+ Reapply flags saved and removed by _preinst_bsdflags.
+ """
+ if bsd_chflags:
+ os.system("mtree -e -p %s -U -k flags < %s > /dev/null" % \
+ (_shell_quote(mysettings["D"]),
+ _shell_quote(os.path.join(mysettings["T"], "bsdflags.mtree"))))
+
+def _post_src_install_soname_symlinks(mysettings, out):
+ """
+ Check that libraries in $D have corresponding soname symlinks.
+ If symlinks are missing then create them and trigger a QA Notice.
+ This requires $PORTAGE_BUILDDIR/build-info/NEEDED.ELF.2 for
+ operation.
+ """
+
+ image_dir = mysettings["D"]
+ needed_filename = os.path.join(mysettings["PORTAGE_BUILDDIR"],
+ "build-info", "NEEDED.ELF.2")
+
+ try:
+ lines = io.open(_unicode_encode(needed_filename,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace').readlines()
+ except IOError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ return
+
+ libpaths = set(portage.util.getlibpaths(
+ mysettings["ROOT"], env=mysettings))
+ libpath_inodes = set()
+ for libpath in libpaths:
+ libdir = os.path.join(mysettings["ROOT"], libpath.lstrip(os.sep))
+ try:
+ s = os.stat(libdir)
+ except OSError:
+ continue
+ else:
+ libpath_inodes.add((s.st_dev, s.st_ino))
+
+ is_libdir_cache = {}
+
+ def is_libdir(obj_parent):
+ try:
+ return is_libdir_cache[obj_parent]
+ except KeyError:
+ pass
+
+ rval = False
+ if obj_parent in libpaths:
+ rval = True
+ else:
+ parent_path = os.path.join(mysettings["ROOT"],
+ obj_parent.lstrip(os.sep))
+ try:
+ s = os.stat(parent_path)
+ except OSError:
+ pass
+ else:
+ if (s.st_dev, s.st_ino) in libpath_inodes:
+ rval = True
+
+ is_libdir_cache[obj_parent] = rval
+ return rval
+
+ missing_symlinks = []
+
+ # Parse NEEDED.ELF.2 like LinkageMapELF.rebuild() does.
+ for l in lines:
+ l = l.rstrip("\n")
+ if not l:
+ continue
+ fields = l.split(";")
+ if len(fields) < 5:
+ portage.util.writemsg_level(_("\nWrong number of fields " \
+ "in %s: %s\n\n") % (needed_filename, l),
+ level=logging.ERROR, noiselevel=-1)
+ continue
+
+ obj, soname = fields[1:3]
+ if not soname:
+ continue
+ if not is_libdir(os.path.dirname(obj)):
+ continue
+
+ obj_file_path = os.path.join(image_dir, obj.lstrip(os.sep))
+ sym_file_path = os.path.join(os.path.dirname(obj_file_path), soname)
+ try:
+ os.lstat(sym_file_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ else:
+ continue
+
+ missing_symlinks.append((obj, soname))
+
+ if not missing_symlinks:
+ return
+
+ qa_msg = ["QA Notice: Missing soname symlink(s) " + \
+ "will be automatically created:"]
+ qa_msg.append("")
+ qa_msg.extend("\t%s -> %s" % (os.path.join(
+ os.path.dirname(obj).lstrip(os.sep), soname),
+ os.path.basename(obj))
+ for obj, soname in missing_symlinks)
+ qa_msg.append("")
+ for line in qa_msg:
+ eqawarn(line, key=mysettings.mycpv, out=out)
+
+ _preinst_bsdflags(mysettings)
+ for obj, soname in missing_symlinks:
+ obj_file_path = os.path.join(image_dir, obj.lstrip(os.sep))
+ sym_file_path = os.path.join(os.path.dirname(obj_file_path), soname)
+ os.symlink(os.path.basename(obj_file_path), sym_file_path)
+ _reapply_bsdflags_to_image(mysettings)
+
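+# For reference, a NEEDED.ELF.2 line as parsed above carries at least five
+# ";"-separated fields (values below are illustrative):
+#
+#     x86_64;/usr/lib64/libfoo.so.1.2.3;libfoo.so.1;/usr/lib64;libc.so.6
+#
+# fields[1] is the installed object and fields[2] its SONAME; a missing
+# /usr/lib64/libfoo.so.1 would be created as a symlink to libfoo.so.1.2.3.
+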
+def _merge_unicode_error(errors):
+ lines = []
+
+ msg = _("This package installs one or more file names containing "
+ "characters that do not match your current locale "
+ "settings. The current setting for filesystem encoding is '%s'.") \
+ % _encodings['merge']
+ lines.extend(wrap(msg, 72))
+
+ lines.append("")
+ errors.sort()
+ lines.extend("\t" + x for x in errors)
+ lines.append("")
+
+ if _encodings['merge'].lower().replace('_', '').replace('-', '') != 'utf8':
+ msg = _("For best results, UTF-8 encoding is recommended. See "
+ "the Gentoo Linux Localization Guide for instructions "
+ "about how to configure your locale for UTF-8 encoding:")
+ lines.extend(wrap(msg, 72))
+ lines.append("")
+ lines.append("\t" + \
+ "http://www.gentoo.org/doc/en/guide-localization.xml")
+ lines.append("")
+
+ return lines
diff --git a/portage_with_autodep/pym/portage/package/ebuild/fetch.py b/portage_with_autodep/pym/portage/package/ebuild/fetch.py
new file mode 100644
index 0000000..5cbbf87
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/fetch.py
@@ -0,0 +1,1129 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+__all__ = ['fetch']
+
+import errno
+import io
+import logging
+import random
+import re
+import shutil
+import stat
+import sys
+import tempfile
+
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.package.ebuild.config:check_config_instance,config',
+ 'portage.package.ebuild.doebuild:doebuild_environment,' + \
+ '_doebuild_spawn',
+ 'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
+)
+
+from portage import OrderedDict, os, selinux, _encodings, \
+ _shell_quote, _unicode_encode
+from portage.checksum import hashfunc_map, perform_md5, verify_all
+from portage.const import BASH_BINARY, CUSTOM_MIRRORS_FILE, \
+ GLOBAL_CONFIG_PATH
+from portage.data import portage_gid, portage_uid, secpass, userpriv_groups
+from portage.exception import FileNotFound, OperationNotPermitted, \
+ PortageException, TryAgain
+from portage.localization import _
+from portage.locks import lockfile, unlockfile
+from portage.manifest import Manifest
+from portage.output import colorize, EOutput
+from portage.util import apply_recursive_permissions, \
+ apply_secpass_permissions, ensure_dirs, grabdict, shlex_split, \
+ varexpand, writemsg, writemsg_level, writemsg_stdout
+from portage.process import spawn
+
+_userpriv_spawn_kwargs = (
+ ("uid", portage_uid),
+ ("gid", portage_gid),
+ ("groups", userpriv_groups),
+ ("umask", 0o02),
+)
+
+def _spawn_fetch(settings, args, **kwargs):
+ """
+ Spawn a process with appropriate settings for fetching, including
+ userfetch and selinux support.
+ """
+
+ global _userpriv_spawn_kwargs
+
+ # Redirect all output to stdout since some fetchers like
+ # wget pollute stderr (if portage detects a problem then it
+	# can send its own message to stderr).
+ if "fd_pipes" not in kwargs:
+
+ kwargs["fd_pipes"] = {
+ 0 : sys.stdin.fileno(),
+ 1 : sys.stdout.fileno(),
+ 2 : sys.stdout.fileno(),
+ }
+
+ if "userfetch" in settings.features and \
+ os.getuid() == 0 and portage_gid and portage_uid:
+ kwargs.update(_userpriv_spawn_kwargs)
+
+ spawn_func = spawn
+
+ if settings.selinux_enabled():
+ spawn_func = selinux.spawn_wrapper(spawn_func,
+ settings["PORTAGE_FETCH_T"])
+
+ # bash is an allowed entrypoint, while most binaries are not
+ if args[0] != BASH_BINARY:
+ args = [BASH_BINARY, "-c", "exec \"$@\"", args[0]] + args
+
+ # Ensure that EBUILD_PHASE is set to fetch, so that config.environ()
+ # does not filter the calling environment (which may contain needed
+ # proxy variables, as in bug #315421).
+ phase_backup = settings.get('EBUILD_PHASE')
+ settings['EBUILD_PHASE'] = 'fetch'
+ try:
+ rval = spawn_func(args, env=settings.environ(), **kwargs)
+ finally:
+ if phase_backup is None:
+ settings.pop('EBUILD_PHASE', None)
+ else:
+ settings['EBUILD_PHASE'] = phase_backup
+
+ return rval
+
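+# Usage sketch for _spawn_fetch() above (the wget command line is purely
+# illustrative; real commands come from FETCHCOMMAND/RESUMECOMMAND):
+#
+#     args = ["wget", "-O", "/usr/portage/distfiles/foo.tar.gz",
+#             "http://example.org/foo.tar.gz"]
+#     if _spawn_fetch(mysettings, args) != os.EX_OK:
+#         writemsg("!!! fetch failed\n", noiselevel=-1)
+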
+_userpriv_test_write_file_cache = {}
+_userpriv_test_write_cmd_script = ">> %(file_path)s 2>/dev/null ; rval=$? ; " + \
+ "rm -f %(file_path)s ; exit $rval"
+
+def _userpriv_test_write_file(settings, file_path):
+ """
+ Drop privileges and try to open a file for writing. The file may or
+ may not exist, and the parent directory is assumed to exist. The file
+ is removed before returning.
+
+ @param settings: A config instance which is passed to _spawn_fetch()
+ @param file_path: A file path to open and write.
+ @return: True if write succeeds, False otherwise.
+ """
+
+ global _userpriv_test_write_file_cache, _userpriv_test_write_cmd_script
+ rval = _userpriv_test_write_file_cache.get(file_path)
+ if rval is not None:
+ return rval
+
+ args = [BASH_BINARY, "-c", _userpriv_test_write_cmd_script % \
+ {"file_path" : _shell_quote(file_path)}]
+
+ returncode = _spawn_fetch(settings, args)
+
+ rval = returncode == os.EX_OK
+ _userpriv_test_write_file_cache[file_path] = rval
+ return rval
+
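+# For a path like /usr/portage/distfiles/.__portage_test_write__, the command
+# template above expands (roughly) to this dropped-privilege shell snippet:
+#
+#     >> /usr/portage/distfiles/.__portage_test_write__ 2>/dev/null ; \
+#     rval=$? ; rm -f /usr/portage/distfiles/.__portage_test_write__ ; \
+#     exit $rval
+#
+# i.e. it simply tries to append to the file, removes it, and reports the
+# append's exit status.
+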
+def _checksum_failure_temp_file(distdir, basename):
+ """
+	First try to find a duplicate temp file with the same checksum and return
+	that filename if available. Otherwise, use mkstemp to create a new unique
+	filename of the form <basename>._checksum_failure_.<random>, rename the
+	given file to it, and return the new filename. In either case, the given
+	filename will have been renamed or removed before this function returns.
+ """
+
+ filename = os.path.join(distdir, basename)
+ size = os.stat(filename).st_size
+ checksum = None
+ tempfile_re = re.compile(re.escape(basename) + r'\._checksum_failure_\..*')
+ for temp_filename in os.listdir(distdir):
+ if not tempfile_re.match(temp_filename):
+ continue
+ temp_filename = os.path.join(distdir, temp_filename)
+ try:
+ if size != os.stat(temp_filename).st_size:
+ continue
+ except OSError:
+ continue
+ try:
+ temp_checksum = perform_md5(temp_filename)
+ except FileNotFound:
+ # Apparently the temp file disappeared. Let it go.
+ continue
+ if checksum is None:
+ checksum = perform_md5(filename)
+ if checksum == temp_checksum:
+ os.unlink(filename)
+ return temp_filename
+
+ fd, temp_filename = \
+ tempfile.mkstemp("", basename + "._checksum_failure_.", distdir)
+ os.close(fd)
+ os.rename(filename, temp_filename)
+ return temp_filename
+
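+# Illustrative effect of the helper above: a distfile that failed verification
+# is moved aside rather than deleted, e.g.
+#
+#     foo.tar.gz  ->  foo.tar.gz._checksum_failure_.abc123
+#
+# (random suffix from tempfile.mkstemp), so the next fetch attempt starts
+# clean while the bad payload is kept for inspection or deduplication.
+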
+def _check_digests(filename, digests, show_errors=1):
+ """
+ Check digests and display a message if an error occurs.
+ @return True if all digests match, False otherwise.
+ """
+ verified_ok, reason = verify_all(filename, digests)
+ if not verified_ok:
+ if show_errors:
+ writemsg(_("!!! Previously fetched"
+ " file: '%s'\n") % filename, noiselevel=-1)
+ writemsg(_("!!! Reason: %s\n") % reason[0],
+ noiselevel=-1)
+ writemsg(_("!!! Got: %s\n"
+ "!!! Expected: %s\n") % \
+ (reason[1], reason[2]), noiselevel=-1)
+ return False
+ return True
+
+def _check_distfile(filename, digests, eout, show_errors=1):
+ """
+ @return a tuple of (match, stat_obj) where match is True if filename
+ matches all given digests (if any) and stat_obj is a stat result, or
+ None if the file does not exist.
+ """
+ if digests is None:
+ digests = {}
+ size = digests.get("size")
+ if size is not None and len(digests) == 1:
+ digests = None
+
+ try:
+ st = os.stat(filename)
+ except OSError:
+ return (False, None)
+ if size is not None and size != st.st_size:
+ return (False, st)
+ if not digests:
+ if size is not None:
+ eout.ebegin(_("%s size ;-)") % os.path.basename(filename))
+ eout.eend(0)
+ elif st.st_size == 0:
+ # Zero-byte distfiles are always invalid.
+ return (False, st)
+ else:
+ if _check_digests(filename, digests, show_errors=show_errors):
+ eout.ebegin("%s %s ;-)" % (os.path.basename(filename),
+ " ".join(sorted(digests))))
+ eout.eend(0)
+ else:
+ return (False, st)
+ return (True, st)
+
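+# Usage sketch for _check_distfile() above (digest values are placeholders):
+#
+#     eout = EOutput()
+#     digests = {"size": 1024, "md5": "<hex digest>"}
+#     match, st = _check_distfile(
+#         "/usr/portage/distfiles/foo.tar.gz", digests, eout)
+#     # match is True only if the file exists, the size matches and all
+#     # listed checksums verify; st is the stat result (or None).
+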
+_fetch_resume_size_re = re.compile(r'(^[\d]+)([KMGTPEZY]?$)')
+
+_size_suffix_map = {
+ '' : 0,
+ 'K' : 10,
+ 'M' : 20,
+ 'G' : 30,
+ 'T' : 40,
+ 'P' : 50,
+ 'E' : 60,
+ 'Z' : 70,
+ 'Y' : 80,
+}
+
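+# Worked example of the parsing below: PORTAGE_FETCH_RESUME_MIN_SIZE="350K"
+# matches _fetch_resume_size_re as ("350", "K"), giving
+#
+#     350 * 2 ** _size_suffix_map["K"] == 350 * 2**10 == 358400 bytes
+#
+# as the threshold below which a stale partial download is deleted rather
+# than resumed.
+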
+def fetch(myuris, mysettings, listonly=0, fetchonly=0,
+ locks_in_subdir=".locks", use_locks=1, try_mirrors=1, digests=None,
+ allow_missing_digests=True):
+ "fetch files. Will use digest file if available."
+
+ if not myuris:
+ return 1
+
+ features = mysettings.features
+ restrict = mysettings.get("PORTAGE_RESTRICT","").split()
+
+ userfetch = secpass >= 2 and "userfetch" in features
+ userpriv = secpass >= 2 and "userpriv" in features
+
+	# 'nomirror' is bad/negative logic. You restrict mirroring, not no-mirroring.
+ restrict_mirror = "mirror" in restrict or "nomirror" in restrict
+ if restrict_mirror:
+ if ("mirror" in features) and ("lmirror" not in features):
+ # lmirror should allow you to bypass mirror restrictions.
+ # XXX: This is not a good thing, and is temporary at best.
+ print(_(">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."))
+ return 1
+
+ # Generally, downloading the same file repeatedly from
+ # every single available mirror is a waste of bandwidth
+ # and time, so there needs to be a cap.
+ checksum_failure_max_tries = 5
+ v = checksum_failure_max_tries
+ try:
+ v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS",
+ checksum_failure_max_tries))
+ except (ValueError, OverflowError):
+ writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
+ " contains non-integer value: '%s'\n") % \
+ mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1)
+ writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
+ "default value: %s\n") % checksum_failure_max_tries,
+ noiselevel=-1)
+ v = checksum_failure_max_tries
+ if v < 1:
+ writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
+ " contains value less than 1: '%s'\n") % v, noiselevel=-1)
+ writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
+ "default value: %s\n") % checksum_failure_max_tries,
+ noiselevel=-1)
+ v = checksum_failure_max_tries
+ checksum_failure_max_tries = v
+ del v
+
+ fetch_resume_size_default = "350K"
+ fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE")
+ if fetch_resume_size is not None:
+ fetch_resume_size = "".join(fetch_resume_size.split())
+ if not fetch_resume_size:
+ # If it's undefined or empty, silently use the default.
+ fetch_resume_size = fetch_resume_size_default
+ match = _fetch_resume_size_re.match(fetch_resume_size)
+ if match is None or \
+ (match.group(2).upper() not in _size_suffix_map):
+ writemsg(_("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE"
+ " contains an unrecognized format: '%s'\n") % \
+ mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1)
+ writemsg(_("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE "
+ "default value: %s\n") % fetch_resume_size_default,
+ noiselevel=-1)
+ fetch_resume_size = None
+ if fetch_resume_size is None:
+ fetch_resume_size = fetch_resume_size_default
+ match = _fetch_resume_size_re.match(fetch_resume_size)
+ fetch_resume_size = int(match.group(1)) * \
+ 2 ** _size_suffix_map[match.group(2).upper()]
+
+ # Behave like the package has RESTRICT="primaryuri" after a
+	# couple of checksum failures, to increase the probability
+ # of success before checksum_failure_max_tries is reached.
+ checksum_failure_primaryuri = 2
+ thirdpartymirrors = mysettings.thirdpartymirrors()
+
+ # In the background parallel-fetch process, it's safe to skip checksum
+ # verification of pre-existing files in $DISTDIR that have the correct
+ # file size. The parent process will verify their checksums prior to
+ # the unpack phase.
+
+ parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings
+ if parallel_fetchonly:
+ fetchonly = 1
+
+ check_config_instance(mysettings)
+
+ custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
+ CUSTOM_MIRRORS_FILE), recursive=1)
+
+ mymirrors=[]
+
+ if listonly or ("distlocks" not in features):
+ use_locks = 0
+
+ fetch_to_ro = 0
+ if "skiprocheck" in features:
+ fetch_to_ro = 1
+
+ if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
+ if use_locks:
+ writemsg(colorize("BAD",
+ _("!!! For fetching to a read-only filesystem, "
+ "locking should be turned off.\n")), noiselevel=-1)
+ writemsg(_("!!! This can be done by adding -distlocks to "
+ "FEATURES in /etc/make.conf\n"), noiselevel=-1)
+# use_locks = 0
+
+ # local mirrors are always added
+ if "local" in custommirrors:
+ mymirrors += custommirrors["local"]
+
+ if restrict_mirror:
+ # We don't add any mirrors.
+ pass
+ else:
+ if try_mirrors:
+ mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]
+
+ skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
+ if skip_manifest:
+ allow_missing_digests = True
+ pkgdir = mysettings.get("O")
+ if digests is None and not (pkgdir is None or skip_manifest):
+ mydigests = Manifest(
+ pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
+ elif digests is None or skip_manifest:
+ # no digests because fetch was not called for a specific package
+ mydigests = {}
+ else:
+ mydigests = digests
+
+ ro_distdirs = [x for x in \
+ shlex_split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \
+ if os.path.isdir(x)]
+
+ fsmirrors = []
+ for x in range(len(mymirrors)-1,-1,-1):
+ if mymirrors[x] and mymirrors[x][0]=='/':
+ fsmirrors += [mymirrors[x]]
+ del mymirrors[x]
+
+ restrict_fetch = "fetch" in restrict
+ force_mirror = "force-mirror" in features and not restrict_mirror
+ custom_local_mirrors = custommirrors.get("local", [])
+ if restrict_fetch:
+ # With fetch restriction, a normal uri may only be fetched from
+ # custom local mirrors (if available). A mirror:// uri may also
+ # be fetched from specific mirrors (effectively overriding fetch
+ # restriction, but only for specific mirrors).
+ locations = custom_local_mirrors
+ else:
+ locations = mymirrors
+
+ file_uri_tuples = []
+ # Check for 'items' attribute since OrderedDict is not a dict.
+ if hasattr(myuris, 'items'):
+ for myfile, uri_set in myuris.items():
+ for myuri in uri_set:
+ file_uri_tuples.append((myfile, myuri))
+ else:
+ for myuri in myuris:
+ file_uri_tuples.append((os.path.basename(myuri), myuri))
+
+ filedict = OrderedDict()
+ primaryuri_indexes={}
+ primaryuri_dict = {}
+ thirdpartymirror_uris = {}
+ for myfile, myuri in file_uri_tuples:
+ if myfile not in filedict:
+ filedict[myfile]=[]
+ for y in range(0,len(locations)):
+ filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
+ if myuri[:9]=="mirror://":
+ eidx = myuri.find("/", 9)
+ if eidx != -1:
+ mirrorname = myuri[9:eidx]
+ path = myuri[eidx+1:]
+
+ # Try user-defined mirrors first
+ if mirrorname in custommirrors:
+ for cmirr in custommirrors[mirrorname]:
+ filedict[myfile].append(
+ cmirr.rstrip("/") + "/" + path)
+
+ # now try the official mirrors
+ if mirrorname in thirdpartymirrors:
+ random.shuffle(thirdpartymirrors[mirrorname])
+
+ uris = [locmirr.rstrip("/") + "/" + path \
+ for locmirr in thirdpartymirrors[mirrorname]]
+ filedict[myfile].extend(uris)
+ thirdpartymirror_uris.setdefault(myfile, []).extend(uris)
+
+ if not filedict[myfile]:
+ writemsg(_("No known mirror by the name: %s\n") % (mirrorname))
+ else:
+ writemsg(_("Invalid mirror definition in SRC_URI:\n"), noiselevel=-1)
+ writemsg(" %s\n" % (myuri), noiselevel=-1)
+ else:
+ if restrict_fetch or force_mirror:
+ # Only fetch from specific mirrors is allowed.
+ continue
+ if "primaryuri" in restrict:
+ # Use the source site first.
+ if myfile in primaryuri_indexes:
+ primaryuri_indexes[myfile] += 1
+ else:
+ primaryuri_indexes[myfile] = 0
+ filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
+ else:
+ filedict[myfile].append(myuri)
+ primaryuris = primaryuri_dict.get(myfile)
+ if primaryuris is None:
+ primaryuris = []
+ primaryuri_dict[myfile] = primaryuris
+ primaryuris.append(myuri)
+
+ # Prefer thirdpartymirrors over normal mirrors in cases when
+ # the file does not yet exist on the normal mirrors.
+ for myfile, uris in thirdpartymirror_uris.items():
+ primaryuri_dict.setdefault(myfile, []).extend(uris)
+
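+	# Illustrative expansion performed by the loop above: for
+	# myuri = "mirror://gnu/foo/foo-1.0.tar.gz", mirrorname is "gnu" and
+	# path is "foo/foo-1.0.tar.gz"; every configured "gnu" mirror M then
+	# contributes the candidate M + "/foo/foo-1.0.tar.gz" to
+	# filedict[myfile].
+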
+ can_fetch=True
+
+ if listonly:
+ can_fetch = False
+
+ if can_fetch and not fetch_to_ro:
+ global _userpriv_test_write_file_cache
+ dirmode = 0o070
+ filemode = 0o60
+ modemask = 0o2
+ dir_gid = portage_gid
+ if "FAKED_MODE" in mysettings:
+ # When inside fakeroot, directories with portage's gid appear
+ # to have root's gid. Therefore, use root's gid instead of
+			# portage's gid to avoid spurious permission adjustments
+ # when inside fakeroot.
+ dir_gid = 0
+ distdir_dirs = [""]
+ try:
+
+ for x in distdir_dirs:
+ mydir = os.path.join(mysettings["DISTDIR"], x)
+ write_test_file = os.path.join(
+ mydir, ".__portage_test_write__")
+
+ try:
+ st = os.stat(mydir)
+ except OSError:
+ st = None
+
+ if st is not None and stat.S_ISDIR(st.st_mode):
+ if not (userfetch or userpriv):
+ continue
+ if _userpriv_test_write_file(mysettings, write_test_file):
+ continue
+
+ _userpriv_test_write_file_cache.pop(write_test_file, None)
+ if ensure_dirs(mydir, gid=dir_gid, mode=dirmode, mask=modemask):
+ if st is None:
+ # The directory has just been created
+ # and therefore it must be empty.
+ continue
+ writemsg(_("Adjusting permissions recursively: '%s'\n") % mydir,
+ noiselevel=-1)
+ def onerror(e):
+ raise # bail out on the first error that occurs during recursion
+ if not apply_recursive_permissions(mydir,
+ gid=dir_gid, dirmode=dirmode, dirmask=modemask,
+ filemode=filemode, filemask=modemask, onerror=onerror):
+ raise OperationNotPermitted(
+ _("Failed to apply recursive permissions for the portage group."))
+ except PortageException as e:
+ if not os.path.isdir(mysettings["DISTDIR"]):
+ writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ writemsg(_("!!! Directory Not Found: DISTDIR='%s'\n") % mysettings["DISTDIR"], noiselevel=-1)
+ writemsg(_("!!! Fetching will fail!\n"), noiselevel=-1)
+
+ if can_fetch and \
+ not fetch_to_ro and \
+ not os.access(mysettings["DISTDIR"], os.W_OK):
+ writemsg(_("!!! No write access to '%s'\n") % mysettings["DISTDIR"],
+ noiselevel=-1)
+ can_fetch = False
+
+ distdir_writable = can_fetch and not fetch_to_ro
+ failed_files = set()
+ restrict_fetch_msg = False
+
+ for myfile in filedict:
+		# fetched status:
+		#   0 nonexistent
+		#   1 partially downloaded
+		#   2 completely downloaded
+ fetched = 0
+
+ orig_digests = mydigests.get(myfile, {})
+
+ if not (allow_missing_digests or listonly):
+ verifiable_hash_types = set(orig_digests).intersection(hashfunc_map)
+ verifiable_hash_types.discard("size")
+ if not verifiable_hash_types:
+ expected = set(hashfunc_map)
+ expected.discard("size")
+ expected = " ".join(sorted(expected))
+ got = set(orig_digests)
+ got.discard("size")
+ got = " ".join(sorted(got))
+ reason = (_("Insufficient data for checksum verification"),
+ got, expected)
+ writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
+ noiselevel=-1)
+ writemsg(_("!!! Reason: %s\n") % reason[0],
+ noiselevel=-1)
+ writemsg(_("!!! Got: %s\n!!! Expected: %s\n") % \
+ (reason[1], reason[2]), noiselevel=-1)
+
+ if fetchonly:
+ failed_files.add(myfile)
+ continue
+ else:
+ return 0
+
+ size = orig_digests.get("size")
+ if size == 0:
+ # Zero-byte distfiles are always invalid, so discard their digests.
+ del mydigests[myfile]
+ orig_digests.clear()
+ size = None
+ pruned_digests = orig_digests
+ if parallel_fetchonly:
+ pruned_digests = {}
+ if size is not None:
+ pruned_digests["size"] = size
+
+ myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
+ has_space = True
+ has_space_superuser = True
+ file_lock = None
+ if listonly:
+ writemsg_stdout("\n", noiselevel=-1)
+ else:
+ # check if there is enough space in DISTDIR to completely store myfile
+ # overestimate the filesize so we aren't bitten by FS overhead
+ vfs_stat = None
+ if size is not None and hasattr(os, "statvfs"):
+ try:
+ vfs_stat = os.statvfs(mysettings["DISTDIR"])
+ except OSError as e:
+ writemsg_level("!!! statvfs('%s'): %s\n" %
+ (mysettings["DISTDIR"], e),
+ noiselevel=-1, level=logging.ERROR)
+ del e
+
+ if vfs_stat is not None:
+ try:
+ mysize = os.stat(myfile_path).st_size
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ del e
+ mysize = 0
+ if (size - mysize + vfs_stat.f_bsize) >= \
+ (vfs_stat.f_bsize * vfs_stat.f_bavail):
+
+ if (size - mysize + vfs_stat.f_bsize) >= \
+ (vfs_stat.f_bsize * vfs_stat.f_bfree):
+ has_space_superuser = False
+
+ if not has_space_superuser:
+ has_space = False
+ elif secpass < 2:
+ has_space = False
+ elif userfetch:
+ has_space = False
+
+ if not has_space:
+ writemsg(_("!!! Insufficient space to store %s in %s\n") % \
+ (myfile, mysettings["DISTDIR"]), noiselevel=-1)
+
+ if has_space_superuser:
+ writemsg(_("!!! Insufficient privileges to use "
+ "remaining space.\n"), noiselevel=-1)
+ if userfetch:
+ writemsg(_("!!! You may set FEATURES=\"-userfetch\""
+ " in /etc/make.conf in order to fetch with\n"
+ "!!! superuser privileges.\n"), noiselevel=-1)
+
+ if distdir_writable and use_locks:
+
+ lock_kwargs = {}
+ if fetchonly:
+ lock_kwargs["flags"] = os.O_NONBLOCK
+
+ try:
+ file_lock = lockfile(myfile_path,
+ wantnewlockfile=1, **lock_kwargs)
+ except TryAgain:
+ writemsg(_(">>> File '%s' is already locked by "
+ "another fetcher. Continuing...\n") % myfile,
+ noiselevel=-1)
+ continue
+ try:
+ if not listonly:
+
+ eout = EOutput()
+ eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
+ match, mystat = _check_distfile(
+ myfile_path, pruned_digests, eout)
+ if match:
+ if distdir_writable:
+ try:
+ apply_secpass_permissions(myfile_path,
+ gid=portage_gid, mode=0o664, mask=0o2,
+ stat_cached=mystat)
+ except PortageException as e:
+ if not os.access(myfile_path, os.R_OK):
+ writemsg(_("!!! Failed to adjust permissions:"
+ " %s\n") % str(e), noiselevel=-1)
+ del e
+ continue
+
+ if distdir_writable and mystat is None:
+ # Remove broken symlinks if necessary.
+ try:
+ os.unlink(myfile_path)
+ except OSError:
+ pass
+
+ if mystat is not None:
+ if stat.S_ISDIR(mystat.st_mode):
+ writemsg_level(
+ _("!!! Unable to fetch file since "
+ "a directory is in the way: \n"
+ "!!! %s\n") % myfile_path,
+ level=logging.ERROR, noiselevel=-1)
+ return 0
+
+ if mystat.st_size == 0:
+ if distdir_writable:
+ try:
+ os.unlink(myfile_path)
+ except OSError:
+ pass
+ elif distdir_writable:
+ if mystat.st_size < fetch_resume_size and \
+ mystat.st_size < size:
+ # If the file already exists and the size does not
+ # match the existing digests, it may be that the
+ # user is attempting to update the digest. In this
+ # case, the digestgen() function will advise the
+ # user to use `ebuild --force foo.ebuild manifest`
+ # in order to force the old digests to be replaced.
+ # Since the user may want to keep this file, rename
+ # it instead of deleting it.
+							writemsg(_(">>> Renaming distfile with size "
+								"%d (smaller than PORTAGE_FETCH_RESUME_MIN_SIZE)\n")
+								% mystat.st_size)
+ temp_filename = \
+ _checksum_failure_temp_file(
+ mysettings["DISTDIR"], myfile)
+ writemsg_stdout(_("Refetching... "
+ "File renamed to '%s'\n\n") % \
+ temp_filename, noiselevel=-1)
+ elif mystat.st_size >= size:
+ temp_filename = \
+ _checksum_failure_temp_file(
+ mysettings["DISTDIR"], myfile)
+ writemsg_stdout(_("Refetching... "
+ "File renamed to '%s'\n\n") % \
+ temp_filename, noiselevel=-1)
+
+ if distdir_writable and ro_distdirs:
+ readonly_file = None
+ for x in ro_distdirs:
+ filename = os.path.join(x, myfile)
+ match, mystat = _check_distfile(
+ filename, pruned_digests, eout)
+ if match:
+ readonly_file = filename
+ break
+ if readonly_file is not None:
+ try:
+ os.unlink(myfile_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ del e
+ os.symlink(readonly_file, myfile_path)
+ continue
+
+ if fsmirrors and not os.path.exists(myfile_path) and has_space:
+ for mydir in fsmirrors:
+ mirror_file = os.path.join(mydir, myfile)
+ try:
+ shutil.copyfile(mirror_file, myfile_path)
+ writemsg(_("Local mirror has file: %s\n") % myfile)
+ break
+ except (IOError, OSError) as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ del e
+
+ try:
+ mystat = os.stat(myfile_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ del e
+ else:
+ try:
+ apply_secpass_permissions(
+ myfile_path, gid=portage_gid, mode=0o664, mask=0o2,
+ stat_cached=mystat)
+ except PortageException as e:
+ if not os.access(myfile_path, os.R_OK):
+ writemsg(_("!!! Failed to adjust permissions:"
+ " %s\n") % str(e), noiselevel=-1)
+
+ # If the file is empty then it's obviously invalid. Remove
+ # the empty file and try to download if possible.
+ if mystat.st_size == 0:
+ if distdir_writable:
+ try:
+ os.unlink(myfile_path)
+ except EnvironmentError:
+ pass
+ elif myfile not in mydigests:
+ # We don't have a digest, but the file exists. We must
+ # assume that it is fully downloaded.
+ continue
+ else:
+ if mystat.st_size < mydigests[myfile]["size"] and \
+ not restrict_fetch:
+ fetched = 1 # Try to resume this download.
+ elif parallel_fetchonly and \
+ mystat.st_size == mydigests[myfile]["size"]:
+ eout = EOutput()
+ eout.quiet = \
+ mysettings.get("PORTAGE_QUIET") == "1"
+ eout.ebegin(
+ "%s size ;-)" % (myfile, ))
+ eout.eend(0)
+ continue
+ else:
+ verified_ok, reason = verify_all(
+ myfile_path, mydigests[myfile])
+ if not verified_ok:
+ writemsg(_("!!! Previously fetched"
+ " file: '%s'\n") % myfile, noiselevel=-1)
+ writemsg(_("!!! Reason: %s\n") % reason[0],
+ noiselevel=-1)
+ writemsg(_("!!! Got: %s\n"
+ "!!! Expected: %s\n") % \
+ (reason[1], reason[2]), noiselevel=-1)
+ if reason[0] == _("Insufficient data for checksum verification"):
+ return 0
+ if distdir_writable:
+ temp_filename = \
+ _checksum_failure_temp_file(
+ mysettings["DISTDIR"], myfile)
+ writemsg_stdout(_("Refetching... "
+ "File renamed to '%s'\n\n") % \
+ temp_filename, noiselevel=-1)
+ else:
+ eout = EOutput()
+ eout.quiet = \
+ mysettings.get("PORTAGE_QUIET", None) == "1"
+ digests = mydigests.get(myfile)
+ if digests:
+ digests = list(digests)
+ digests.sort()
+ eout.ebegin(
+ "%s %s ;-)" % (myfile, " ".join(digests)))
+ eout.eend(0)
+ continue # fetch any remaining files
+
+ # Create a reversed list since that is optimal for list.pop().
+ uri_list = filedict[myfile][:]
+ uri_list.reverse()
+ checksum_failure_count = 0
+ tried_locations = set()
+ while uri_list:
+ loc = uri_list.pop()
+ # Eliminate duplicates here in case we've switched to
+ # "primaryuri" mode on the fly due to a checksum failure.
+ if loc in tried_locations:
+ continue
+ tried_locations.add(loc)
+ if listonly:
+ writemsg_stdout(loc+" ", noiselevel=-1)
+ continue
+ # allow different fetchcommands per protocol
+ protocol = loc[0:loc.find("://")]
+
+ global_config_path = GLOBAL_CONFIG_PATH
+ if mysettings['EPREFIX']:
+ global_config_path = os.path.join(mysettings['EPREFIX'],
+ GLOBAL_CONFIG_PATH.lstrip(os.sep))
+
+ missing_file_param = False
+ fetchcommand_var = "FETCHCOMMAND_" + protocol.upper()
+ fetchcommand = mysettings.get(fetchcommand_var)
+ if fetchcommand is None:
+ fetchcommand_var = "FETCHCOMMAND"
+ fetchcommand = mysettings.get(fetchcommand_var)
+ if fetchcommand is None:
+ writemsg_level(
+ _("!!! %s is unset. It should "
+ "have been defined in\n!!! %s/make.globals.\n") \
+ % (fetchcommand_var, global_config_path),
+ level=logging.ERROR, noiselevel=-1)
+ return 0
+ if "${FILE}" not in fetchcommand:
+ writemsg_level(
+ _("!!! %s does not contain the required ${FILE}"
+ " parameter.\n") % fetchcommand_var,
+ level=logging.ERROR, noiselevel=-1)
+ missing_file_param = True
+
+ resumecommand_var = "RESUMECOMMAND_" + protocol.upper()
+ resumecommand = mysettings.get(resumecommand_var)
+ if resumecommand is None:
+ resumecommand_var = "RESUMECOMMAND"
+ resumecommand = mysettings.get(resumecommand_var)
+ if resumecommand is None:
+ writemsg_level(
+ _("!!! %s is unset. It should "
+ "have been defined in\n!!! %s/make.globals.\n") \
+ % (resumecommand_var, global_config_path),
+ level=logging.ERROR, noiselevel=-1)
+ return 0
+ if "${FILE}" not in resumecommand:
+ writemsg_level(
+ _("!!! %s does not contain the required ${FILE}"
+ " parameter.\n") % resumecommand_var,
+ level=logging.ERROR, noiselevel=-1)
+ missing_file_param = True
+
+ if missing_file_param:
+ writemsg_level(
+ _("!!! Refer to the make.conf(5) man page for "
+ "information about how to\n!!! correctly specify "
+ "FETCHCOMMAND and RESUMECOMMAND.\n"),
+ level=logging.ERROR, noiselevel=-1)
+ if myfile != os.path.basename(loc):
+ return 0
+
+ if not can_fetch:
+ if fetched != 2:
+ try:
+ mysize = os.stat(myfile_path).st_size
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ del e
+ mysize = 0
+
+ if mysize == 0:
+							writemsg(_("!!! File %s has not been fetched, but we are unable to get it.\n") % myfile,
+ noiselevel=-1)
+ elif size is None or size > mysize:
+							writemsg(_("!!! File %s is not fully fetched, but we are unable to complete it.\n") % myfile,
+ noiselevel=-1)
+ else:
+							writemsg(_("!!! File %s has an incorrect size, "
+								"but we are unable to retry.\n") % myfile, noiselevel=-1)
+ return 0
+ else:
+ continue
+
+ if fetched != 2 and has_space:
+ #we either need to resume or start the download
+ if fetched == 1:
+ try:
+ mystat = os.stat(myfile_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ del e
+ fetched = 0
+ else:
+ if mystat.st_size < fetch_resume_size:
+									writemsg(_(">>> Deleting distfile with size "
+										"%d (smaller than PORTAGE_FETCH_RESUME_MIN_SIZE)\n")
+										% mystat.st_size)
+ try:
+ os.unlink(myfile_path)
+ except OSError as e:
+ if e.errno not in \
+ (errno.ENOENT, errno.ESTALE):
+ raise
+ del e
+ fetched = 0
+ if fetched == 1:
+ #resume mode:
+ writemsg(_(">>> Resuming download...\n"))
+ locfetch=resumecommand
+ command_var = resumecommand_var
+ else:
+ #normal mode:
+ locfetch=fetchcommand
+ command_var = fetchcommand_var
+ writemsg_stdout(_(">>> Downloading '%s'\n") % \
+ re.sub(r'//(.+):.+@(.+)/',r'//\1:*password*@\2/', loc))
+ variables = {
+ "DISTDIR": mysettings["DISTDIR"],
+ "URI": loc,
+ "FILE": myfile
+ }
+
+ myfetch = shlex_split(locfetch)
+ myfetch = [varexpand(x, mydict=variables) for x in myfetch]
+ myret = -1
+ try:
+
+ myret = _spawn_fetch(mysettings, myfetch)
+
+ finally:
+ try:
+ apply_secpass_permissions(myfile_path,
+ gid=portage_gid, mode=0o664, mask=0o2)
+ except FileNotFound:
+ pass
+ except PortageException as e:
+ if not os.access(myfile_path, os.R_OK):
+ writemsg(_("!!! Failed to adjust permissions:"
+ " %s\n") % str(e), noiselevel=-1)
+ del e
+
+ # If the file is empty then it's obviously invalid. Don't
+ # trust the return value from the fetcher. Remove the
+ # empty file and try to download again.
+ try:
+ if os.stat(myfile_path).st_size == 0:
+ os.unlink(myfile_path)
+ fetched = 0
+ continue
+ except EnvironmentError:
+ pass
+
+ if mydigests is not None and myfile in mydigests:
+ try:
+ mystat = os.stat(myfile_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ del e
+ fetched = 0
+ else:
+
+ if stat.S_ISDIR(mystat.st_mode):
+ # This can happen if FETCHCOMMAND erroneously
+ # contains wget's -P option where it should
+ # instead have -O.
+ writemsg_level(
+ _("!!! The command specified in the "
+ "%s variable appears to have\n!!! "
+ "created a directory instead of a "
+ "normal file.\n") % command_var,
+ level=logging.ERROR, noiselevel=-1)
+ writemsg_level(
+ _("!!! Refer to the make.conf(5) "
+ "man page for information about how "
+ "to\n!!! correctly specify "
+ "FETCHCOMMAND and RESUMECOMMAND.\n"),
+ level=logging.ERROR, noiselevel=-1)
+ return 0
+
+ # no exception? file exists. let digestcheck() report
+ # appropriately for size or checksum errors
+
+ # If the fetcher reported success and the file is
+ # too small, it's probably because the digest is
+ # bad (upstream changed the distfile). In this
+ # case we don't want to attempt to resume. Show a
+ # digest verification failure so that the user gets
+ # a clue about what just happened.
+ if myret != os.EX_OK and \
+ mystat.st_size < mydigests[myfile]["size"]:
+ # Fetch failed... Try the next one... Kill 404 files though.
+ if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
+ html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
+ if html404.search(io.open(
+ _unicode_encode(myfile_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='replace'
+ ).read()):
+ try:
+ os.unlink(mysettings["DISTDIR"]+"/"+myfile)
+ writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n"))
+ fetched = 0
+ continue
+ except (IOError, OSError):
+ pass
+ fetched = 1
+ continue
+ if True:
+ # File is the correct size--check the checksums for the fetched
+ # file NOW, for those users who don't have a stable/continuous
+ # net connection. This way we have a chance to try to download
+ # from another mirror...
+ verified_ok,reason = verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
+ if not verified_ok:
+ print(reason)
+ writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
+ noiselevel=-1)
+ writemsg(_("!!! Reason: %s\n") % reason[0],
+ noiselevel=-1)
+ writemsg(_("!!! Got: %s\n!!! Expected: %s\n") % \
+ (reason[1], reason[2]), noiselevel=-1)
+ if reason[0] == _("Insufficient data for checksum verification"):
+ return 0
+ temp_filename = \
+ _checksum_failure_temp_file(
+ mysettings["DISTDIR"], myfile)
+ writemsg_stdout(_("Refetching... "
+ "File renamed to '%s'\n\n") % \
+ temp_filename, noiselevel=-1)
+ fetched=0
+ checksum_failure_count += 1
+ if checksum_failure_count == \
+ checksum_failure_primaryuri:
+ # Switch to "primaryuri" mode in order
+ # to increase the probablility of
+ # of success.
+ primaryuris = \
+ primaryuri_dict.get(myfile)
+ if primaryuris:
+ uri_list.extend(
+ reversed(primaryuris))
+ if checksum_failure_count >= \
+ checksum_failure_max_tries:
+ break
+ else:
+ eout = EOutput()
+ eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
+ digests = mydigests.get(myfile)
+ if digests:
+ eout.ebegin("%s %s ;-)" % \
+ (myfile, " ".join(sorted(digests))))
+ eout.eend(0)
+ fetched=2
+ break
+ else:
+ if not myret:
+ fetched=2
+ break
+ elif mydigests is not None:
+ writemsg(_("No digest file available and download failed.\n\n"),
+ noiselevel=-1)
+ finally:
+ if use_locks and file_lock:
+ unlockfile(file_lock)
+ file_lock = None
+
+ if listonly:
+ writemsg_stdout("\n", noiselevel=-1)
+ if fetched != 2:
+ if restrict_fetch and not restrict_fetch_msg:
+ restrict_fetch_msg = True
+ msg = _("\n!!! %s/%s"
+ " has fetch restriction turned on.\n"
+ "!!! This probably means that this "
+ "ebuild's files must be downloaded\n"
+ "!!! manually. See the comments in"
+ " the ebuild for more information.\n\n") % \
+ (mysettings["CATEGORY"], mysettings["PF"])
+ writemsg_level(msg,
+ level=logging.ERROR, noiselevel=-1)
+ elif restrict_fetch:
+ pass
+ elif listonly:
+ pass
+ elif not filedict[myfile]:
+ writemsg(_("Warning: No mirrors available for file"
+ " '%s'\n") % (myfile), noiselevel=-1)
+ else:
+ writemsg(_("!!! Couldn't download '%s'. Aborting.\n") % myfile,
+ noiselevel=-1)
+
+ if listonly:
+ failed_files.add(myfile)
+ continue
+ elif fetchonly:
+ failed_files.add(myfile)
+ continue
+ return 0
+ if failed_files:
+ return 0
+ return 1
diff --git a/portage_with_autodep/pym/portage/package/ebuild/getmaskingreason.py b/portage_with_autodep/pym/portage/package/ebuild/getmaskingreason.py
new file mode 100644
index 0000000..f2af638
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/getmaskingreason.py
@@ -0,0 +1,124 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['getmaskingreason']
+
+import portage
+from portage import os
+from portage.const import USER_CONFIG_PATH
+from portage.dep import Atom, match_from_list, _slot_separator, _repo_separator
+from portage.exception import InvalidAtom
+from portage.localization import _
+from portage.repository.config import _gen_valid_repo
+from portage.util import grablines, normalize_path
+from portage.versions import catpkgsplit
+from _emerge.Package import Package
+
+def getmaskingreason(mycpv, metadata=None, settings=None,
+ portdb=None, return_location=False, myrepo=None):
+ """
+ If specified, the myrepo argument is assumed to be valid. This
+ should be a safe assumption since portdbapi methods always
+ return valid repo names and valid "repository" metadata from
+ aux_get.
+ """
+ if settings is None:
+ settings = portage.settings
+ if portdb is None:
+ portdb = portage.portdb
+ mysplit = catpkgsplit(mycpv)
+ if not mysplit:
+ raise ValueError(_("invalid CPV: %s") % mycpv)
+
+ if metadata is None:
+ db_keys = list(portdb._aux_cache_keys)
+ try:
+ metadata = dict(zip(db_keys,
+ portdb.aux_get(mycpv, db_keys, myrepo=myrepo)))
+ except KeyError:
+ if not portdb.cpv_exists(mycpv):
+ raise
+ else:
+ if myrepo is None:
+ myrepo = _gen_valid_repo(metadata["repository"])
+
+ elif myrepo is None:
+ myrepo = metadata.get("repository")
+ if myrepo is not None:
+ myrepo = _gen_valid_repo(metadata["repository"])
+
+ if metadata is not None and \
+ not portage.eapi_is_supported(metadata["EAPI"]):
+ # Return early since otherwise we might produce invalid
+ # results given that the EAPI is not supported. Also,
+ # metadata is mostly useless in this case since it doesn't
+ # contain essential things like SLOT.
+ if return_location:
+ return (None, None)
+ else:
+ return None
+
+ # Sometimes we can't access SLOT or repository due to corruption.
+ pkg = mycpv
+ if metadata is not None:
+ pkg = "".join((mycpv, _slot_separator, metadata["SLOT"]))
+ # At this point myrepo should be None, a valid name, or
+ # Package.UNKNOWN_REPO which we ignore.
+ if myrepo is not None and myrepo != Package.UNKNOWN_REPO:
+ pkg = "".join((pkg, _repo_separator, myrepo))
+ cpv_slot_list = [pkg]
+
+ mycp=mysplit[0]+"/"+mysplit[1]
+
+ # XXX- This is a temporary duplicate of code from the config constructor.
+ locations = [os.path.join(settings["PORTDIR"], "profiles")]
+ locations.extend(settings.profiles)
+ for ov in settings["PORTDIR_OVERLAY"].split():
+ profdir = os.path.join(normalize_path(ov), "profiles")
+ if os.path.isdir(profdir):
+ locations.append(profdir)
+ locations.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
+ USER_CONFIG_PATH))
+ locations.reverse()
+ pmasklists = []
+ for profile in locations:
+ pmask_filename = os.path.join(profile, "package.mask")
+ pmasklists.append((pmask_filename, grablines(pmask_filename, recursive=1)))
+
+ pmaskdict = settings._mask_manager._pmaskdict
+ if mycp in pmaskdict:
+ for x in pmaskdict[mycp]:
+ if match_from_list(x, cpv_slot_list):
+ x = x.without_repo
+ for pmask in pmasklists:
+ comment = ""
+ comment_valid = -1
+ pmask_filename = pmask[0]
+ for i in range(len(pmask[1])):
+ l = pmask[1][i].strip()
+ try:
+ l_atom = Atom(l, allow_repo=True,
+ allow_wildcard=True).without_repo
+ except InvalidAtom:
+ l_atom = None
+ if l == "":
+ comment = ""
+ comment_valid = -1
+ elif l[0] == "#":
+ comment += (l+"\n")
+ comment_valid = i + 1
+ elif l_atom == x:
+ if comment_valid != i:
+ comment = ""
+ if return_location:
+ return (comment, pmask_filename)
+ else:
+ return comment
+ elif comment_valid != -1:
+ # Apparently this comment applies to multiple masks, so
+ # it remains valid until a blank line is encountered.
+ comment_valid += 1
+ if return_location:
+ return (None, None)
+ else:
+ return None
diff --git a/portage_with_autodep/pym/portage/package/ebuild/getmaskingstatus.py b/portage_with_autodep/pym/portage/package/ebuild/getmaskingstatus.py
new file mode 100644
index 0000000..4c65fcc
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/getmaskingstatus.py
@@ -0,0 +1,174 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['getmaskingstatus']
+
+import sys
+
+import portage
+from portage import eapi_is_supported, _eapi_is_deprecated
+from portage.dep import match_from_list, _slot_separator, _repo_separator
+from portage.localization import _
+from portage.package.ebuild.config import config
+from portage.versions import catpkgsplit, cpv_getkey
+from _emerge.Package import Package
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+class _UnmaskHint(object):
+
+ __slots__ = ('key', 'value')
+
+ def __init__(self, key, value):
+ self.key = key
+ self.value = value
+
+class _MaskReason(object):
+
+ __slots__ = ('category', 'message', 'unmask_hint')
+
+ def __init__(self, category, message, unmask_hint=None):
+ self.category = category
+ self.message = message
+ self.unmask_hint = unmask_hint
+
+def getmaskingstatus(mycpv, settings=None, portdb=None, myrepo=None):
+ if settings is None:
+ settings = config(clone=portage.settings)
+ if portdb is None:
+ portdb = portage.portdb
+
+ return [mreason.message for \
+ mreason in _getmaskingstatus(mycpv, settings, portdb,myrepo)]
+
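+# Illustrative usage of getmaskingstatus (a sketch; the cpv is hypothetical):
+#   for msg in getmaskingstatus("app-misc/foo-1.0"):
+#       print(msg)   # e.g. "package.mask" or "~amd64 keyword"
+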
+def _getmaskingstatus(mycpv, settings, portdb, myrepo=None):
+
+ metadata = None
+ installed = False
+ if not isinstance(mycpv, basestring):
+ # emerge passed in a Package instance
+ pkg = mycpv
+ mycpv = pkg.cpv
+ metadata = pkg.metadata
+ installed = pkg.installed
+
+ mysplit = catpkgsplit(mycpv)
+ if not mysplit:
+ raise ValueError(_("invalid CPV: %s") % mycpv)
+ if metadata is None:
+ db_keys = list(portdb._aux_cache_keys)
+ try:
+ metadata = dict(zip(db_keys, portdb.aux_get(mycpv, db_keys, myrepo=myrepo)))
+ except KeyError:
+ if not portdb.cpv_exists(mycpv):
+ raise
+ return [_MaskReason("corruption", "corruption")]
+ if "?" in metadata["LICENSE"]:
+ settings.setcpv(mycpv, mydb=metadata)
+ metadata["USE"] = settings["PORTAGE_USE"]
+ else:
+ metadata["USE"] = ""
+
+ rValue = []
+
+ # profile checking
+ if settings._getProfileMaskAtom(mycpv, metadata):
+ rValue.append(_MaskReason("profile", "profile"))
+
+ # package.mask checking
+ if settings._getMaskAtom(mycpv, metadata):
+ rValue.append(_MaskReason("package.mask", "package.mask", _UnmaskHint("p_mask", None)))
+
+ # keywords checking
+ eapi = metadata["EAPI"]
+ mygroups = settings._getKeywords(mycpv, metadata)
+ licenses = metadata["LICENSE"]
+ properties = metadata["PROPERTIES"]
+ if eapi.startswith("-"):
+ eapi = eapi[1:]
+ if not eapi_is_supported(eapi):
+ return [_MaskReason("EAPI", "EAPI %s" % eapi)]
+ elif _eapi_is_deprecated(eapi) and not installed:
+ return [_MaskReason("EAPI", "EAPI %s" % eapi)]
+ egroups = settings.configdict["backupenv"].get(
+ "ACCEPT_KEYWORDS", "").split()
+ global_accept_keywords = settings.get("ACCEPT_KEYWORDS", "")
+ pgroups = global_accept_keywords.split()
+ myarch = settings["ARCH"]
+ if pgroups and myarch not in pgroups:
+ """For operating systems other than Linux, ARCH is not necessarily a
+ valid keyword."""
+ myarch = pgroups[0].lstrip("~")
+
+ # NOTE: This logic is copied from KeywordsManager.getMissingKeywords().
+ unmaskgroups = settings._keywords_manager.getPKeywords(mycpv,
+ metadata["SLOT"], metadata["repository"], global_accept_keywords)
+ pgroups.extend(unmaskgroups)
+ if unmaskgroups or egroups:
+ pgroups = settings._keywords_manager._getEgroups(egroups, pgroups)
+ else:
+ pgroups = set(pgroups)
+
+ kmask = "missing"
+ kmask_hint = None
+
+ if '**' in pgroups:
+ kmask = None
+ else:
+ for keyword in pgroups:
+ if keyword in mygroups:
+ kmask = None
+ break
+
+ if kmask:
+ for gp in mygroups:
+ if gp=="*":
+ kmask=None
+ break
+ elif gp=="-"+myarch and myarch in pgroups:
+ kmask="-"+myarch
+ break
+ elif gp=="~"+myarch and myarch in pgroups:
+ kmask="~"+myarch
+ kmask_hint = _UnmaskHint("unstable keyword", kmask)
+ break
+
+ if kmask == "missing":
+ kmask_hint = _UnmaskHint("unstable keyword", "**")
+
+ try:
+ missing_licenses = settings._getMissingLicenses(mycpv, metadata)
+ if missing_licenses:
+ allowed_tokens = set(["||", "(", ")"])
+ allowed_tokens.update(missing_licenses)
+ license_split = licenses.split()
+ license_split = [x for x in license_split \
+ if x in allowed_tokens]
+ msg = license_split[:]
+ msg.append("license(s)")
+ rValue.append(_MaskReason("LICENSE", " ".join(msg), _UnmaskHint("license", set(missing_licenses))))
+ except portage.exception.InvalidDependString as e:
+ rValue.append(_MaskReason("invalid", "LICENSE: "+str(e)))
+
+ try:
+ missing_properties = settings._getMissingProperties(mycpv, metadata)
+ if missing_properties:
+ allowed_tokens = set(["||", "(", ")"])
+ allowed_tokens.update(missing_properties)
+ properties_split = properties.split()
+ properties_split = [x for x in properties_split \
+ if x in allowed_tokens]
+ msg = properties_split[:]
+ msg.append("properties")
+ rValue.append(_MaskReason("PROPERTIES", " ".join(msg)))
+ except portage.exception.InvalidDependString as e:
+ rValue.append(_MaskReason("invalid", "PROPERTIES: "+str(e)))
+
+ # Only show KEYWORDS masks for installed packages
+ # if they're not masked for any other reason.
+ if kmask and (not installed or not rValue):
+ rValue.append(_MaskReason("KEYWORDS",
+ kmask + " keyword", unmask_hint=kmask_hint))
+
+ return rValue
diff --git a/portage_with_autodep/pym/portage/package/ebuild/prepare_build_dirs.py b/portage_with_autodep/pym/portage/package/ebuild/prepare_build_dirs.py
new file mode 100644
index 0000000..616dc2e
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/prepare_build_dirs.py
@@ -0,0 +1,370 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['prepare_build_dirs']
+
+import errno
+import gzip
+import shutil
+import stat
+import time
+
+from portage import os, _encodings, _unicode_encode, _unicode_decode
+from portage.data import portage_gid, portage_uid, secpass
+from portage.exception import DirectoryNotFound, FileNotFound, \
+ OperationNotPermitted, PermissionDenied, PortageException
+from portage.localization import _
+from portage.output import colorize
+from portage.util import apply_recursive_permissions, \
+ apply_secpass_permissions, ensure_dirs, normalize_path, writemsg
+
+def prepare_build_dirs(myroot=None, settings=None, cleanup=False):
+ """
+ The myroot parameter is ignored.
+ """
+ myroot = None
+
+ if settings is None:
+ raise TypeError("settings argument is required")
+
+ mysettings = settings
+ clean_dirs = [mysettings["HOME"]]
+
+ # We enable cleanup when we want to make sure old cruft (such as the old
+ # environment) doesn't interfere with the current phase.
+ if cleanup and 'keeptemp' not in mysettings.features:
+ clean_dirs.append(mysettings["T"])
+
+ for clean_dir in clean_dirs:
+ try:
+ shutil.rmtree(clean_dir)
+ except OSError as oe:
+ if errno.ENOENT == oe.errno:
+ pass
+ elif errno.EPERM == oe.errno:
+ writemsg("%s\n" % oe, noiselevel=-1)
+ writemsg(_("Operation Not Permitted: rmtree('%s')\n") % \
+ clean_dir, noiselevel=-1)
+ return 1
+ else:
+ raise
+
+ def makedirs(dir_path):
+ try:
+ os.makedirs(dir_path)
+ except OSError as oe:
+ if errno.EEXIST == oe.errno:
+ pass
+ elif errno.EPERM == oe.errno:
+ writemsg("%s\n" % oe, noiselevel=-1)
+ writemsg(_("Operation Not Permitted: makedirs('%s')\n") % \
+ dir_path, noiselevel=-1)
+ return False
+ else:
+ raise
+ return True
+
+ mysettings["PKG_LOGDIR"] = os.path.join(mysettings["T"], "logging")
+
+ mydirs = [os.path.dirname(mysettings["PORTAGE_BUILDDIR"])]
+ mydirs.append(os.path.dirname(mydirs[-1]))
+
+ try:
+ for mydir in mydirs:
+ ensure_dirs(mydir)
+ try:
+ apply_secpass_permissions(mydir,
+ gid=portage_gid, uid=portage_uid, mode=0o70, mask=0)
+ except PortageException:
+ if not os.path.isdir(mydir):
+ raise
+ for dir_key in ("PORTAGE_BUILDDIR", "HOME", "PKG_LOGDIR", "T"):
+ """These directories don't necessarily need to be group writable.
+ However, the setup phase is commonly run as a privileged user prior
+ to the other phases being run by an unprivileged user. Currently,
+ we use the portage group to ensure that the unprivileged user still
+ has write access to these directories in any case."""
+ ensure_dirs(mysettings[dir_key], mode=0o775)
+ apply_secpass_permissions(mysettings[dir_key],
+ uid=portage_uid, gid=portage_gid)
+ except PermissionDenied as e:
+ writemsg(_("Permission Denied: %s\n") % str(e), noiselevel=-1)
+ return 1
+ except OperationNotPermitted as e:
+ writemsg(_("Operation Not Permitted: %s\n") % str(e), noiselevel=-1)
+ return 1
+ except FileNotFound as e:
+ writemsg(_("File Not Found: '%s'\n") % str(e), noiselevel=-1)
+ return 1
+
+ # Reset state for things like noauto and keepwork in FEATURES.
+ for x in ('.die_hooks',):
+ try:
+ os.unlink(os.path.join(mysettings['PORTAGE_BUILDDIR'], x))
+ except OSError:
+ pass
+
+ _prepare_workdir(mysettings)
+ if mysettings.get("EBUILD_PHASE") not in ("info", "fetch", "pretend"):
+ # Avoid spurious permissions adjustments when fetching with
+ # a temporary PORTAGE_TMPDIR setting (for fetchonly).
+ _prepare_features_dirs(mysettings)
+
+def _adjust_perms_msg(settings, msg):
+
+ def write(msg):
+ writemsg(msg, noiselevel=-1)
+
+ background = settings.get("PORTAGE_BACKGROUND") == "1"
+ log_path = settings.get("PORTAGE_LOG_FILE")
+ log_file = None
+
+ if background and log_path is not None:
+ try:
+ log_file = open(_unicode_encode(log_path,
+ encoding=_encodings['fs'], errors='strict'), mode='ab')
+ except IOError:
+ def write(msg):
+ pass
+ else:
+ if log_path.endswith('.gz'):
+ log_file = gzip.GzipFile(filename='',
+ mode='ab', fileobj=log_file)
+ def write(msg):
+ log_file.write(_unicode_encode(msg))
+ log_file.flush()
+
+ try:
+ write(msg)
+ finally:
+ if log_file is not None:
+ log_file.close()
+
+def _prepare_features_dirs(mysettings):
+
+ # Use default ABI libdir in accordance with bug #355283.
+ libdir = None
+ default_abi = mysettings.get("DEFAULT_ABI")
+ if default_abi:
+ libdir = mysettings.get("LIBDIR_" + default_abi)
+ if not libdir:
+ libdir = "lib"
+
+ features_dirs = {
+ "ccache":{
+ "path_dir": "/usr/%s/ccache/bin" % (libdir,),
+ "basedir_var":"CCACHE_DIR",
+ "default_dir":os.path.join(mysettings["PORTAGE_TMPDIR"], "ccache"),
+ "always_recurse":False},
+ "distcc":{
+ "path_dir": "/usr/%s/distcc/bin" % (libdir,),
+ "basedir_var":"DISTCC_DIR",
+ "default_dir":os.path.join(mysettings["BUILD_PREFIX"], ".distcc"),
+ "subdirs":("lock", "state"),
+ "always_recurse":True}
+ }
+ dirmode = 0o2070
+ filemode = 0o60
+ modemask = 0o2
+ restrict = mysettings.get("PORTAGE_RESTRICT","").split()
+ droppriv = secpass >= 2 and \
+ "userpriv" in mysettings.features and \
+ "userpriv" not in restrict
+ for myfeature, kwargs in features_dirs.items():
+ if myfeature in mysettings.features:
+ failure = False
+ basedir = mysettings.get(kwargs["basedir_var"])
+ if basedir is None or not basedir.strip():
+ basedir = kwargs["default_dir"]
+ mysettings[kwargs["basedir_var"]] = basedir
+ try:
+ path_dir = kwargs["path_dir"]
+ if not os.path.isdir(path_dir):
+ raise DirectoryNotFound(path_dir)
+
+ mydirs = [mysettings[kwargs["basedir_var"]]]
+ if "subdirs" in kwargs:
+ for subdir in kwargs["subdirs"]:
+ mydirs.append(os.path.join(basedir, subdir))
+ for mydir in mydirs:
+ modified = ensure_dirs(mydir)
+ # Generally, we only want to apply permissions for
+ # initial creation. Otherwise, we don't know exactly what
+ # permissions the user wants, so we should leave them as-is.
+ droppriv_fix = False
+ if droppriv:
+ st = os.stat(mydir)
+ if st.st_gid != portage_gid or \
+ not dirmode == (stat.S_IMODE(st.st_mode) & dirmode):
+ droppriv_fix = True
+ if not droppriv_fix:
+ # Check permissions of files in the directory.
+ for filename in os.listdir(mydir):
+ try:
+ subdir_st = os.lstat(
+ os.path.join(mydir, filename))
+ except OSError:
+ continue
+ if subdir_st.st_gid != portage_gid or \
+ ((stat.S_ISDIR(subdir_st.st_mode) and \
+ not dirmode == (stat.S_IMODE(subdir_st.st_mode) & dirmode))):
+ droppriv_fix = True
+ break
+
+ if droppriv_fix:
+ _adjust_perms_msg(mysettings,
+ colorize("WARN", " * ") + \
+ _("Adjusting permissions "
+ "for FEATURES=userpriv: '%s'\n") % mydir)
+ elif modified:
+ _adjust_perms_msg(mysettings,
+ colorize("WARN", " * ") + \
+ _("Adjusting permissions "
+ "for FEATURES=%s: '%s'\n") % (myfeature, mydir))
+
+ if modified or kwargs["always_recurse"] or droppriv_fix:
+ def onerror(e):
+ raise # The feature is disabled if a single error
+ # occurs during permissions adjustment.
+ if not apply_recursive_permissions(mydir,
+ gid=portage_gid, dirmode=dirmode, dirmask=modemask,
+ filemode=filemode, filemask=modemask, onerror=onerror):
+ raise OperationNotPermitted(
+ _("Failed to apply recursive permissions for the portage group."))
+
+ except DirectoryNotFound as e:
+ failure = True
+ writemsg(_("\n!!! Directory does not exist: '%s'\n") % \
+ (e,), noiselevel=-1)
+ writemsg(_("!!! Disabled FEATURES='%s'\n") % myfeature,
+ noiselevel=-1)
+
+ except PortageException as e:
+ failure = True
+ writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
+ writemsg(_("!!! Failed resetting perms on %s='%s'\n") % \
+ (kwargs["basedir_var"], basedir), noiselevel=-1)
+ writemsg(_("!!! Disabled FEATURES='%s'\n") % myfeature,
+ noiselevel=-1)
+
+ if failure:
+ mysettings.features.remove(myfeature)
+ time.sleep(5)
+
+def _prepare_workdir(mysettings):
+ workdir_mode = 0o700
+ try:
+ mode = mysettings["PORTAGE_WORKDIR_MODE"]
+ if mode.isdigit():
+ parsed_mode = int(mode, 8)
+ elif mode == "":
+ raise KeyError()
+ else:
+ raise ValueError()
+ if parsed_mode & 0o7777 != parsed_mode:
+ raise ValueError("Invalid file mode: %s" % mode)
+ else:
+ workdir_mode = parsed_mode
+ except KeyError as e:
+ writemsg(_("!!! PORTAGE_WORKDIR_MODE is unset, using %s.\n") % oct(workdir_mode))
+ except ValueError as e:
+ if len(str(e)) > 0:
+ writemsg("%s\n" % e)
+ writemsg(_("!!! Unable to parse PORTAGE_WORKDIR_MODE='%s', using %s.\n") % \
+ (mysettings["PORTAGE_WORKDIR_MODE"], oct(workdir_mode)))
+ mysettings["PORTAGE_WORKDIR_MODE"] = oct(workdir_mode).replace('o', '')
+ try:
+ apply_secpass_permissions(mysettings["WORKDIR"],
+ uid=portage_uid, gid=portage_gid, mode=workdir_mode)
+ except FileNotFound:
+ pass # ebuild.sh will create it
+
+ if mysettings.get("PORT_LOGDIR", "") == "":
+ while "PORT_LOGDIR" in mysettings:
+ del mysettings["PORT_LOGDIR"]
+ if "PORT_LOGDIR" in mysettings:
+ try:
+ modified = ensure_dirs(mysettings["PORT_LOGDIR"])
+ if modified:
+ # Only initialize group/mode if the directory doesn't
+ # exist, so that we don't override permissions if they
+ # were previously set by the administrator.
+ # NOTE: These permissions should be compatible with our
+ # default logrotate config as discussed in bug 374287.
+ apply_secpass_permissions(mysettings["PORT_LOGDIR"],
+ uid=portage_uid, gid=portage_gid, mode=0o2770)
+ except PortageException as e:
+ writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ writemsg(_("!!! Permission issues with PORT_LOGDIR='%s'\n") % \
+ mysettings["PORT_LOGDIR"], noiselevel=-1)
+ writemsg(_("!!! Disabling logging.\n"), noiselevel=-1)
+ while "PORT_LOGDIR" in mysettings:
+ del mysettings["PORT_LOGDIR"]
+
+ compress_log_ext = ''
+ if 'compress-build-logs' in mysettings.features:
+ compress_log_ext = '.gz'
+
+ logdir_subdir_ok = False
+ if "PORT_LOGDIR" in mysettings and \
+ os.access(mysettings["PORT_LOGDIR"], os.W_OK):
+ logdir = normalize_path(mysettings["PORT_LOGDIR"])
+ logid_path = os.path.join(mysettings["PORTAGE_BUILDDIR"], ".logid")
+ if not os.path.exists(logid_path):
+ open(_unicode_encode(logid_path), 'w').close()
+ logid_time = _unicode_decode(time.strftime("%Y%m%d-%H%M%S",
+ time.gmtime(os.stat(logid_path).st_mtime)),
+ encoding=_encodings['content'], errors='replace')
+
+ if "split-log" in mysettings.features:
+ log_subdir = os.path.join(logdir, "build", mysettings["CATEGORY"])
+ mysettings["PORTAGE_LOG_FILE"] = os.path.join(
+ log_subdir, "%s:%s.log%s" %
+ (mysettings["PF"], logid_time, compress_log_ext))
+ else:
+ log_subdir = logdir
+ mysettings["PORTAGE_LOG_FILE"] = os.path.join(
+ logdir, "%s:%s:%s.log%s" % \
+ (mysettings["CATEGORY"], mysettings["PF"], logid_time,
+ compress_log_ext))
+
+ if log_subdir is logdir:
+ logdir_subdir_ok = True
+ else:
+ try:
+ _ensure_log_subdirs(logdir, log_subdir)
+ except PortageException as e:
+ writemsg(_unicode_decode("!!! %s\n") % (e,), noiselevel=-1)
+
+ if os.access(log_subdir, os.W_OK):
+ logdir_subdir_ok = True
+ else:
+ writemsg(_unicode_decode("!!! %s: %s\n") %
+ (_("Permission Denied"), log_subdir), noiselevel=-1)
+
+ if not logdir_subdir_ok:
+ # NOTE: When sesandbox is enabled, the local SELinux security policies
+ # may not allow output to be piped out of the sesandbox domain. The
+ # current policy will allow it to work when a pty is available, but
+ # not through a normal pipe. See bug #162404.
+ mysettings["PORTAGE_LOG_FILE"] = os.path.join(
+ mysettings["T"], "build.log%s" % compress_log_ext)
+
+def _ensure_log_subdirs(logdir, subdir):
+ """
+ This assumes that logdir exists, and creates subdirectories down
+ to subdir as necessary. The gid of logdir is copied to all
+ subdirectories, along with 0o2070 mode bits if present. Both logdir
+ and subdir are assumed to be normalized absolute paths.
+ """
+ st = os.stat(logdir)
+ gid = st.st_gid
+ grp_mode = 0o2070 & st.st_mode
+
+ logdir_split_len = len(logdir.split(os.sep))
+ subdir_split = subdir.split(os.sep)[logdir_split_len:]
+ subdir_split.reverse()
+ current = logdir
+ while subdir_split:
+ current = os.path.join(current, subdir_split.pop())
+ ensure_dirs(current, gid=gid, mode=grp_mode, mask=0)
diff --git a/portage_with_autodep/pym/portage/process.py b/portage_with_autodep/pym/portage/process.py
new file mode 100644
index 0000000..6866a2f
--- /dev/null
+++ b/portage_with_autodep/pym/portage/process.py
@@ -0,0 +1,427 @@
+# portage.py -- core Portage functionality
+# Copyright 1998-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+
+import atexit
+import signal
+import sys
+import traceback
+
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.util:dump_traceback',
+)
+
+from portage.const import BASH_BINARY, SANDBOX_BINARY, FAKEROOT_BINARY, AUTODEP_LIBRARY
+from portage.exception import CommandNotFound
+
+try:
+ import resource
+ max_fd_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
+except ImportError:
+ max_fd_limit = 256
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+if os.path.isdir("/proc/%i/fd" % os.getpid()):
+ def get_open_fds():
+ return (int(fd) for fd in os.listdir("/proc/%i/fd" % os.getpid()) \
+ if fd.isdigit())
+else:
+ def get_open_fds():
+ return range(max_fd_limit)
+
+sandbox_capable = (os.path.isfile(SANDBOX_BINARY) and
+ os.access(SANDBOX_BINARY, os.X_OK))
+
+autodep_capable = (os.path.isfile(AUTODEP_LIBRARY) and
+ os.access(AUTODEP_LIBRARY, os.X_OK))
+
+fakeroot_capable = (os.path.isfile(FAKEROOT_BINARY) and
+ os.access(FAKEROOT_BINARY, os.X_OK))
+
+def spawn_bash(mycommand, debug=False, opt_name=None, **keywords):
+ """
+ Spawns a bash shell running a specific command
+
+ @param mycommand: The command for bash to run
+ @type mycommand: String
+ @param debug: Turn bash debugging on (set -x)
+ @type debug: Boolean
+ @param opt_name: Name of the spawned process (defaults to binary name)
+ @type opt_name: String
+ @param keywords: Extra Dictionary arguments to pass to spawn
+ @type keywords: Dictionary
+ """
+
+ args = [BASH_BINARY]
+ if not opt_name:
+ opt_name = os.path.basename(mycommand.split()[0])
+ if debug:
+ # Print commands and their arguments as they are executed.
+ args.append("-x")
+ args.append("-c")
+ args.append(mycommand)
+ return spawn(args, opt_name=opt_name, **keywords)
+
+def spawn_autodep(mycommand, opt_name=None, **keywords):
+ if not autodep_capable:
+ return spawn_bash(mycommand, opt_name=opt_name, **keywords)
+ if "env" not in keywords or "LOG_SOCKET" not in keywords["env"]:
+ return spawn_bash(mycommand, opt_name=opt_name, **keywords)
+
+ # Core part: tell the dynamic loader to preload the logging library
+ keywords["env"]["LD_PRELOAD"]=AUTODEP_LIBRARY
+ return spawn_bash(mycommand, opt_name=opt_name, **keywords)
+
+def spawn_sandbox(mycommand, opt_name=None, **keywords):
+ if not sandbox_capable:
+ return spawn_bash(mycommand, opt_name=opt_name, **keywords)
+ args=[SANDBOX_BINARY]
+ if not opt_name:
+ opt_name = os.path.basename(mycommand.split()[0])
+ args.append(mycommand)
+ return spawn(args, opt_name=opt_name, **keywords)
+
+def spawn_fakeroot(mycommand, fakeroot_state=None, opt_name=None, **keywords):
+ args=[FAKEROOT_BINARY]
+ if not opt_name:
+ opt_name = os.path.basename(mycommand.split()[0])
+ if fakeroot_state:
+ open(fakeroot_state, "a").close()
+ args.append("-s")
+ args.append(fakeroot_state)
+ args.append("-i")
+ args.append(fakeroot_state)
+ args.append("--")
+ args.append(BASH_BINARY)
+ args.append("-c")
+ args.append(mycommand)
+ return spawn(args, opt_name=opt_name, **keywords)
+
+_exithandlers = []
+def atexit_register(func, *args, **kargs):
+ """Wrapper around atexit.register that is needed in order to track
+ what is registered. For example, when portage restarts itself via
+ os.execv, the atexit module does not work so we have to do it
+ manually by calling the run_exitfuncs() function in this module."""
+ _exithandlers.append((func, args, kargs))
+
+def run_exitfuncs():
+ """This should behave identically to the routine performed by
+ the atexit module at exit time. It's only necessary to call this
+ function when atexit will not work (because of os.execv, for
+ example)."""
+
+ # This function is a copy of the private atexit._run_exitfuncs()
+ # from the python 2.4.2 sources. The only difference from the
+ # original function is in the output to stderr.
+ exc_info = None
+ while _exithandlers:
+ func, targs, kargs = _exithandlers.pop()
+ try:
+ func(*targs, **kargs)
+ except SystemExit:
+ exc_info = sys.exc_info()
+ except: # No idea what they called, so we need this broad except here.
+ dump_traceback("Error in portage.process.run_exitfuncs", noiselevel=0)
+ exc_info = sys.exc_info()
+
+ if exc_info is not None:
+ if sys.hexversion >= 0x3000000:
+ raise exc_info[0](exc_info[1]).with_traceback(exc_info[2])
+ else:
+ exec("raise exc_info[0], exc_info[1], exc_info[2]")
+
+atexit.register(run_exitfuncs)
+
+# We need to make sure that any processes spawned are killed off when
+# we exit. spawn() takes care of adding and removing pids to this list
+# as it creates and cleans up processes.
+spawned_pids = []
+def cleanup():
+ while spawned_pids:
+ pid = spawned_pids.pop()
+ try:
+ # With waitpid and WNOHANG, only check the
+ # first element of the tuple since the second
+ # element may vary (bug #337465).
+ if os.waitpid(pid, os.WNOHANG)[0] == 0:
+ os.kill(pid, signal.SIGTERM)
+ os.waitpid(pid, 0)
+ except OSError:
+ # This pid has been cleaned up outside
+ # of spawn().
+ pass
+
+atexit_register(cleanup)
+
+def spawn(mycommand, env={}, opt_name=None, fd_pipes=None, returnpid=False,
+ uid=None, gid=None, groups=None, umask=None, logfile=None,
+ path_lookup=True, pre_exec=None):
+ """
+ Spawns a given command.
+
+ @param mycommand: the command to execute
+ @type mycommand: String or List (Popen style list)
+ @param env: A dict of Key=Value pairs for env variables
+ @type env: Dictionary
+ @param opt_name: an optional name for the spawn'd process (defaults to the binary name)
+ @type opt_name: String
+ @param fd_pipes: A dict of mapping for pipes, { '0': stdin, '1': stdout } for example
+ @type fd_pipes: Dictionary
+ @param returnpid: Return the Process IDs for a successful spawn.
+ NOTE: This requires the caller to clean up all the PIDs; otherwise spawn will clean them.
+ @type returnpid: Boolean
+ @param uid: User ID to spawn as; useful for dropping privileges
+ @type uid: Integer
+ @param gid: Group ID to spawn as; useful for dropping privileges
+ @type gid: Integer
+ @param groups: Group IDs to spawn in; useful for having the process run in multiple group contexts.
+ @type groups: List
+ @param umask: An integer representing the umask for the process (see man chmod for umask details)
+ @type umask: Integer
+ @param logfile: name of a file to use for logging purposes
+ @type logfile: String
+ @param path_lookup: If the binary is not fully specified then look for it in PATH
+ @type path_lookup: Boolean
+ @param pre_exec: A function to be called with no arguments just prior to the exec call.
+ @type pre_exec: callable
+
+ logfile requires stdout and stderr to be assigned to this process (i.e. not
+ pointed somewhere else).
+
+ """
+
+ # mycommand is either a str or a list
+ if isinstance(mycommand, basestring):
+ mycommand = mycommand.split()
+
+ if sys.hexversion < 0x3000000:
+ # Avoid a potential UnicodeEncodeError from os.execve().
+ env_bytes = {}
+ for k, v in env.items():
+ env_bytes[_unicode_encode(k, encoding=_encodings['content'])] = \
+ _unicode_encode(v, encoding=_encodings['content'])
+ env = env_bytes
+ del env_bytes
+
+ # If an absolute path to an executable file isn't given
+ # search for it unless we've been told not to.
+ binary = mycommand[0]
+ if binary not in (BASH_BINARY, SANDBOX_BINARY, FAKEROOT_BINARY) and \
+ (not os.path.isabs(binary) or not os.path.isfile(binary)
+ or not os.access(binary, os.X_OK)):
+ binary = path_lookup and find_binary(binary) or None
+ if not binary:
+ raise CommandNotFound(mycommand[0])
+
+ # If we haven't been told what file descriptors to use
+ # default to propagating our stdin, stdout and stderr.
+ if fd_pipes is None:
+ fd_pipes = {
+ 0:sys.stdin.fileno(),
+ 1:sys.stdout.fileno(),
+ 2:sys.stderr.fileno(),
+ }
+
+ # mypids will hold the pids of all processes created.
+ mypids = []
+
+ if logfile:
+ # Using a log file requires that stdout and stderr
+ # are assigned to the process we're running.
+ if 1 not in fd_pipes or 2 not in fd_pipes:
+ raise ValueError(fd_pipes)
+
+ # Create a pipe
+ (pr, pw) = os.pipe()
+
+ # Create a tee process, giving it our stdout and stderr
+ # as well as the read end of the pipe.
+ mypids.extend(spawn(('tee', '-i', '-a', logfile),
+ returnpid=True, fd_pipes={0:pr,
+ 1:fd_pipes[1], 2:fd_pipes[2]}))
+
+ # We don't need the read end of the pipe, so close it.
+ os.close(pr)
+
+ # Assign the write end of the pipe to our stdout and stderr.
+ fd_pipes[1] = pw
+ fd_pipes[2] = pw
+
+ pid = os.fork()
+
+ if not pid:
+ try:
+ _exec(binary, mycommand, opt_name, fd_pipes,
+ env, gid, groups, uid, umask, pre_exec)
+ except SystemExit:
+ raise
+ except Exception as e:
+ # We need to catch _any_ exception so that it doesn't
+ # propagate out of this function and cause exiting
+ # with anything other than os._exit()
+ sys.stderr.write("%s:\n %s\n" % (e, " ".join(mycommand)))
+ traceback.print_exc()
+ sys.stderr.flush()
+ os._exit(1)
+
+ # Add the pid to our local and the global pid lists.
+ mypids.append(pid)
+ spawned_pids.append(pid)
+
+ # If we started a tee process the write side of the pipe is no
+ # longer needed, so close it.
+ if logfile:
+ os.close(pw)
+
+ # If the caller wants to handle cleaning up the processes, we tell
+ # it about all processes that were created.
+ if returnpid:
+ return mypids
+
+ # Otherwise we clean them up.
+ while mypids:
+
+ # Pull the last reader in the pipe chain. If all processes
+ # in the pipe are well behaved, it will die when the process
+ # it is reading from dies.
+ pid = mypids.pop(0)
+
+ # and wait for it.
+ retval = os.waitpid(pid, 0)[1]
+
+ # When it's done, we can remove it from the
+ # global pid list as well.
+ spawned_pids.remove(pid)
+
+ if retval:
+ # If it failed, kill off anything else that
+ # isn't dead yet.
+ for pid in mypids:
+ # With waitpid and WNOHANG, only check the
+ # first element of the tuple since the second
+ # element may vary (bug #337465).
+ if os.waitpid(pid, os.WNOHANG)[0] == 0:
+ os.kill(pid, signal.SIGTERM)
+ os.waitpid(pid, 0)
+ spawned_pids.remove(pid)
+
+ # If it got a signal, return the signal that was sent.
+ if (retval & 0xff):
+ return ((retval & 0xff) << 8)
+
+ # Otherwise, return its exit code.
+ return (retval >> 8)
+
+ # Everything succeeded
+ return 0
+
+def _exec(binary, mycommand, opt_name, fd_pipes, env, gid, groups, uid, umask,
+ pre_exec):
+
+ """
+ Execute a given binary with options
+
+ @param binary: Name of program to execute
+ @type binary: String
+ @param mycommand: Options for program
+ @type mycommand: String
+ @param opt_name: Name of process (defaults to binary)
+ @type opt_name: String
+ @param fd_pipes: Mapping pipes to destination; { 0:0, 1:1, 2:2 }
+ @type fd_pipes: Dictionary
+ @param env: Key,Value mapping for Environmental Variables
+ @type env: Dictionary
+ @param gid: Group ID to run the process under
+ @type gid: Integer
+ @param groups: Groups the process should be in
+ @type groups: List
+ @param uid: User ID to run the process under
+ @type uid: Integer
+ @param umask: an int representing a unix umask (see man chmod for umask details)
+ @type umask: Integer
+ @param pre_exec: A function to be called with no arguments just prior to the exec call.
+ @type pre_exec: callable
+ @rtype: None
+ @returns: Never returns (calls os.execve)
+ """
+
+ # If the process we're creating hasn't been given a name
+ # assign it the name of the executable.
+ if not opt_name:
+ opt_name = os.path.basename(binary)
+
+ # Set up the command's argument list.
+ myargs = [opt_name]
+ myargs.extend(mycommand[1:])
+
+ # Use default signal handlers in order to avoid problems
+ # killing subprocesses as reported in bug #353239.
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+ # Quiet killing of subprocesses by SIGPIPE (see bug #309001).
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+ # Avoid issues triggered by inheritance of SIGQUIT handler from
+ # the parent process (see bug #289486).
+ signal.signal(signal.SIGQUIT, signal.SIG_DFL)
+
+ _setup_pipes(fd_pipes)
+
+ # Set requested process permissions.
+ if gid:
+ os.setgid(gid)
+ if groups:
+ os.setgroups(groups)
+ if uid:
+ os.setuid(uid)
+ if umask:
+ os.umask(umask)
+ if pre_exec:
+ pre_exec()
+
+ # And switch to the new process.
+ os.execve(binary, myargs, env)
+
+def _setup_pipes(fd_pipes):
+ """Setup pipes for a forked process."""
+ my_fds = {}
+ # To protect from cases where direct assignment could
+ # clobber needed fds ({1:2, 2:1}) we first dupe the fds
+ # into unused fds.
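+ # For example, with fd_pipes = {1: 2, 2: 1} a direct dup2(2, 1)
+ # would clobber the original fd 1 before it could be copied to
+ # fd 2; duplicating both sources first makes the swap safe.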
+ for fd in fd_pipes:
+ my_fds[fd] = os.dup(fd_pipes[fd])
+ # Then assign them to what they should be.
+ for fd in my_fds:
+ os.dup2(my_fds[fd], fd)
+ # Then close _all_ fds that haven't been explicitly
+ # requested to be kept open.
+ for fd in get_open_fds():
+ if fd not in my_fds:
+ try:
+ os.close(fd)
+ except OSError:
+ pass
+
+def find_binary(binary):
+ """
+ Given a binary name, find the binary in PATH
+
+ @param binary: Name of the binary to find
+ @type binary: String
+ @rtype: None or string
+ @returns: full path to binary or None if the binary could not be located.
+ """
+ for path in os.environ.get("PATH", "").split(":"):
+ filename = "%s/%s" % (path, binary)
+ if os.access(filename, os.X_OK) and os.path.isfile(filename):
+ return filename
+ return None
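+# Illustrative usage (a sketch):
+#   bash = find_binary("bash")   # e.g. "/bin/bash", or None if not in PATH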
diff --git a/portage_with_autodep/pym/portage/proxy/__init__.py b/portage_with_autodep/pym/portage/proxy/__init__.py
new file mode 100644
index 0000000..f98c564
--- /dev/null
+++ b/portage_with_autodep/pym/portage/proxy/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/portage_with_autodep/pym/portage/proxy/lazyimport.py b/portage_with_autodep/pym/portage/proxy/lazyimport.py
new file mode 100644
index 0000000..ad4a542
--- /dev/null
+++ b/portage_with_autodep/pym/portage/proxy/lazyimport.py
@@ -0,0 +1,212 @@
+# Copyright 2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['lazyimport']
+
+import sys
+import types
+
+try:
+ import threading
+except ImportError:
+ import dummy_threading as threading
+
+from portage.proxy.objectproxy import ObjectProxy
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+_module_proxies = {}
+_module_proxies_lock = threading.RLock()
+
+def _preload_portage_submodules():
+ """
+ Load lazily referenced portage submodules into memory,
+ so imports won't fail during portage upgrade/downgrade.
+ Note that this recursively loads only the modules that
+ are lazily referenced by currently imported modules,
+ so some portage submodules may still remain unimported
+ after this function is called.
+ """
+ imported = set()
+ while True:
+ remaining = False
+ for name in list(_module_proxies):
+ if name.startswith('portage.'):
+ if name in imported:
+ continue
+ imported.add(name)
+ remaining = True
+ __import__(name)
+ _unregister_module_proxy(name)
+ if not remaining:
+ break
+
+def _register_module_proxy(name, proxy):
+ _module_proxies_lock.acquire()
+ try:
+ proxy_list = _module_proxies.get(name)
+ if proxy_list is None:
+ proxy_list = []
+ _module_proxies[name] = proxy_list
+ proxy_list.append(proxy)
+ finally:
+ _module_proxies_lock.release()
+
+def _unregister_module_proxy(name):
+ """
+ Destroy all proxies that reference the given module name. Also, check
+ for other proxies referenced by modules that have been imported and
+ destroy those proxies too. This way, destruction of a single proxy
+ can trigger destruction of all the rest. If a target module appears
+ to be partially imported (indicated when an AttributeError is caught),
+ this function will leave in place proxies that reference it.
+ """
+ _module_proxies_lock.acquire()
+ try:
+ if name in _module_proxies:
+ modules = sys.modules
+ for name, proxy_list in list(_module_proxies.items()):
+ if name not in modules:
+ continue
+ # First delete this name from the dict so that
+ # if this same thread reenters below, it won't
+ # enter this path again.
+ del _module_proxies[name]
+ try:
+ while proxy_list:
+ proxy = proxy_list.pop()
+ object.__getattribute__(proxy, '_get_target')()
+ except AttributeError:
+ # Apparently the target module is only partially
+ # imported, so proxies that reference it cannot
+ # be destroyed yet.
+ proxy_list.append(proxy)
+ _module_proxies[name] = proxy_list
+ finally:
+ _module_proxies_lock.release()
+
+class _LazyImport(ObjectProxy):
+
+ __slots__ = ('_scope', '_alias', '_name', '_target')
+
+ def __init__(self, scope, alias, name):
+ ObjectProxy.__init__(self)
+ object.__setattr__(self, '_scope', scope)
+ object.__setattr__(self, '_alias', alias)
+ object.__setattr__(self, '_name', name)
+ _register_module_proxy(name, self)
+
+ def _get_target(self):
+ try:
+ return object.__getattribute__(self, '_target')
+ except AttributeError:
+ pass
+ name = object.__getattribute__(self, '_name')
+ __import__(name)
+ target = sys.modules[name]
+ object.__setattr__(self, '_target', target)
+ object.__getattribute__(self, '_scope')[
+ object.__getattribute__(self, '_alias')] = target
+ _unregister_module_proxy(name)
+ return target
+
+class _LazyImportFrom(_LazyImport):
+
+ __slots__ = ('_attr_name',)
+
+ def __init__(self, scope, name, attr_name, alias):
+ object.__setattr__(self, '_attr_name', attr_name)
+ _LazyImport.__init__(self, scope, alias, name)
+
+ def _get_target(self):
+ try:
+ return object.__getattribute__(self, '_target')
+ except AttributeError:
+ pass
+ name = object.__getattribute__(self, '_name')
+ attr_name = object.__getattribute__(self, '_attr_name')
+ __import__(name)
+ # If called by _unregister_module_proxy() and the target module is
+ # partially imported, then the following getattr call may raise an
+ # AttributeError for _unregister_module_proxy() to handle.
+ target = getattr(sys.modules[name], attr_name)
+ object.__setattr__(self, '_target', target)
+ object.__getattribute__(self, '_scope')[
+ object.__getattribute__(self, '_alias')] = target
+ _unregister_module_proxy(name)
+ return target
+
+def lazyimport(scope, *args):
+ """
+ Create a proxy in the given scope in order to perform a lazy import.
+
+ Syntax Result
+ foo import foo
+ foo:bar,baz from foo import bar, baz
+ foo:bar@baz from foo import bar as baz
+
+ @param scope: the scope in which to place the import, typically globals()
+ @type scope: dict
+ @param args: module names to import
+ @type args: strings
+ """
+
+ modules = sys.modules
+
+ for s in args:
+ parts = s.split(':', 1)
+ if len(parts) == 1:
+ name = s
+
+ if not name or not isinstance(name, basestring):
+ raise ValueError(name)
+
+ components = name.split('.')
+ parent_scope = scope
+ for i in range(len(components)):
+ alias = components[i]
+ if i < len(components) - 1:
+ parent_name = ".".join(components[:i+1])
+ __import__(parent_name)
+ mod = modules.get(parent_name)
+ if not isinstance(mod, types.ModuleType):
+ # not a module; let a normal import of the full name raise the error
+ __import__(name)
+ parent_scope[alias] = mod
+ parent_scope = mod.__dict__
+ continue
+
+ already_imported = modules.get(name)
+ if already_imported is not None:
+ parent_scope[alias] = already_imported
+ else:
+ parent_scope[alias] = \
+ _LazyImport(parent_scope, alias, name)
+
+ else:
+ name, fromlist = parts
+ already_imported = modules.get(name)
+ fromlist = fromlist.split(',')
+ for s in fromlist:
+ if not s:
+ # This happens if there's an extra comma in fromlist.
+ raise ValueError('Empty module attribute name')
+ alias = s.split('@', 1)
+ if len(alias) == 1:
+ alias = alias[0]
+ attr_name = alias
+ else:
+ attr_name, alias = alias
+ if already_imported is not None:
+ try:
+ scope[alias] = getattr(already_imported, attr_name)
+ except AttributeError:
+ # Apparently the target module is only partially
+ # imported, so create a proxy.
+ already_imported = None
+ scope[alias] = \
+ _LazyImportFrom(scope, name, attr_name, alias)
+ else:
+ scope[alias] = \
+ _LazyImportFrom(scope, name, attr_name, alias)
diff --git a/portage_with_autodep/pym/portage/proxy/objectproxy.py b/portage_with_autodep/pym/portage/proxy/objectproxy.py
new file mode 100644
index 0000000..92b36d1
--- /dev/null
+++ b/portage_with_autodep/pym/portage/proxy/objectproxy.py
@@ -0,0 +1,91 @@
+# Copyright 2008-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+
+__all__ = ['ObjectProxy']
+
+class ObjectProxy(object):
+
+ """
+ Object that acts as a proxy to another object, forwarding
+ attribute accesses and method calls. This can be useful
+ for implementing lazy initialization.
+ """
+
+ __slots__ = ()
+
+ def _get_target(self):
+ raise NotImplementedError(self)
+
+ def __getattribute__(self, attr):
+ result = object.__getattribute__(self, '_get_target')()
+ return getattr(result, attr)
+
+ def __setattr__(self, attr, value):
+ result = object.__getattribute__(self, '_get_target')()
+ setattr(result, attr, value)
+
+ def __call__(self, *args, **kwargs):
+ result = object.__getattribute__(self, '_get_target')()
+ return result(*args, **kwargs)
+
+ def __setitem__(self, key, value):
+ object.__getattribute__(self, '_get_target')()[key] = value
+
+ def __getitem__(self, key):
+ return object.__getattribute__(self, '_get_target')()[key]
+
+ def __delitem__(self, key):
+ del object.__getattribute__(self, '_get_target')()[key]
+
+ def __contains__(self, key):
+ return key in object.__getattribute__(self, '_get_target')()
+
+ def __iter__(self):
+ return iter(object.__getattribute__(self, '_get_target')())
+
+ def __len__(self):
+ return len(object.__getattribute__(self, '_get_target')())
+
+ def __repr__(self):
+ return repr(object.__getattribute__(self, '_get_target')())
+
+ def __str__(self):
+ return str(object.__getattribute__(self, '_get_target')())
+
+ def __add__(self, other):
+ return self.__str__() + other
+
+ def __hash__(self):
+ return hash(object.__getattribute__(self, '_get_target')())
+
+ def __ge__(self, other):
+ return object.__getattribute__(self, '_get_target')() >= other
+
+ def __gt__(self, other):
+ return object.__getattribute__(self, '_get_target')() > other
+
+ def __le__(self, other):
+ return object.__getattribute__(self, '_get_target')() <= other
+
+ def __lt__(self, other):
+ return object.__getattribute__(self, '_get_target')() < other
+
+ def __eq__(self, other):
+ return object.__getattribute__(self, '_get_target')() == other
+
+ def __ne__(self, other):
+ return object.__getattribute__(self, '_get_target')() != other
+
+ def __bool__(self):
+ return bool(object.__getattribute__(self, '_get_target')())
+
+ if sys.hexversion < 0x3000000:
+ __nonzero__ = __bool__
+
+ def __unicode__(self):
+ return unicode(object.__getattribute__(self, '_get_target')())
+
+ def __int__(self):
+ return int(object.__getattribute__(self, '_get_target')())
diff --git a/portage_with_autodep/pym/portage/repository/__init__.py b/portage_with_autodep/pym/portage/repository/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/repository/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/portage_with_autodep/pym/portage/repository/config.py b/portage_with_autodep/pym/portage/repository/config.py
new file mode 100644
index 0000000..9f0bb99
--- /dev/null
+++ b/portage_with_autodep/pym/portage/repository/config.py
@@ -0,0 +1,504 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import io
+import logging
+import re
+
+try:
+ from configparser import SafeConfigParser, ParsingError
+except ImportError:
+ from ConfigParser import SafeConfigParser, ParsingError
+from portage import os
+from portage.const import USER_CONFIG_PATH, REPO_NAME_LOC
+from portage.env.loaders import KeyValuePairFileLoader
+from portage.util import normalize_path, writemsg, writemsg_level, shlex_split
+from portage.localization import _
+from portage import _unicode_encode
+from portage import _encodings
+
+_repo_name_sub_re = re.compile(r'[^\w-]')
+
+def _gen_valid_repo(name):
+ """
+ Substitute hyphen in place of characters that don't conform to PMS 3.1.5,
+ and strip hyphen from left side if necessary. This returns None if the
+ given name contains no valid characters.
+ """
+ name = _repo_name_sub_re.sub(' ', name.strip())
+ name = '-'.join(name.split())
+ name = name.lstrip('-')
+ if not name:
+ name = None
+ return name
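+# Illustrative behavior (a sketch; the names are hypothetical):
+#   _gen_valid_repo("my overlay!")  ->  "my-overlay"
+#   _gen_valid_repo("---")          ->  None (nothing remains after
+#                                      stripping leading hyphens)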
+
+class RepoConfig(object):
+ """Stores config of one repository"""
+
+ __slots__ = ['aliases', 'eclass_overrides', 'eclass_locations', 'location', 'user_location', 'masters', 'main_repo',
+ 'missing_repo_name', 'name', 'priority', 'sync', 'format']
+
+ def __init__(self, name, repo_opts):
+ """Build a RepoConfig with options in repo_opts
+ Try to read repo_name in repository location, but if
+ it is not found use variable name as repository name"""
+ aliases = repo_opts.get('aliases')
+ if aliases is not None:
+ aliases = tuple(aliases.split())
+ self.aliases = aliases
+
+ eclass_overrides = repo_opts.get('eclass-overrides')
+ if eclass_overrides is not None:
+ eclass_overrides = tuple(eclass_overrides.split())
+ self.eclass_overrides = eclass_overrides
+ #Locations are computed later.
+ self.eclass_locations = None
+
+ #Masters are only read from layout.conf.
+ self.masters = None
+
+ #The main-repo key only makes sense for the 'DEFAULT' section.
+ self.main_repo = repo_opts.get('main-repo')
+
+ priority = repo_opts.get('priority')
+ if priority is not None:
+ try:
+ priority = int(priority)
+ except ValueError:
+ priority = None
+ self.priority = priority
+
+ sync = repo_opts.get('sync')
+ if sync is not None:
+ sync = sync.strip()
+ self.sync = sync
+
+ format = repo_opts.get('format')
+ if format is not None:
+ format = format.strip()
+ self.format = format
+
+ location = repo_opts.get('location')
+ self.user_location = location
+ if location is not None and location.strip():
+ if os.path.isdir(location):
+ location = os.path.realpath(location)
+ else:
+ location = None
+ self.location = location
+
+ missing = True
+ if self.location is not None:
+ name, missing = self._read_repo_name(self.location)
+ # We must ensure that the name conforms to PMS 3.1.5
+ # in order to avoid InvalidAtom exceptions when we
+ # use it to generate atoms.
+ name = _gen_valid_repo(name)
+ if not name:
+ # name only contains invalid characters
+ name = "x-" + os.path.basename(self.location)
+ name = _gen_valid_repo(name)
+ # If basename only contains whitespace then the
+ # end result is name = 'x-'.
+
+ elif name == "DEFAULT":
+ missing = False
+ self.name = name
+ self.missing_repo_name = missing
+
+ def update(self, new_repo):
+ """Update repository with options in another RepoConfig"""
+ if new_repo.aliases is not None:
+ self.aliases = new_repo.aliases
+ if new_repo.eclass_overrides is not None:
+ self.eclass_overrides = new_repo.eclass_overrides
+ if new_repo.masters is not None:
+ self.masters = new_repo.masters
+ if new_repo.name is not None:
+ self.name = new_repo.name
+ self.missing_repo_name = new_repo.missing_repo_name
+ if new_repo.user_location is not None:
+ self.user_location = new_repo.user_location
+ if new_repo.location is not None:
+ self.location = new_repo.location
+ if new_repo.priority is not None:
+ self.priority = new_repo.priority
+ if new_repo.sync is not None:
+ self.sync = new_repo.sync
+
+ def _read_repo_name(self, repo_path):
+ """
+ Read repo_name from repo_path.
+ Returns repo_name, missing.
+ """
+ repo_name_path = os.path.join(repo_path, REPO_NAME_LOC)
+ try:
+ return io.open(
+ _unicode_encode(repo_name_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace').readline().strip(), False
+ except EnvironmentError:
+ return "x-" + os.path.basename(repo_path), True
+
+ def info_string(self):
+ """
+ Returns a formatted string containing information about the repository.
+ Used by emerge --info.
+ """
+ indent = " " * 4
+ repo_msg = []
+ repo_msg.append(self.name)
+ if self.format:
+ repo_msg.append(indent + "format: " + self.format)
+ if self.user_location:
+ repo_msg.append(indent + "location: " + self.user_location)
+ if self.sync:
+ repo_msg.append(indent + "sync: " + self.sync)
+ if self.masters:
+ repo_msg.append(indent + "masters: " + " ".join(master.name for master in self.masters))
+ if self.priority is not None:
+ repo_msg.append(indent + "priority: " + str(self.priority))
+ if self.aliases:
+ repo_msg.append(indent + "aliases: " + " ".join(self.aliases))
+ if self.eclass_overrides:
+ repo_msg.append(indent + "eclass_overrides: " + \
+ " ".join(self.eclass_overrides))
+ repo_msg.append("")
+ return "\n".join(repo_msg)
+
+class RepoConfigLoader(object):
+ """Loads and store config of several repositories, loaded from PORTDIR_OVERLAY or repos.conf"""
+ def __init__(self, paths, settings):
+ """Load config from files in paths"""
+ def parse(paths, prepos, ignored_map, ignored_location_map):
+ """Parse files in paths to load config"""
+ parser = SafeConfigParser()
+ try:
+ parser.read(paths)
+ except ParsingError as e:
+ writemsg(_("!!! Error while reading repo config file: %s\n") % e, noiselevel=-1)
+ prepos['DEFAULT'] = RepoConfig("DEFAULT", parser.defaults())
+ for sname in parser.sections():
+ optdict = {}
+ for oname in parser.options(sname):
+ optdict[oname] = parser.get(sname, oname)
+
+ repo = RepoConfig(sname, optdict)
+ if repo.location and not os.path.exists(repo.location):
+ writemsg(_("!!! Invalid repos.conf entry '%s'"
+ " (not a dir): '%s'\n") % (sname, repo.location), noiselevel=-1)
+ continue
+
+ if repo.name in prepos:
+ old_location = prepos[repo.name].location
+ if old_location is not None and repo.location is not None and old_location != repo.location:
+ ignored_map.setdefault(repo.name, []).append(old_location)
+ ignored_location_map[old_location] = repo.name
+ prepos[repo.name].update(repo)
+ else:
+ prepos[repo.name] = repo
+
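+		# For illustration, a hypothetical repos.conf fragment that the
+		# parse() helper above would consume:
+		#
+		#	[DEFAULT]
+		#	main-repo = gentoo
+		#
+		#	[gentoo]
+		#	location = /usr/portage
+		#	sync = rsync://rsync.gentoo.org/gentoo-portage
+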
+ def add_overlays(portdir, portdir_overlay, prepos, ignored_map, ignored_location_map):
+ """Add overlays in PORTDIR_OVERLAY as repositories"""
+ overlays = []
+ if portdir:
+ portdir = normalize_path(portdir)
+ overlays.append(portdir)
+ port_ov = [normalize_path(i) for i in shlex_split(portdir_overlay)]
+ overlays.extend(port_ov)
+ default_repo_opts = {}
+ if prepos['DEFAULT'].aliases is not None:
+ default_repo_opts['aliases'] = \
+ ' '.join(prepos['DEFAULT'].aliases)
+ if prepos['DEFAULT'].eclass_overrides is not None:
+ default_repo_opts['eclass-overrides'] = \
+ ' '.join(prepos['DEFAULT'].eclass_overrides)
+ if prepos['DEFAULT'].masters is not None:
+ default_repo_opts['masters'] = \
+ ' '.join(prepos['DEFAULT'].masters)
+ if overlays:
+				# The main tree (PORTDIR) gets a large negative priority below
+				# so that it is consulted before any overlay; overlays keep
+				# their listed order via increasing non-negative priorities.
+ base_priority = 0
+ for ov in overlays:
+ if os.path.isdir(ov):
+ repo_opts = default_repo_opts.copy()
+ repo_opts['location'] = ov
+ repo = RepoConfig(None, repo_opts)
+ repo_conf_opts = prepos.get(repo.name)
+ if repo_conf_opts is not None:
+ if repo_conf_opts.aliases is not None:
+ repo_opts['aliases'] = \
+ ' '.join(repo_conf_opts.aliases)
+ if repo_conf_opts.eclass_overrides is not None:
+ repo_opts['eclass-overrides'] = \
+ ' '.join(repo_conf_opts.eclass_overrides)
+ if repo_conf_opts.masters is not None:
+ repo_opts['masters'] = \
+ ' '.join(repo_conf_opts.masters)
+ repo = RepoConfig(repo.name, repo_opts)
+ if repo.name in prepos:
+ old_location = prepos[repo.name].location
+ if old_location is not None and old_location != repo.location:
+ ignored_map.setdefault(repo.name, []).append(old_location)
+ ignored_location_map[old_location] = repo.name
+ if old_location == portdir:
+ portdir = repo.user_location
+ prepos[repo.name].update(repo)
+ repo = prepos[repo.name]
+ else:
+ prepos[repo.name] = repo
+
+ if ov == portdir and portdir not in port_ov:
+ repo.priority = -1000
+ else:
+ repo.priority = base_priority
+ base_priority += 1
+
+ else:
+ writemsg(_("!!! Invalid PORTDIR_OVERLAY"
+ " (not a dir): '%s'\n") % ov, noiselevel=-1)
+
+ return portdir
+
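+		# Example (hypothetical paths): with PORTDIR=/usr/portage and
+		# PORTDIR_OVERLAY="/var/ov1 /var/ov2", add_overlays() assigns
+		#
+		#	/usr/portage -> priority -1000
+		#	/var/ov1     -> priority 0
+		#	/var/ov2     -> priority 1
+		#
+		# so the main tree sorts first and overlays keep their listed order.
+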
+ def repo_priority(r):
+ """
+			Key function for sorting repositories by priority.
+			None counts as priority zero.
+ """
+ x = prepos[r].priority
+ if x is None:
+ return 0
+ return x
+
+ prepos = {}
+ location_map = {}
+ treemap = {}
+ ignored_map = {}
+ ignored_location_map = {}
+
+ portdir = settings.get('PORTDIR', '')
+ portdir_overlay = settings.get('PORTDIR_OVERLAY', '')
+ parse(paths, prepos, ignored_map, ignored_location_map)
+ # If PORTDIR_OVERLAY contains a repo with the same repo_name as
+ # PORTDIR, then PORTDIR is overridden.
+ portdir = add_overlays(portdir, portdir_overlay, prepos,
+ ignored_map, ignored_location_map)
+ if portdir and portdir.strip():
+ portdir = os.path.realpath(portdir)
+
+ ignored_repos = tuple((repo_name, tuple(paths)) \
+ for repo_name, paths in ignored_map.items())
+
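+		# Note: despite its name, this set holds the *locations* of
+		# repositories that lack a profiles/repo_name entry.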
+ self.missing_repo_names = frozenset(repo.location
+ for repo in prepos.values()
+ if repo.location is not None and repo.missing_repo_name)
+
+ #Parse layout.conf and read masters key.
+ for repo in prepos.values():
+ if not repo.location:
+ continue
+ layout_filename = os.path.join(repo.location, "metadata", "layout.conf")
+ layout_file = KeyValuePairFileLoader(layout_filename, None, None)
+ layout_data, layout_errors = layout_file.load()
+
+ masters = layout_data.get('masters')
+ if masters and masters.strip():
+ masters = masters.split()
+ else:
+ masters = None
+ repo.masters = masters
+
+ aliases = layout_data.get('aliases')
+ if aliases and aliases.strip():
+ aliases = aliases.split()
+ else:
+ aliases = None
+ if aliases:
+ if repo.aliases:
+ aliases.extend(repo.aliases)
+ repo.aliases = tuple(sorted(set(aliases)))
+
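+		# For illustration, a hypothetical metadata/layout.conf as read by
+		# the loop above (simple key = value pairs):
+		#
+		#	masters = gentoo
+		#	aliases = my-overlay-old
+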
+ #Take aliases into account.
+ new_prepos = {}
+ for repo_name, repo in prepos.items():
+ names = set()
+ names.add(repo_name)
+ if repo.aliases:
+ names.update(repo.aliases)
+
+ for name in names:
+ if name in new_prepos:
+ writemsg_level(_("!!! Repository name or alias '%s', " + \
+ "defined for repository '%s', overrides " + \
+ "existing alias or repository.\n") % (name, repo_name), level=logging.WARNING, noiselevel=-1)
+ new_prepos[name] = repo
+ prepos = new_prepos
+
+ for (name, r) in prepos.items():
+ if r.location is not None:
+ location_map[r.location] = name
+ treemap[name] = r.location
+
+ # filter duplicates from aliases, by only including
+ # items where repo.name == key
+ prepos_order = [repo.name for key, repo in prepos.items() \
+ if repo.name == key and repo.location is not None]
+ prepos_order.sort(key=repo_priority)
+
+ if portdir in location_map:
+ portdir_repo = prepos[location_map[portdir]]
+ portdir_sync = settings.get('SYNC', '')
+			# if the SYNC variable is set and not overridden by repos.conf
+ if portdir_sync and not portdir_repo.sync:
+ portdir_repo.sync = portdir_sync
+
+ if prepos['DEFAULT'].main_repo is None or \
+ prepos['DEFAULT'].main_repo not in prepos:
+ #setting main_repo if it was not set in repos.conf
+ if portdir in location_map:
+ prepos['DEFAULT'].main_repo = location_map[portdir]
+ elif portdir in ignored_location_map:
+ prepos['DEFAULT'].main_repo = ignored_location_map[portdir]
+ else:
+ prepos['DEFAULT'].main_repo = None
+				writemsg(_("!!! main-repo not set in DEFAULT and PORTDIR is empty.\n"), noiselevel=-1)
+
+ self.prepos = prepos
+ self.prepos_order = prepos_order
+ self.ignored_repos = ignored_repos
+ self.location_map = location_map
+ self.treemap = treemap
+ self._prepos_changed = True
+ self._repo_location_list = []
+
+ #The 'masters' key currently contains repo names. Replace them with the matching RepoConfig.
+ for repo_name, repo in prepos.items():
+ if repo_name == "DEFAULT":
+ continue
+ if repo.masters is None:
+ if self.mainRepo() and repo_name != self.mainRepo().name:
+					repo.masters = (self.mainRepo(),)
+ else:
+ repo.masters = ()
+ else:
+ if repo.masters and isinstance(repo.masters[0], RepoConfig):
+ # This one has already been processed
+ # because it has an alias.
+ continue
+ master_repos = []
+ for master_name in repo.masters:
+ if master_name not in prepos:
+ layout_filename = os.path.join(repo.user_location,
+ "metadata", "layout.conf")
+ writemsg_level(_("Unavailable repository '%s' " \
+ "referenced by masters entry in '%s'\n") % \
+ (master_name, layout_filename),
+ level=logging.ERROR, noiselevel=-1)
+ else:
+ master_repos.append(prepos[master_name])
+ repo.masters = tuple(master_repos)
+
+ #The 'eclass_overrides' key currently contains repo names. Replace them with the matching repo paths.
+ for repo_name, repo in prepos.items():
+ if repo_name == "DEFAULT":
+ continue
+
+ eclass_locations = []
+ eclass_locations.extend(master_repo.location for master_repo in repo.masters)
+ eclass_locations.append(repo.location)
+
+ if repo.eclass_overrides:
+ for other_repo_name in repo.eclass_overrides:
+ if other_repo_name in self.treemap:
+ eclass_locations.append(self.get_location_for_name(other_repo_name))
+ else:
+ writemsg_level(_("Unavailable repository '%s' " \
+ "referenced by eclass-overrides entry for " \
+ "'%s'\n") % (other_repo_name, repo_name), \
+ level=logging.ERROR, noiselevel=-1)
+ repo.eclass_locations = tuple(eclass_locations)
+
+ self._prepos_changed = True
+ self._repo_location_list = []
+
+ self._check_locations()
+
+ def repoLocationList(self):
+		"""Get a list of repository locations. Replaces PORTDIR_OVERLAY"""
+ if self._prepos_changed:
+ _repo_location_list = []
+ for repo in self.prepos_order:
+ if self.prepos[repo].location is not None:
+ _repo_location_list.append(self.prepos[repo].location)
+ self._repo_location_list = tuple(_repo_location_list)
+
+ self._prepos_changed = False
+ return self._repo_location_list
+
+ def repoUserLocationList(self):
+		"""Get a list of repository user locations. Replaces PORTDIR_OVERLAY"""
+ user_location_list = []
+ for repo in self.prepos_order:
+ if self.prepos[repo].location is not None:
+ user_location_list.append(self.prepos[repo].user_location)
+ return tuple(user_location_list)
+
+ def mainRepoLocation(self):
+		"""Returns the location of the main repo"""
+ main_repo = self.prepos['DEFAULT'].main_repo
+ if main_repo is not None and main_repo in self.prepos:
+ return self.prepos[main_repo].location
+ else:
+ return ''
+
+ def mainRepo(self):
+ """Returns the main repo"""
+		main_repo = self.prepos['DEFAULT'].main_repo
+		if main_repo is None:
+			return None
+		return self.prepos[main_repo]
+
+ def _check_locations(self):
+		"""Check that repository locations are valid and print a warning if not"""
+ for (name, r) in self.prepos.items():
+ if name != 'DEFAULT':
+ if r.location is None:
+ writemsg(_("!!! Location not set for repository %s\n") % name, noiselevel=-1)
+ else:
+ if not os.path.isdir(r.location):
+ self.prepos_order.remove(name)
+ writemsg(_("!!! Invalid Repository Location"
+ " (not a dir): '%s'\n") % r.location, noiselevel=-1)
+
+ def repos_with_profiles(self):
+ for repo_name in self.prepos_order:
+ repo = self.prepos[repo_name]
+ if repo.format != "unavailable":
+ yield repo
+
+ def get_name_for_location(self, location):
+ return self.location_map[location]
+
+ def get_location_for_name(self, repo_name):
+ if repo_name is None:
+ # This simplifies code in places where
+ # we want to be able to pass in Atom.repo
+ # even if it is None.
+ return None
+ return self.treemap[repo_name]
+
+ def __getitem__(self, repo_name):
+ return self.prepos[repo_name]
+
+ def __iter__(self):
+ for repo_name in self.prepos_order:
+ yield self.prepos[repo_name]
+
+def load_repository_config(settings):
+ #~ repoconfigpaths = [os.path.join(settings.global_config_path, "repos.conf")]
+ repoconfigpaths = []
+ if settings.local_config:
+ repoconfigpaths.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
+ USER_CONFIG_PATH, "repos.conf"))
+ return RepoConfigLoader(repoconfigpaths, settings)
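+
+# Usage sketch (illustrative, not part of the upstream file), assuming an
+# initialized portage settings instance:
+#
+#	import portage
+#	from portage.repository.config import load_repository_config
+#
+#	settings = portage.config(clone=portage.settings)
+#	repos = load_repository_config(settings)
+#	for repo in repos:  # iterates in priority order
+#		print("%s %s" % (repo.name, repo.location))
+#	main = repos.mainRepo()
+#	if main is not None:
+#		print("main repo: %s" % main.name)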
diff --git a/portage_with_autodep/pym/portage/tests/__init__.py b/portage_with_autodep/pym/portage/tests/__init__.py
new file mode 100644
index 0000000..a647aa2
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/__init__.py
@@ -0,0 +1,244 @@
+# tests/__init__.py -- Portage Unit Test functionality
+# Copyright 2006-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+import time
+import unittest
+
+try:
+ from unittest.runner import _TextTestResult # new in python-2.7
+except ImportError:
+ from unittest import _TextTestResult
+
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+
+def main():
+
+ TEST_FILE = b'__test__'
+ svn_dirname = b'.svn'
+ suite = unittest.TestSuite()
+ basedir = os.path.dirname(os.path.realpath(__file__))
+ testDirs = []
+
+ if len(sys.argv) > 1:
+ suite.addTests(getTestFromCommandLine(sys.argv[1:], basedir))
+ return TextTestRunner(verbosity=2).run(suite)
+
+	# The os.walk docs mention relative paths as being quirky, and
+	# maintaining an explicit directory list was tedious, so instead we
+	# drop a __test__ marker file into each directory we want tested.
+ for root, dirs, files in os.walk(basedir):
+ if svn_dirname in dirs:
+ dirs.remove(svn_dirname)
+ try:
+ root = _unicode_decode(root,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+
+ if TEST_FILE in files:
+ testDirs.append(root)
+
+ for mydir in testDirs:
+ suite.addTests(getTests(os.path.join(basedir, mydir), basedir) )
+ return TextTestRunner(verbosity=2).run(suite)
+
+def my_import(name):
+ mod = __import__(name)
+ components = name.split('.')
+ for comp in components[1:]:
+ mod = getattr(mod, comp)
+ return mod
+
+def getTestFromCommandLine(args, base_path):
+ ret = []
+ for arg in args:
+ realpath = os.path.realpath(arg)
+ path = os.path.dirname(realpath)
+ f = realpath[len(path)+1:]
+
+ if not f.startswith("test") or not f.endswith(".py"):
+ raise Exception("Invalid argument: '%s'" % arg)
+
+ mymodule = f[:-3]
+
+ parent_path = path[len(base_path)+1:]
+ parent_module = ".".join(("portage", "tests", parent_path))
+ parent_module = parent_module.replace('/', '.')
+
+ # Make the trailing / a . for module importing
+ modname = ".".join((parent_module, mymodule))
+ mod = my_import(modname)
+ ret.append(unittest.TestLoader().loadTestsFromModule(mod))
+ return ret
+
+def getTests(path, base_path):
+	"""
+	path is the path to a given subdir ('portage/' for example).
+	This does a simple filter on files in that dir to give us
+	modules to import.
+	"""
+ files = os.listdir(path)
+ files = [ f[:-3] for f in files if f.startswith("test") and f.endswith(".py") ]
+ parent_path = path[len(base_path)+1:]
+ parent_module = ".".join(("portage", "tests", parent_path))
+ parent_module = parent_module.replace('/', '.')
+ result = []
+ for mymodule in files:
+ # Make the trailing / a . for module importing
+ modname = ".".join((parent_module, mymodule))
+ mod = my_import(modname)
+ result.append(unittest.TestLoader().loadTestsFromModule(mod))
+ return result
+
+class TextTestResult(_TextTestResult):
+ """
+ We need a subclass of unittest._TextTestResult to handle tests with TODO
+
+ This just adds an addTodo method that can be used to add tests
+ that are marked TODO; these can be displayed later
+ by the test runner.
+ """
+
+ def __init__(self, stream, descriptions, verbosity):
+ super(TextTestResult, self).__init__(stream, descriptions, verbosity)
+ self.todoed = []
+
+ def addTodo(self, test, info):
+ self.todoed.append((test,info))
+ if self.showAll:
+ self.stream.writeln("TODO")
+ elif self.dots:
+ self.stream.write(".")
+
+ def printErrors(self):
+ if self.dots or self.showAll:
+ self.stream.writeln()
+ self.printErrorList('ERROR', self.errors)
+ self.printErrorList('FAIL', self.failures)
+ self.printErrorList('TODO', self.todoed)
+
+class TestCase(unittest.TestCase):
+ """
+	We need a way to mark a unit test as "ok to fail."
+	This way someone can add a broken test, mark it as expected to
+	fail, and then fix the code later. This may not be a great
+	approach (broken code!!??!11oneone) but it does happen at times.
+ """
+
+ def __init__(self, methodName='runTest'):
+ # This method exists because unittest.py in python 2.4 stores
+ # the methodName as __testMethodName while 2.5 uses
+ # _testMethodName.
+ self._testMethodName = methodName
+ unittest.TestCase.__init__(self, methodName)
+ self.todo = False
+
+	def defaultTestResult(self):
+		# TextTestResult requires a stream with a writeln() method, so
+		# wrap stderr when run() is called without an explicit result.
+		try:
+			from unittest.runner import _WritelnDecorator
+		except ImportError:
+			from unittest import _WritelnDecorator
+		return TextTestResult(_WritelnDecorator(sys.stderr), True, 1)
+
+ def run(self, result=None):
+ if result is None: result = self.defaultTestResult()
+ result.startTest(self)
+ testMethod = getattr(self, self._testMethodName)
+ try:
+ try:
+ self.setUp()
+ except SystemExit:
+ raise
+ except KeyboardInterrupt:
+ raise
+ except:
+ result.addError(self, sys.exc_info())
+ return
+ ok = False
+ try:
+ testMethod()
+ ok = True
+ except self.failureException:
+ if self.todo:
+ result.addTodo(self,"%s: TODO" % testMethod)
+ else:
+ result.addFailure(self, sys.exc_info())
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except:
+ result.addError(self, sys.exc_info())
+ try:
+ self.tearDown()
+ except SystemExit:
+ raise
+ except KeyboardInterrupt:
+ raise
+ except:
+ result.addError(self, sys.exc_info())
+ ok = False
+ if ok: result.addSuccess(self)
+ finally:
+ result.stopTest(self)
+
+ def assertRaisesMsg(self, msg, excClass, callableObj, *args, **kwargs):
+ """Fail unless an exception of class excClass is thrown
+ by callableObj when invoked with arguments args and keyword
+ arguments kwargs. If a different type of exception is
+ thrown, it will not be caught, and the test case will be
+ deemed to have suffered an error, exactly as for an
+ unexpected exception.
+ """
+ try:
+ callableObj(*args, **kwargs)
+ except excClass:
+ return
+ else:
+ if hasattr(excClass,'__name__'): excName = excClass.__name__
+ else: excName = str(excClass)
+ raise self.failureException("%s not raised: %s" % (excName, msg))
+
+class TextTestRunner(unittest.TextTestRunner):
+ """
+	We subclass unittest.TextTestRunner to use our TextTestResult, so tests
+	marked TODO are reported as such instead of counting as failures
+ """
+
+ def _makeResult(self):
+ return TextTestResult(self.stream, self.descriptions, self.verbosity)
+
+ def run(self, test):
+ """
+ Run the given test case or test suite.
+ """
+ result = self._makeResult()
+ startTime = time.time()
+ test(result)
+ stopTime = time.time()
+ timeTaken = stopTime - startTime
+ result.printErrors()
+ self.stream.writeln(result.separator2)
+ run = result.testsRun
+ self.stream.writeln("Ran %d test%s in %.3fs" %
+ (run, run != 1 and "s" or "", timeTaken))
+ self.stream.writeln()
+ if not result.wasSuccessful():
+ self.stream.write("FAILED (")
+ failed = len(result.failures)
+ errored = len(result.errors)
+ if failed:
+ self.stream.write("failures=%d" % failed)
+ if errored:
+ if failed: self.stream.write(", ")
+ self.stream.write("errors=%d" % errored)
+ self.stream.writeln(")")
+ else:
+ self.stream.writeln("OK")
+ return result
+
+test_cps = ['sys-apps/portage','virtual/portage']
+test_versions = ['1.0', '1.0-r1','2.3_p4','1.0_alpha57']
+test_slots = [ None, '1','gentoo-sources-2.6.17','spankywashere']
+test_usedeps = ['foo','-bar', ('foo','bar'),
+ ('foo','-bar'), ('foo?', '!bar?') ]
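+
+# For illustration (hypothetical test, not part of the upstream file):
+# setting self.todo makes a failure show up as TODO instead of FAIL.
+#
+#	from portage.tests import TestCase
+#
+#	class BrokenFeatureTestCase(TestCase):
+#		def testKnownBroken(self):
+#			self.todo = True
+#			self.assertEqual(1, 2)  # reported as TODO by TextTestResult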
diff --git a/portage_with_autodep/pym/portage/tests/bin/__init__.py b/portage_with_autodep/pym/portage/tests/bin/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/bin/__init__.py
diff --git a/portage_with_autodep/pym/portage/tests/bin/__test__ b/portage_with_autodep/pym/portage/tests/bin/__test__
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/bin/__test__
diff --git a/portage_with_autodep/pym/portage/tests/bin/setup_env.py b/portage_with_autodep/pym/portage/tests/bin/setup_env.py
new file mode 100644
index 0000000..e07643d
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/bin/setup_env.py
@@ -0,0 +1,85 @@
+# setup_env.py -- Make sure bin subdir has sane env for testing
+# Copyright 2007-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import tempfile
+
+from portage import os
+from portage import shutil
+from portage.tests import TestCase
+from portage.process import spawn
+
+basepath = os.path.join(os.path.dirname(os.path.dirname(
+ os.path.abspath(__file__))),
+ "..", "..", "..")
+bindir = os.path.join(basepath, "bin")
+pymdir = os.path.join(basepath, "pym")
+basedir = None
+env = None
+
+def binTestsCleanup():
+ global basedir
+ if basedir is None:
+ return
+ if os.access(basedir, os.W_OK):
+ shutil.rmtree(basedir)
+ basedir = None
+
+def binTestsInit():
+ binTestsCleanup()
+ global basedir, env
+ basedir = tempfile.mkdtemp()
+ env = os.environ.copy()
+ env["D"] = os.path.join(basedir, "image")
+ env["T"] = os.path.join(basedir, "temp")
+ env["S"] = os.path.join(basedir, "workdir")
+ env["PF"] = "portage-tests-0.09-r1"
+ env["PATH"] = bindir + ":" + env["PATH"]
+ env["PORTAGE_BIN_PATH"] = bindir
+ env["PORTAGE_PYM_PATH"] = pymdir
+ os.mkdir(env["D"])
+ os.mkdir(env["T"])
+ os.mkdir(env["S"])
+
+class BinTestCase(TestCase):
+ def init(self):
+ binTestsInit()
+ def cleanup(self):
+ binTestsCleanup()
+
+def _exists_in_D(path):
+ # Note: do not use os.path.join() here, we assume D to end in /
+ return os.access(env["D"] + path, os.W_OK)
+def exists_in_D(path):
+ if not _exists_in_D(path):
+ raise TestCase.failureException
+def xexists_in_D(path):
+ if _exists_in_D(path):
+ raise TestCase.failureException
+
+def portage_func(func, args, exit_status=0):
+	# we don't care about the output of the programs,
+	# just their exit status and the state of $D
+	global env
+	f = open('/dev/null', 'wb')
+	fd_pipes = {0:0,1:f.fileno(),2:f.fileno()}
+	def pre_exec():
+		os.chdir(env["S"])
+	retval = spawn([func] + args.split(), env=env,
+		fd_pipes=fd_pipes, pre_exec=pre_exec)
+	f.close()
+	# verify that the helper exited with the expected status
+	if retval != exit_status:
+		raise TestCase.failureException("%s: exit status %s != %s" % \
+			(func, retval, exit_status))
+
+def create_portage_wrapper(bin):
+ def derived_func(*args):
+ newargs = list(args)
+ newargs.insert(0, bin)
+ return portage_func(*newargs)
+ return derived_func
+
+for bin in os.listdir(os.path.join(bindir, "ebuild-helpers")):
+ if bin.startswith("do") or \
+ bin.startswith("new") or \
+ bin.startswith("prep") or \
+ bin in ["ecompress","ecompressdir","fowners","fperms"]:
+ globals()[bin] = create_portage_wrapper(
+ os.path.join(bindir, "ebuild-helpers", bin))
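+
+# Usage sketch (hypothetical session, not part of the upstream file): the
+# loop above turns each matching helper into a module-level wrapper, e.g.
+#
+#	binTestsInit()
+#	dodir("/usr/share/doc")        # runs bin/ebuild-helpers/dodir
+#	exists_in_D("/usr/share/doc")  # raises failureException if absent
+#	binTestsCleanup()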
diff --git a/portage_with_autodep/pym/portage/tests/bin/test_dobin.py b/portage_with_autodep/pym/portage/tests/bin/test_dobin.py
new file mode 100644
index 0000000..6f50d7a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/bin/test_dobin.py
@@ -0,0 +1,16 @@
+# test_dobin.py -- Portage Unit Testing Functionality
+# Copyright 2007-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests.bin.setup_env import BinTestCase, dobin, xexists_in_D
+
+class DoBin(BinTestCase):
+ def testDoBin(self):
+ self.init()
+ try:
+ dobin("does-not-exist", 1)
+ xexists_in_D("does-not-exist")
+ xexists_in_D("/bin/does-not-exist")
+ xexists_in_D("/usr/bin/does-not-exist")
+ finally:
+ self.cleanup()
diff --git a/portage_with_autodep/pym/portage/tests/bin/test_dodir.py b/portage_with_autodep/pym/portage/tests/bin/test_dodir.py
new file mode 100644
index 0000000..f4eb9b2
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/bin/test_dodir.py
@@ -0,0 +1,16 @@
+# test_dodir.py -- Portage Unit Testing Functionality
+# Copyright 2007-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests.bin.setup_env import BinTestCase, dodir, exists_in_D
+
+class DoDir(BinTestCase):
+ def testDoDir(self):
+ self.init()
+ try:
+ dodir("usr /usr")
+ exists_in_D("/usr")
+ dodir("/var/lib/moocow")
+ exists_in_D("/var/lib/moocow")
+ finally:
+ self.cleanup()
diff --git a/portage_with_autodep/pym/portage/tests/dbapi/__init__.py b/portage_with_autodep/pym/portage/tests/dbapi/__init__.py
new file mode 100644
index 0000000..532918b
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dbapi/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/portage_with_autodep/pym/portage/tests/dbapi/__test__ b/portage_with_autodep/pym/portage/tests/dbapi/__test__
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dbapi/__test__
diff --git a/portage_with_autodep/pym/portage/tests/dbapi/test_fakedbapi.py b/portage_with_autodep/pym/portage/tests/dbapi/test_fakedbapi.py
new file mode 100644
index 0000000..a2c5f77
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dbapi/test_fakedbapi.py
@@ -0,0 +1,58 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import shutil
+import tempfile
+
+from portage import os
+from portage.dbapi.virtual import fakedbapi
+from portage.package.ebuild.config import config
+from portage.tests import TestCase
+
+class TestFakedbapi(TestCase):
+
+ def testFakedbapi(self):
+ packages = (
+ ("sys-apps/portage-2.1.10", {
+ "EAPI" : "2",
+ "IUSE" : "ipc doc",
+ "repository" : "gentoo",
+ "SLOT" : "0",
+ "USE" : "ipc missing-iuse",
+ }),
+ ("virtual/package-manager-0", {
+ "EAPI" : "0",
+ "repository" : "gentoo",
+ "SLOT" : "0",
+ }),
+ )
+
+ match_tests = (
+ ("sys-apps/portage:0[ipc]", ["sys-apps/portage-2.1.10"]),
+ ("sys-apps/portage:0[-ipc]", []),
+ ("sys-apps/portage:0[doc]", []),
+ ("sys-apps/portage:0[-doc]", ["sys-apps/portage-2.1.10"]),
+ ("sys-apps/portage:0", ["sys-apps/portage-2.1.10"]),
+ ("sys-apps/portage:0[missing-iuse]", []),
+ ("sys-apps/portage:0[-missing-iuse]", []),
+ ("sys-apps/portage:0::gentoo[ipc]", ["sys-apps/portage-2.1.10"]),
+ ("sys-apps/portage:0::multilib[ipc]", []),
+ ("virtual/package-manager", ["virtual/package-manager-0"]),
+ )
+
+ tempdir = tempfile.mkdtemp()
+ try:
+ portdir = os.path.join(tempdir, "usr/portage")
+ os.makedirs(portdir)
+ env = {
+ "PORTDIR": portdir,
+ }
+ fakedb = fakedbapi(settings=config(config_profile_path="",
+ env=env, _eprefix=tempdir))
+ for cpv, metadata in packages:
+ fakedb.cpv_inject(cpv, metadata=metadata)
+
+ for atom, expected_result in match_tests:
+ self.assertEqual( fakedb.match(atom), expected_result )
+ finally:
+ shutil.rmtree(tempdir)
diff --git a/portage_with_autodep/pym/portage/tests/dep/__init__.py b/portage_with_autodep/pym/portage/tests/dep/__init__.py
new file mode 100644
index 0000000..9c3f524
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/__init__.py
@@ -0,0 +1,3 @@
+# tests/portage.dep/__init__.py -- Portage Unit Test functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/portage_with_autodep/pym/portage/tests/dep/__test__ b/portage_with_autodep/pym/portage/tests/dep/__test__
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/__test__
diff --git a/portage_with_autodep/pym/portage/tests/dep/testAtom.py b/portage_with_autodep/pym/portage/tests/dep/testAtom.py
new file mode 100644
index 0000000..092cacf
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/testAtom.py
@@ -0,0 +1,315 @@
+# Copyright 2006, 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import Atom
+from portage.exception import InvalidAtom
+
+class TestAtom(TestCase):
+
+ def testAtom(self):
+
+ tests = (
+ ( "=sys-apps/portage-2.1-r1:0[doc,a=,!b=,c?,!d?,-e]",
+ ('=', 'sys-apps/portage', '2.1-r1', '0', '[doc,a=,!b=,c?,!d?,-e]', None), False, False ),
+ ( "=sys-apps/portage-2.1-r1*:0[doc]",
+ ('=*', 'sys-apps/portage', '2.1-r1', '0', '[doc]', None), False, False ),
+ ( "sys-apps/portage:0[doc]",
+ (None, 'sys-apps/portage', None, '0', '[doc]', None), False, False ),
+ ( "*/*",
+ (None, '*/*', None, None, None, None), True, False ),
+ ( "sys-apps/*",
+ (None, 'sys-apps/*', None, None, None, None), True, False ),
+ ( "*/portage",
+ (None, '*/portage', None, None, None, None), True, False ),
+ ( "s*s-*/portage:1",
+ (None, 's*s-*/portage', None, '1', None, None), True, False ),
+ ( "*/po*ge:2",
+ (None, '*/po*ge', None, '2', None, None), True, False ),
+ ( "!dev-libs/A",
+ (None, 'dev-libs/A', None, None, None, None), True, True ),
+ ( "!!dev-libs/A",
+ (None, 'dev-libs/A', None, None, None, None), True, True ),
+ ( "dev-libs/A[foo(+)]",
+ (None, 'dev-libs/A', None, None, "[foo(+)]", None), True, True ),
+ ( "dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]",
+ (None, 'dev-libs/A', None, None, "[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", None), True, True ),
+ ( "dev-libs/A:2[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]",
+ (None, 'dev-libs/A', None, "2", "[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", None), True, True ),
+
+ ( "=sys-apps/portage-2.1-r1:0::repo_name[doc,a=,!b=,c?,!d?,-e]",
+ ('=', 'sys-apps/portage', '2.1-r1', '0', '[doc,a=,!b=,c?,!d?,-e]', 'repo_name'), False, True ),
+ ( "=sys-apps/portage-2.1-r1*:0::repo_name[doc]",
+ ('=*', 'sys-apps/portage', '2.1-r1', '0', '[doc]', 'repo_name'), False, True ),
+ ( "sys-apps/portage:0::repo_name[doc]",
+ (None, 'sys-apps/portage', None, '0', '[doc]', 'repo_name'), False, True ),
+
+ ( "*/*::repo_name",
+ (None, '*/*', None, None, None, 'repo_name'), True, True ),
+ ( "sys-apps/*::repo_name",
+ (None, 'sys-apps/*', None, None, None, 'repo_name'), True, True ),
+ ( "*/portage::repo_name",
+ (None, '*/portage', None, None, None, 'repo_name'), True, True ),
+ ( "s*s-*/portage:1::repo_name",
+ (None, 's*s-*/portage', None, '1', None, 'repo_name'), True, True ),
+ )
+
+ tests_xfail = (
+ ( Atom("sys-apps/portage"), False, False ),
+ ( "cat/pkg[a!]", False, False ),
+ ( "cat/pkg[!a]", False, False ),
+ ( "cat/pkg[!a!]", False, False ),
+ ( "cat/pkg[!a-]", False, False ),
+ ( "cat/pkg[-a=]", False, False ),
+ ( "cat/pkg[-a?]", False, False ),
+ ( "cat/pkg[-a!]", False, False ),
+ ( "cat/pkg[=a]", False, False ),
+ ( "cat/pkg[=a=]", False, False ),
+ ( "cat/pkg[=a?]", False, False ),
+ ( "cat/pkg[=a!]", False, False ),
+ ( "cat/pkg[=a-]", False, False ),
+ ( "cat/pkg[?a]", False, False ),
+ ( "cat/pkg[?a=]", False, False ),
+ ( "cat/pkg[?a?]", False, False ),
+ ( "cat/pkg[?a!]", False, False ),
+ ( "cat/pkg[?a-]", False, False ),
+ ( "sys-apps/portage[doc]:0", False, False ),
+ ( "*/*", False, False ),
+ ( "sys-apps/*", False, False ),
+ ( "*/portage", False, False ),
+ ( "*/**", True, False ),
+ ( "*/portage[use]", True, False ),
+ ( "cat/pkg[a()]", False, False ),
+ ( "cat/pkg[a(]", False, False ),
+ ( "cat/pkg[a)]", False, False ),
+ ( "cat/pkg[a(,b]", False, False ),
+ ( "cat/pkg[a),b]", False, False ),
+ ( "cat/pkg[a(*)]", False, False ),
+ ( "cat/pkg[a(*)]", True, False ),
+ ( "cat/pkg[a(+-)]", False, False ),
+ ( "cat/pkg[a()]", False, False ),
+ ( "cat/pkg[(+)a]", False, False ),
+ ( "cat/pkg[a=(+)]", False, False ),
+ ( "cat/pkg[!(+)a=]", False, False ),
+ ( "cat/pkg[!a=(+)]", False, False ),
+ ( "cat/pkg[a?(+)]", False, False ),
+ ( "cat/pkg[!a?(+)]", False, False ),
+ ( "cat/pkg[!(+)a?]", False, False ),
+ ( "cat/pkg[-(+)a]", False, False ),
+ ( "cat/pkg[a(+),-a]", False, False ),
+ ( "cat/pkg[a(-),-a]", False, False ),
+ ( "cat/pkg[-a,a(+)]", False, False ),
+ ( "cat/pkg[-a,a(-)]", False, False ),
+ ( "cat/pkg[-a(+),a(-)]", False, False ),
+ ( "cat/pkg[-a(-),a(+)]", False, False ),
+ ( "sys-apps/portage[doc]::repo_name", False, False ),
+ ( "sys-apps/portage:0[doc]::repo_name", False, False ),
+ ( "sys-apps/portage[doc]:0::repo_name", False, False ),
+ ( "=sys-apps/portage-2.1-r1:0::repo_name[doc,a=,!b=,c?,!d?,-e]", False, False ),
+ ( "=sys-apps/portage-2.1-r1*:0::repo_name[doc]", False, False ),
+ ( "sys-apps/portage:0::repo_name[doc]", False, False ),
+ ( "*/*::repo_name", True, False ),
+ )
+
+ for atom, parts, allow_wildcard, allow_repo in tests:
+ a = Atom(atom, allow_wildcard=allow_wildcard, allow_repo=allow_repo)
+ op, cp, ver, slot, use, repo = parts
+ self.assertEqual( op, a.operator,
+ msg="Atom('%s').operator = %s == '%s'" % ( atom, a.operator, op ) )
+ self.assertEqual( cp, a.cp,
+ msg="Atom('%s').cp = %s == '%s'" % ( atom, a.cp, cp ) )
+ if ver is not None:
+ cpv = "%s-%s" % (cp, ver)
+ else:
+ cpv = cp
+ self.assertEqual( cpv, a.cpv,
+ msg="Atom('%s').cpv = %s == '%s'" % ( atom, a.cpv, cpv ) )
+ self.assertEqual( slot, a.slot,
+ msg="Atom('%s').slot = %s == '%s'" % ( atom, a.slot, slot ) )
+ self.assertEqual( repo, a.repo,
+				msg="Atom('%s').repo = %s == '%s'" % ( atom, a.repo, repo ) )
+
+ if a.use:
+ returned_use = str(a.use)
+ else:
+ returned_use = None
+ self.assertEqual( use, returned_use,
+ msg="Atom('%s').use = %s == '%s'" % ( atom, returned_use, use ) )
+
+ for atom, allow_wildcard, allow_repo in tests_xfail:
+ self.assertRaisesMsg(atom, (InvalidAtom, TypeError), Atom, atom, \
+ allow_wildcard=allow_wildcard, allow_repo=allow_repo)
+
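+	# For illustration, the tuple fields checked above map onto Atom
+	# attributes like this (values per the parsing rules exercised here):
+	#
+	#	a = Atom("=sys-apps/portage-2.1-r1:0[doc]")
+	#	a.operator  # '='
+	#	a.cp        # 'sys-apps/portage'
+	#	a.cpv       # 'sys-apps/portage-2.1-r1'
+	#	a.slot      # '0'
+	#	str(a.use)  # '[doc]'
+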
+ def test_intersects(self):
+ test_cases = (
+ ("dev-libs/A", "dev-libs/A", True),
+ ("dev-libs/A", "dev-libs/B", False),
+ ("dev-libs/A", "sci-libs/A", False),
+ ("dev-libs/A[foo]", "sci-libs/A[bar]", False),
+ ("dev-libs/A[foo(+)]", "sci-libs/A[foo(-)]", False),
+ ("=dev-libs/A-1", "=dev-libs/A-1-r1", False),
+ ("~dev-libs/A-1", "=dev-libs/A-1", False),
+ ("=dev-libs/A-1:1", "=dev-libs/A-1", True),
+ ("=dev-libs/A-1:1", "=dev-libs/A-1:1", True),
+ ("=dev-libs/A-1:1", "=dev-libs/A-1:2", False),
+ )
+
+ for atom, other, expected_result in test_cases:
+ self.assertEqual(Atom(atom).intersects(Atom(other)), expected_result, \
+ "%s and %s should intersect: %s" % (atom, other, expected_result))
+
+ def test_violated_conditionals(self):
+ test_cases = (
+ ("dev-libs/A", ["foo"], ["foo"], None, "dev-libs/A"),
+ ("dev-libs/A[foo]", [], ["foo"], None, "dev-libs/A[foo]"),
+ ("dev-libs/A[foo]", ["foo"], ["foo"], None, "dev-libs/A"),
+ ("dev-libs/A[foo]", [], ["foo"], [], "dev-libs/A[foo]"),
+ ("dev-libs/A[foo]", ["foo"], ["foo"], [], "dev-libs/A"),
+
+ ("dev-libs/A:0[foo]", ["foo"], ["foo"], [], "dev-libs/A:0"),
+
+ ("dev-libs/A[foo,-bar]", [], ["foo", "bar"], None, "dev-libs/A[foo]"),
+ ("dev-libs/A[-foo,bar]", [], ["foo", "bar"], None, "dev-libs/A[bar]"),
+
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", [], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,!c=]"),
+
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["a"], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[!c=]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["b"], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,b=,!c=]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["c"], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["d"], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,!c=]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["e"], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,!c=,!e?]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["f"], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,!c=,-f]"),
+
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["a"], ["a", "b", "c", "d", "e", "f"], ["a"], "dev-libs/A[!c=]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["b"], ["a", "b", "c", "d", "e", "f"], ["b"], "dev-libs/A[a,!c=]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["c"], ["a", "b", "c", "d", "e", "f"], ["c"], "dev-libs/A[a,!c=]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["d"], ["a", "b", "c", "d", "e", "f"], ["d"], "dev-libs/A[a,!c=]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["e"], ["a", "b", "c", "d", "e", "f"], ["e"], "dev-libs/A[a,!c=]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["f"], ["a", "b", "c", "d", "e", "f"], ["f"], "dev-libs/A[a,!c=,-f]"),
+
+ ("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["a"], ["a", "b", "c", "d", "e", "f"], ["a"], "dev-libs/A[!c(+)=]"),
+ ("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["b"], ["a", "b", "c", "d", "e", "f"], ["b"], "dev-libs/A[a(-),!c(-)=]"),
+ ("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["c"], ["a", "b", "c", "d", "e", "f"], ["c"], "dev-libs/A[a(+),!c(+)=]"),
+ ("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["d"], ["a", "b", "c", "d", "e", "f"], ["d"], "dev-libs/A[a(-),!c(-)=]"),
+ ("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["e"], ["a", "b", "c", "d", "e", "f"], ["e"], "dev-libs/A[a(+),!c(+)=]"),
+ ("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["f"], ["a", "b", "c", "d", "e", "f"], ["f"], "dev-libs/A[a(-),!c(-)=,-f(+)]"),
+
+ ("dev-libs/A[a(+),b(+)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["a"], ["a"], ["a"], "dev-libs/A[b(+)=,!e(+)?]"),
+ ("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["b"], ["b"], ["b"], "dev-libs/A[a(-),!c(-)=,-f(+)]"),
+ ("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["c"], ["c"], ["c"], "dev-libs/A[!c(+)=,!e(+)?]"),
+ ("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["d"], ["d"], ["d"], "dev-libs/A[a(-),b(+)=,!c(-)=,-f(+)]"),
+ ("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["e"], ["e"], ["e"], "dev-libs/A"),
+ ("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["f"], ["f"], ["f"], "dev-libs/A[a(-),b(+)=,!c(-)=,-f(+)]"),
+
+ #Some more test cases to trigger all remaining code paths
+ ("dev-libs/B[x?]", [], ["x"], ["x"], "dev-libs/B[x?]"),
+ ("dev-libs/B[x(+)?]", [], [], ["x"], "dev-libs/B"),
+ ("dev-libs/B[x(-)?]", [], [], ["x"], "dev-libs/B[x(-)?]"),
+
+ ("dev-libs/C[x=]", [], ["x"], ["x"], "dev-libs/C[x=]"),
+ ("dev-libs/C[x(+)=]", [], [], ["x"], "dev-libs/C"),
+ ("dev-libs/C[x(-)=]", [], [], ["x"], "dev-libs/C[x(-)=]"),
+
+ ("dev-libs/D[!x=]", [], ["x"], ["x"], "dev-libs/D"),
+ ("dev-libs/D[!x(+)=]", [], [], ["x"], "dev-libs/D[!x(+)=]"),
+ ("dev-libs/D[!x(-)=]", [], [], ["x"], "dev-libs/D"),
+
+ #Missing IUSE test cases
+ ("dev-libs/B[x]", [], [], [], "dev-libs/B[x]"),
+ ("dev-libs/B[-x]", [], [], [], "dev-libs/B[-x]"),
+ ("dev-libs/B[x?]", [], [], [], "dev-libs/B[x?]"),
+ ("dev-libs/B[x=]", [], [], [], "dev-libs/B[x=]"),
+ ("dev-libs/B[!x=]", [], [], ["x"], "dev-libs/B[!x=]"),
+ ("dev-libs/B[!x?]", [], [], ["x"], "dev-libs/B[!x?]"),
+ )
+
+ test_cases_xfail = (
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", [], ["a", "b", "c", "d", "e", "f"], None),
+ )
+
+ class use_flag_validator(object):
+ def __init__(self, iuse):
+ self.iuse = iuse
+
+			def is_valid_flag(self, flag):
+				return flag in self.iuse
+
+ for atom, other_use, iuse, parent_use, expected_violated_atom in test_cases:
+ a = Atom(atom)
+ validator = use_flag_validator(iuse)
+ violated_atom = a.violated_conditionals(other_use, validator.is_valid_flag, parent_use)
+ if parent_use is None:
+ fail_msg = "Atom: %s, other_use: %s, iuse: %s, parent_use: %s, got: %s, expected: %s" % \
+ (atom, " ".join(other_use), " ".join(iuse), "None", str(violated_atom), expected_violated_atom)
+ else:
+ fail_msg = "Atom: %s, other_use: %s, iuse: %s, parent_use: %s, got: %s, expected: %s" % \
+ (atom, " ".join(other_use), " ".join(iuse), " ".join(parent_use), str(violated_atom), expected_violated_atom)
+ self.assertEqual(str(violated_atom), expected_violated_atom, fail_msg)
+
+ for atom, other_use, iuse, parent_use in test_cases_xfail:
+ a = Atom(atom)
+ validator = use_flag_validator(iuse)
+ self.assertRaisesMsg(atom, InvalidAtom, \
+ a.violated_conditionals, other_use, validator.is_valid_flag, parent_use)
+
+ def test_evaluate_conditionals(self):
+ test_cases = (
+ ("dev-libs/A[foo]", [], "dev-libs/A[foo]"),
+ ("dev-libs/A[foo]", ["foo"], "dev-libs/A[foo]"),
+
+ ("dev-libs/A:0[foo=]", ["foo"], "dev-libs/A:0[foo]"),
+
+ ("dev-libs/A[foo,-bar]", [], "dev-libs/A[foo,-bar]"),
+ ("dev-libs/A[-foo,bar]", [], "dev-libs/A[-foo,bar]"),
+
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", [], "dev-libs/A[a,-b,c,-e,-f]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["a"], "dev-libs/A[a,-b,c,-e,-f]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["b"], "dev-libs/A[a,b,c,-e,-f]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["c"], "dev-libs/A[a,-b,-c,-e,-f]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["d"], "dev-libs/A[a,-b,c,d,-e,-f]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["e"], "dev-libs/A[a,-b,c,-f]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["f"], "dev-libs/A[a,-b,c,-e,-f]"),
+ ("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["d"], "dev-libs/A[a(-),-b(+),c(-),d(+),-e(-),-f(+)]"),
+ ("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["f"], "dev-libs/A[a(+),-b(-),c(+),-e(+),-f(-)]"),
+ )
+
+ for atom, use, expected_atom in test_cases:
+ a = Atom(atom)
+ b = a.evaluate_conditionals(use)
+ self.assertEqual(str(b), expected_atom)
+ self.assertEqual(str(b.unevaluated_atom), atom)
+
+ def test__eval_qa_conditionals(self):
+ test_cases = (
+ ("dev-libs/A[foo]", [], [], "dev-libs/A[foo]"),
+ ("dev-libs/A[foo]", ["foo"], [], "dev-libs/A[foo]"),
+ ("dev-libs/A[foo]", [], ["foo"], "dev-libs/A[foo]"),
+
+ ("dev-libs/A:0[foo]", [], [], "dev-libs/A:0[foo]"),
+ ("dev-libs/A:0[foo]", ["foo"], [], "dev-libs/A:0[foo]"),
+ ("dev-libs/A:0[foo]", [], ["foo"], "dev-libs/A:0[foo]"),
+ ("dev-libs/A:0[foo=]", [], ["foo"], "dev-libs/A:0[foo]"),
+
+ ("dev-libs/A[foo,-bar]", ["foo"], ["bar"], "dev-libs/A[foo,-bar]"),
+ ("dev-libs/A[-foo,bar]", ["foo", "bar"], [], "dev-libs/A[-foo,bar]"),
+
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["a", "b", "c"], [], "dev-libs/A[a,-b,c,d,-e,-f]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", [], ["a", "b", "c"], "dev-libs/A[a,b,-c,d,-e,-f]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["d", "e", "f"], [], "dev-libs/A[a,b,-b,c,-c,-e,-f]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", [], ["d", "e", "f"], "dev-libs/A[a,b,-b,c,-c,d,-f]"),
+
+ ("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", \
+ ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a(-),-b(+),c(-),-e(-),-f(+)]"),
+ ("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", \
+ [], ["a", "b", "c", "d", "e", "f"], "dev-libs/A[a(+),b(-),-c(+),d(-),-f(-)]"),
+ )
+
+ for atom, use_mask, use_force, expected_atom in test_cases:
+ a = Atom(atom)
+ b = a._eval_qa_conditionals(use_mask, use_force)
+ self.assertEqual(str(b), expected_atom)
+ self.assertEqual(str(b.unevaluated_atom), atom)
diff --git a/portage_with_autodep/pym/portage/tests/dep/testCheckRequiredUse.py b/portage_with_autodep/pym/portage/tests/dep/testCheckRequiredUse.py
new file mode 100644
index 0000000..54791e0
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/testCheckRequiredUse.py
@@ -0,0 +1,219 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import check_required_use
+from portage.exception import InvalidDependString
+
+class TestCheckRequiredUse(TestCase):
+
+ def testCheckRequiredUse(self):
+ test_cases = (
+ ( "|| ( a b )", [], ["a", "b"], False),
+ ( "|| ( a b )", ["a"], ["a", "b"], True),
+ ( "|| ( a b )", ["b"], ["a", "b"], True),
+ ( "|| ( a b )", ["a", "b"], ["a", "b"], True),
+
+ ( "^^ ( a b )", [], ["a", "b"], False),
+ ( "^^ ( a b )", ["a"], ["a", "b"], True),
+ ( "^^ ( a b )", ["b"], ["a", "b"], True),
+ ( "^^ ( a b )", ["a", "b"], ["a", "b"], False),
+
+ ( "^^ ( || ( a b ) c )", [], ["a", "b", "c"], False),
+ ( "^^ ( || ( a b ) c )", ["a"], ["a", "b", "c"], True),
+
+ ( "^^ ( || ( ( a b ) ) ( c ) )", [], ["a", "b", "c"], False),
+ ( "( ^^ ( ( || ( ( a ) ( b ) ) ) ( ( c ) ) ) )", ["a"], ["a", "b", "c"], True),
+
+ ( "a || ( b c )", ["a"], ["a", "b", "c"], False),
+ ( "|| ( b c ) a", ["a"], ["a", "b", "c"], False),
+
+ ( "|| ( a b c )", ["a"], ["a", "b", "c"], True),
+ ( "|| ( a b c )", ["b"], ["a", "b", "c"], True),
+ ( "|| ( a b c )", ["c"], ["a", "b", "c"], True),
+
+ ( "^^ ( a b c )", ["a"], ["a", "b", "c"], True),
+ ( "^^ ( a b c )", ["b"], ["a", "b", "c"], True),
+ ( "^^ ( a b c )", ["c"], ["a", "b", "c"], True),
+ ( "^^ ( a b c )", ["a", "b"], ["a", "b", "c"], False),
+ ( "^^ ( a b c )", ["b", "c"], ["a", "b", "c"], False),
+ ( "^^ ( a b c )", ["a", "c"], ["a", "b", "c"], False),
+ ( "^^ ( a b c )", ["a", "b", "c"], ["a", "b", "c"], False),
+
+ ( "a? ( ^^ ( b c ) )", [], ["a", "b", "c"], True),
+ ( "a? ( ^^ ( b c ) )", ["a"], ["a", "b", "c"], False),
+ ( "a? ( ^^ ( b c ) )", ["b"], ["a", "b", "c"], True),
+ ( "a? ( ^^ ( b c ) )", ["c"], ["a", "b", "c"], True),
+ ( "a? ( ^^ ( b c ) )", ["a", "b"], ["a", "b", "c"], True),
+ ( "a? ( ^^ ( b c ) )", ["a", "b", "c"], ["a", "b", "c"], False),
+
+ ( "^^ ( a? ( !b ) !c? ( d ) )", [], ["a", "b", "c", "d"], False),
+ ( "^^ ( a? ( !b ) !c? ( d ) )", ["a"], ["a", "b", "c", "d"], True),
+ ( "^^ ( a? ( !b ) !c? ( d ) )", ["c"], ["a", "b", "c", "d"], True),
+ ( "^^ ( a? ( !b ) !c? ( d ) )", ["a", "c"], ["a", "b", "c", "d"], True),
+ ( "^^ ( a? ( !b ) !c? ( d ) )", ["a", "b", "c"], ["a", "b", "c", "d"], False),
+ ( "^^ ( a? ( !b ) !c? ( d ) )", ["a", "b", "d"], ["a", "b", "c", "d"], True),
+ ( "^^ ( a? ( !b ) !c? ( d ) )", ["a", "d"], ["a", "b", "c", "d"], False),
+
+ ( "|| ( ^^ ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"], False),
+ ( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["a"], ["a", "b", "c"], True),
+ ( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["b"], ["a", "b", "c"], True),
+ ( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["c"], ["a", "b", "c"], True),
+ ( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["a", "b"], ["a", "b", "c"], True),
+ ( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["a", "c"], ["a", "b", "c"], True),
+ ( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["b", "c"], ["a", "b", "c"], True),
+ ( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["a", "b", "c"], ["a", "b", "c"], False),
+
+ ( "^^ ( || ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"], False),
+ ( "^^ ( || ( a b ) ^^ ( b c ) )", ["a"], ["a", "b", "c"], True),
+ ( "^^ ( || ( a b ) ^^ ( b c ) )", ["b"], ["a", "b", "c"], False),
+ ( "^^ ( || ( a b ) ^^ ( b c ) )", ["c"], ["a", "b", "c"], True),
+ ( "^^ ( || ( a b ) ^^ ( b c ) )", ["a", "b"], ["a", "b", "c"], False),
+ ( "^^ ( || ( a b ) ^^ ( b c ) )", ["a", "c"], ["a", "b", "c"], False),
+ ( "^^ ( || ( a b ) ^^ ( b c ) )", ["b", "c"], ["a", "b", "c"], True),
+ ( "^^ ( || ( a b ) ^^ ( b c ) )", ["a", "b", "c"], ["a", "b", "c"], True),
+
+ ( "|| ( ( a b ) c )", ["a", "b", "c"], ["a", "b", "c"], True),
+ ( "|| ( ( a b ) c )", ["b", "c"], ["a", "b", "c"], True),
+ ( "|| ( ( a b ) c )", ["a", "c"], ["a", "b", "c"], True),
+ ( "|| ( ( a b ) c )", ["a", "b"], ["a", "b", "c"], True),
+ ( "|| ( ( a b ) c )", ["a"], ["a", "b", "c"], False),
+ ( "|| ( ( a b ) c )", ["b"], ["a", "b", "c"], False),
+ ( "|| ( ( a b ) c )", ["c"], ["a", "b", "c"], True),
+ ( "|| ( ( a b ) c )", [], ["a", "b", "c"], False),
+
+ ( "^^ ( ( a b ) c )", ["a", "b", "c"], ["a", "b", "c"], False),
+ ( "^^ ( ( a b ) c )", ["b", "c"], ["a", "b", "c"], True),
+ ( "^^ ( ( a b ) c )", ["a", "c"], ["a", "b", "c"], True),
+ ( "^^ ( ( a b ) c )", ["a", "b"], ["a", "b", "c"], True),
+ ( "^^ ( ( a b ) c )", ["a"], ["a", "b", "c"], False),
+ ( "^^ ( ( a b ) c )", ["b"], ["a", "b", "c"], False),
+ ( "^^ ( ( a b ) c )", ["c"], ["a", "b", "c"], True),
+ ( "^^ ( ( a b ) c )", [], ["a", "b", "c"], False),
+ )
+
+ test_cases_xfail = (
+ ( "^^ ( || ( a b ) ^^ ( b c ) )", [], ["a", "b"]),
+ ( "^^ ( || ( a b ) ^^ ( b c )", [], ["a", "b", "c"]),
+ ( "^^( || ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"]),
+ ( "^^ || ( a b ) ^^ ( b c )", [], ["a", "b", "c"]),
+ ( "^^ ( ( || ) ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"]),
+ ( "^^ ( || ( a b ) ) ^^ ( b c ) )", [], ["a", "b", "c"]),
+ )
+
+ for required_use, use, iuse, expected in test_cases:
+ self.assertEqual(bool(check_required_use(required_use, use, iuse.__contains__)), \
+ expected, required_use + ", USE = " + " ".join(use))
+
+ for required_use, use, iuse in test_cases_xfail:
+ self.assertRaisesMsg(required_use + ", USE = " + " ".join(use), \
+ InvalidDependString, check_required_use, required_use, use, iuse.__contains__)
+
+ def testCheckRequiredUseFilterSatisfied(self):
+ """
+ Test filtering of satisfied parts of REQUIRED_USE,
+ in order to reduce noise for bug #353234.
+ """
+ test_cases = (
+ (
+ "bindist? ( !amr !faac !win32codecs ) cdio? ( !cdparanoia !cddb ) dvdnav? ( dvd )",
+ ("cdio", "cdparanoia"),
+ "cdio? ( !cdparanoia )"
+ ),
+ (
+ "|| ( !amr !faac !win32codecs ) cdio? ( !cdparanoia !cddb ) ^^ ( foo bar )",
+ ["cdio", "cdparanoia", "foo"],
+ "cdio? ( !cdparanoia )"
+ ),
+ (
+ "^^ ( || ( a b ) c )",
+ ("a", "b", "c"),
+ "^^ ( || ( a b ) c )"
+ ),
+ (
+ "^^ ( || ( ( a b ) ) ( c ) )",
+ ("a", "b", "c"),
+ "^^ ( ( a b ) c )"
+ ),
+ (
+ "a? ( ( c e ) ( b d ) )",
+ ("a", "c", "e"),
+ "a? ( b d )"
+ ),
+ (
+ "a? ( ( c e ) ( b d ) )",
+ ("a", "b", "c", "e"),
+ "a? ( d )"
+ ),
+ (
+ "a? ( ( c e ) ( c e b c d e c ) )",
+ ("a", "c", "e"),
+ "a? ( b d )"
+ ),
+ (
+ "^^ ( || ( a b ) ^^ ( b c ) )",
+ ("a", "b"),
+ "^^ ( || ( a b ) ^^ ( b c ) )"
+ ),
+ (
+ "^^ ( || ( a b ) ^^ ( b c ) )",
+ ["a", "c"],
+ "^^ ( || ( a b ) ^^ ( b c ) )"
+ ),
+ (
+ "^^ ( || ( a b ) ^^ ( b c ) )",
+ ["b", "c"],
+ ""
+ ),
+ (
+ "^^ ( || ( a b ) ^^ ( b c ) )",
+ ["a", "b", "c"],
+ ""
+ ),
+ (
+ "^^ ( ( a b c ) ( b c d ) )",
+ ["a", "b", "c"],
+ ""
+ ),
+ (
+ "^^ ( ( a b c ) ( b c d ) )",
+ ["a", "b", "c", "d"],
+ "^^ ( ( a b c ) ( b c d ) )"
+ ),
+ (
+ "^^ ( ( a b c ) ( b c !d ) )",
+ ["a", "b", "c"],
+ "^^ ( ( a b c ) ( b c !d ) )"
+ ),
+ (
+ "^^ ( ( a b c ) ( b c !d ) )",
+ ["a", "b", "c", "d"],
+ ""
+ ),
+ (
+ "( ( ( a ) ) ( ( ( b c ) ) ) )",
+ [""],
+ "a b c"
+ ),
+ (
+ "|| ( ( ( ( a ) ) ( ( ( b c ) ) ) ) )",
+ [""],
+ "a b c"
+ ),
+ (
+ "|| ( ( a ( ( ) ( ) ) ( ( ) ) ( b ( ) c ) ) )",
+ [""],
+ "a b c"
+ ),
+ (
+ "|| ( ( a b c ) ) || ( ( d e f ) )",
+ [""],
+ "a b c d e f"
+ ),
+ )
+ for required_use, use, expected in test_cases:
+ result = check_required_use(required_use, use, lambda k: True).tounicode()
+ self.assertEqual(result, expected,
+ "REQUIRED_USE = '%s', USE = '%s', '%s' != '%s'" % \
+ (required_use, " ".join(use), result, expected))
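+
+# Direct usage sketch (not part of the upstream file):
+#
+#	from portage.dep import check_required_use
+#	result = check_required_use("^^ ( a b )", ["a"], ["a", "b"].__contains__)
+#	bool(result)        # True -> REQUIRED_USE is satisfied
+#	result.tounicode()  # '' once all parts are satisfied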
diff --git a/portage_with_autodep/pym/portage/tests/dep/testExtendedAtomDict.py b/portage_with_autodep/pym/portage/tests/dep/testExtendedAtomDict.py
new file mode 100644
index 0000000..69d092e
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/testExtendedAtomDict.py
@@ -0,0 +1,18 @@
+# testExtendedAtomDict.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import ExtendedAtomDict
+
+class TestExtendedAtomDict(TestCase):
+
+ def testExtendedAtomDict(self):
+ d = ExtendedAtomDict(dict)
+ d["*/*"] = { "test1": "x" }
+ d["dev-libs/*"] = { "test2": "y" }
+ d.setdefault("sys-apps/portage", {})["test3"] = "z"
+ self.assertEqual(d.get("dev-libs/A"), { "test1": "x", "test2": "y" })
+ self.assertEqual(d.get("sys-apps/portage"), { "test1": "x", "test3": "z" })
+ self.assertEqual(d["dev-libs/*"], { "test2": "y" })
+ self.assertEqual(d["sys-apps/portage"], {'test1': 'x', 'test3': 'z'})
diff --git a/portage_with_autodep/pym/portage/tests/dep/testExtractAffectingUSE.py b/portage_with_autodep/pym/portage/tests/dep/testExtractAffectingUSE.py
new file mode 100644
index 0000000..026a552
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/testExtractAffectingUSE.py
@@ -0,0 +1,75 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import extract_affecting_use
+from portage.exception import InvalidDependString
+
+class TestExtractAffectingUSE(TestCase):
+
+ def testExtractAffectingUSE(self):
+ test_cases = (
+ ("a? ( A ) !b? ( B ) !c? ( C ) d? ( D )", "A", ("a",)),
+ ("a? ( A ) !b? ( B ) !c? ( C ) d? ( D )", "B", ("b",)),
+ ("a? ( A ) !b? ( B ) !c? ( C ) d? ( D )", "C", ("c",)),
+ ("a? ( A ) !b? ( B ) !c? ( C ) d? ( D )", "D", ("d",)),
+
+ ("a? ( b? ( AB ) )", "AB", ("a", "b")),
+ ("a? ( b? ( c? ( ABC ) ) )", "ABC", ("a", "b", "c")),
+
+ ("a? ( A b? ( c? ( ABC ) AB ) )", "A", ("a",)),
+ ("a? ( A b? ( c? ( ABC ) AB ) )", "AB", ("a", "b")),
+ ("a? ( A b? ( c? ( ABC ) AB ) )", "ABC", ("a", "b", "c")),
+ ("a? ( A b? ( c? ( ABC ) AB ) ) X", "X", []),
+ ("X a? ( A b? ( c? ( ABC ) AB ) )", "X", []),
+
+ ("ab? ( || ( A B ) )", "A", ("ab",)),
+ ("!ab? ( || ( A B ) )", "B", ("ab",)),
+ ("ab? ( || ( A || ( b? ( || ( B C ) ) ) ) )", "A", ("ab",)),
+ ("ab? ( || ( A || ( b? ( || ( B C ) ) ) ) )", "B", ("ab", "b")),
+ ("ab? ( || ( A || ( b? ( || ( B C ) ) ) ) )", "C", ("ab", "b")),
+
+ ("( ab? ( || ( ( A ) || ( b? ( ( ( || ( B ( C ) ) ) ) ) ) ) ) )", "A", ("ab",)),
+ ("( ab? ( || ( ( A ) || ( b? ( ( ( || ( B ( C ) ) ) ) ) ) ) ) )", "B", ("ab", "b")),
+ ("( ab? ( || ( ( A ) || ( b? ( ( ( || ( B ( C ) ) ) ) ) ) ) ) )", "C", ("ab", "b")),
+
+ ("a? ( A )", "B", []),
+
+ ("a? ( || ( A B ) )", "B", ["a"]),
+
+ # test USE dep defaults for bug #363073
+ ("a? ( >=dev-lang/php-5.2[pcre(+)] )", ">=dev-lang/php-5.2[pcre(+)]", ["a"]),
+ )
+
+ test_cases_xfail = (
+ ("? ( A )", "A"),
+ ("!? ( A )", "A"),
+ ("( A", "A"),
+ ("A )", "A"),
+
+ ("||( A B )", "A"),
+ ("|| (A B )", "A"),
+ ("|| ( A B)", "A"),
+ ("|| ( A B", "A"),
+ ("|| A B )", "A"),
+ ("|| A B", "A"),
+ ("|| ( A B ) )", "A"),
+ ("|| || B C", "A"),
+ ("|| ( A B || )", "A"),
+ ("a? A", "A"),
+ ("( || ( || || ( A ) foo? ( B ) ) )", "A"),
+ ("( || ( || bar? ( A ) foo? ( B ) ) )", "A"),
+ )
+
+ for dep, atom, expected in test_cases:
+ expected = set(expected)
+ result = extract_affecting_use(dep, atom, eapi="0")
+ fail_msg = "dep: " + dep + ", atom: " + atom + ", got: " + \
+ " ".join(sorted(result)) + ", expected: " + " ".join(sorted(expected))
+ self.assertEqual(result, expected, fail_msg)
+
+ for dep, atom in test_cases_xfail:
+			fail_msg = "dep: " + dep + ", atom: " + atom + \
+				", expected: InvalidDependString"
+ self.assertRaisesMsg(fail_msg, \
+ InvalidDependString, extract_affecting_use, dep, atom, eapi="0")
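+
+# Direct usage sketch (not part of the upstream file):
+#
+#	from portage.dep import extract_affecting_use
+#	extract_affecting_use("a? ( b? ( AB ) )", "AB", eapi="0")
+#	# -> set(['a', 'b'])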
diff --git a/portage_with_autodep/pym/portage/tests/dep/testStandalone.py b/portage_with_autodep/pym/portage/tests/dep/testStandalone.py
new file mode 100644
index 0000000..e9f01df
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/testStandalone.py
@@ -0,0 +1,36 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import cpvequal
+from portage.exception import PortageException
+
+class TestStandalone(TestCase):
+	""" Test some small functions in portage.dep
+ """
+
+ def testCPVequal(self):
+
+ test_cases = (
+ ( "sys-apps/portage-2.1","sys-apps/portage-2.1", True ),
+ ( "sys-apps/portage-2.1","sys-apps/portage-2.0", False ),
+ ( "sys-apps/portage-2.1","sys-apps/portage-2.1-r1", False ),
+ ( "sys-apps/portage-2.1-r1","sys-apps/portage-2.1", False ),
+ ( "sys-apps/portage-2.1_alpha3","sys-apps/portage-2.1", False ),
+ ( "sys-apps/portage-2.1_alpha3_p6","sys-apps/portage-2.1_alpha3", False ),
+ ( "sys-apps/portage-2.1_alpha3","sys-apps/portage-2.1", False ),
+ ( "sys-apps/portage-2.1","sys-apps/X-2.1", False ),
+ ( "sys-apps/portage-2.1","portage-2.1", False ),
+ )
+
+ test_cases_xfail = (
+ ( "sys-apps/portage","sys-apps/portage" ),
+ ( "sys-apps/portage-2.1-6","sys-apps/portage-2.1-6" ),
+ )
+
+ for cpv1, cpv2, expected_result in test_cases:
+ self.assertEqual(cpvequal(cpv1, cpv2), expected_result)
+
+ for cpv1, cpv2 in test_cases_xfail:
+ self.assertRaisesMsg("cpvequal("+cpv1+", "+cpv2+")", \
+ PortageException, cpvequal, cpv1, cpv2)
diff --git a/portage_with_autodep/pym/portage/tests/dep/test_best_match_to_list.py b/portage_with_autodep/pym/portage/tests/dep/test_best_match_to_list.py
new file mode 100644
index 0000000..d050adc
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/test_best_match_to_list.py
@@ -0,0 +1,43 @@
+# test_best_match_to_list.py -- Portage Unit Testing Functionality
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import Atom, best_match_to_list
+
+class Test_best_match_to_list(TestCase):
+
+ def best_match_to_list_wrapper(self, mypkg, mylist):
+ """
+ This function uses best_match_to_list to create sorted
+ list of matching atoms.
+ """
+ ret = []
+ while mylist:
+ m = best_match_to_list(mypkg, mylist)
+ if m is not None:
+ ret.append(m)
+ mylist.remove(m)
+ else:
+ break
+
+ return ret
+
+ def testBest_match_to_list(self):
+ tests = [
+ ("dev-libs/A-1", [Atom("dev-libs/A"), Atom("=dev-libs/A-1")], \
+ [Atom("=dev-libs/A-1"), Atom("dev-libs/A")]),
+ ("dev-libs/A-1", [Atom("dev-libs/B"), Atom("=dev-libs/A-1:0")], \
+ [Atom("=dev-libs/A-1:0")]),
+ ("dev-libs/A-1", [Atom("dev-libs/*", allow_wildcard=True), Atom("=dev-libs/A-1:0")], \
+ [Atom("=dev-libs/A-1:0"), Atom("dev-libs/*", allow_wildcard=True)]),
+ ("dev-libs/A-1:0", [Atom("dev-*/*", allow_wildcard=True), Atom("dev-*/*:0", allow_wildcard=True),\
+ Atom("dev-libs/A"), Atom("<=dev-libs/A-2"), Atom("dev-libs/A:0"), \
+ Atom("=dev-libs/A-1*"), Atom("~dev-libs/A-1"), Atom("=dev-libs/A-1")], \
+ [Atom("=dev-libs/A-1"), Atom("~dev-libs/A-1"), Atom("=dev-libs/A-1*"), \
+ Atom("dev-libs/A:0"), Atom("<=dev-libs/A-2"), Atom("dev-libs/A"), \
+ Atom("dev-*/*:0", allow_wildcard=True), Atom("dev-*/*", allow_wildcard=True)])
+ ]
+
+ for pkg, atom_list, result in tests:
+ self.assertEqual( self.best_match_to_list_wrapper( pkg, atom_list ), result )
diff --git a/portage_with_autodep/pym/portage/tests/dep/test_dep_getcpv.py b/portage_with_autodep/pym/portage/tests/dep/test_dep_getcpv.py
new file mode 100644
index 0000000..8a0a8aa
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/test_dep_getcpv.py
@@ -0,0 +1,35 @@
+# test_dep_getcpv.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import dep_getcpv
+
+class DepGetCPV(TestCase):
+	""" A simple testcase for dep_getcpv
+ """
+
+ def testDepGetCPV(self):
+
+ prefix_ops = ["<", ">", "=", "~", "<=",
+ ">=", "!=", "!<", "!>", "!~"]
+
+ bad_prefix_ops = [ ">~", "<~", "~>", "~<" ]
+ postfix_ops = [ ("=", "*"), ]
+
+ cpvs = ["sys-apps/portage-2.1", "sys-apps/portage-2.1",
+ "sys-apps/portage-2.1"]
+ slots = [None, ":foo", ":2"]
+ for cpv in cpvs:
+ for slot in slots:
+ for prefix in prefix_ops:
+ mycpv = prefix + cpv
+ if slot:
+ mycpv += slot
+ self.assertEqual( dep_getcpv( mycpv ), cpv )
+
+ for prefix, postfix in postfix_ops:
+ mycpv = prefix + cpv + postfix
+ if slot:
+ mycpv += slot
+ self.assertEqual( dep_getcpv( mycpv ), cpv )
diff --git a/portage_with_autodep/pym/portage/tests/dep/test_dep_getrepo.py b/portage_with_autodep/pym/portage/tests/dep/test_dep_getrepo.py
new file mode 100644
index 0000000..78ead8c
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/test_dep_getrepo.py
@@ -0,0 +1,29 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import dep_getrepo
+
+class DepGetRepo(TestCase):
+ """ A simple testcase for isvalidatom
+ """
+
+ def testDepGetRepo(self):
+
+ repo_char = "::"
+ repos = ( "a", "repo-name", "repo_name", "repo123", None )
+ cpvs = ["sys-apps/portage"]
+ versions = ["2.1.1","2.1-r1", None]
+ uses = ["[use]", None]
+ for cpv in cpvs:
+ for version in versions:
+ for use in uses:
+ for repo in repos:
+ pkg = cpv
+ if version:
+ pkg = '=' + pkg + '-' + version
+ if repo is not None:
+ pkg = pkg + repo_char + repo
+ if use:
+ pkg = pkg + use
+ self.assertEqual( dep_getrepo( pkg ), repo )
diff --git a/portage_with_autodep/pym/portage/tests/dep/test_dep_getslot.py b/portage_with_autodep/pym/portage/tests/dep/test_dep_getslot.py
new file mode 100644
index 0000000..206cecc
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/test_dep_getslot.py
@@ -0,0 +1,28 @@
+# test_dep_getslot.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import dep_getslot
+
+class DepGetSlot(TestCase):
+ """ A simple testcase for isvalidatom
+ """
+
+ def testDepGetSlot(self):
+
+ slot_char = ":"
+ slots = ( "a", "1.2", "1", "IloveVapier", None )
+ cpvs = ["sys-apps/portage"]
+ versions = ["2.1.1","2.1-r1"]
+ for cpv in cpvs:
+ for version in versions:
+ for slot in slots:
+ mycpv = cpv
+ if version:
+ mycpv = '=' + mycpv + '-' + version
+ if slot is not None:
+ self.assertEqual( dep_getslot(
+ mycpv + slot_char + slot ), slot )
+ else:
+ self.assertEqual( dep_getslot( mycpv ), slot )
diff --git a/portage_with_autodep/pym/portage/tests/dep/test_dep_getusedeps.py b/portage_with_autodep/pym/portage/tests/dep/test_dep_getusedeps.py
new file mode 100644
index 0000000..d2494f7
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/test_dep_getusedeps.py
@@ -0,0 +1,35 @@
+# test_dep_getusedeps.py -- Portage Unit Testing Functionality
+# Copyright 2007-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import dep_getusedeps
+
+from portage.tests import test_cps, test_slots, test_versions, test_usedeps
+
+class DepGetUseDeps(TestCase):
+ """ A simple testcase for dep_getusedeps
+ """
+
+ def testDepGetUseDeps(self):
+
+ for mycpv in test_cps:
+ for version in test_versions:
+ for slot in test_slots:
+ for use in test_usedeps:
+ cpv = mycpv[:]
+ if version:
+ cpv += version
+ if slot:
+ cpv += ":" + slot
+ if isinstance(use, tuple):
+ cpv += "[%s]" % (",".join(use),)
+ self.assertEqual( dep_getusedeps(
+ cpv ), use )
+ else:
+ if len(use):
+ self.assertEqual( dep_getusedeps(
+ cpv + "[" + use + "]" ), (use,) )
+ else:
+ self.assertEqual( dep_getusedeps(
+ cpv + "[" + use + "]" ), () )
diff --git a/portage_with_autodep/pym/portage/tests/dep/test_get_operator.py b/portage_with_autodep/pym/portage/tests/dep/test_get_operator.py
new file mode 100644
index 0000000..4f9848f
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/test_get_operator.py
@@ -0,0 +1,33 @@
+# test_get_operator.py -- Portage Unit Testing Functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import get_operator
+
+class GetOperator(TestCase):
+
+ def testGetOperator(self):
+
+ # get_operator does not validate operators
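+ # e.g. get_operator(">=sys-apps/portage-2.1") returns ">=",
+ # get_operator("sys-apps/portage") returns None, and the glob form
+ # "=sys-apps/portage-2.1*" yields the special operator "=*".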
+ tests = [ ( "~", "~" ), ( "=", "=" ), ( ">", ">" ),
+ ( ">=", ">=" ), ( "<=", "<=" ),
+ ]
+
+ test_cpvs = ["sys-apps/portage-2.1"]
+ slots = [ None,"1","linux-2.5.6" ]
+ for cpv in test_cpvs:
+ for test in tests:
+ for slot in slots:
+ atom = cpv[:]
+ if slot:
+ atom += ":" + slot
+ result = get_operator( test[0] + atom )
+ self.assertEqual( result, test[1],
+ msg="get_operator(%s) != %s" % (test[0] + atom, test[1]) )
+
+ result = get_operator( "sys-apps/portage" )
+ self.assertEqual( result, None )
+
+ result = get_operator( "=sys-apps/portage-2.1*" )
+ self.assertEqual( result, "=*" )
diff --git a/portage_with_autodep/pym/portage/tests/dep/test_get_required_use_flags.py b/portage_with_autodep/pym/portage/tests/dep/test_get_required_use_flags.py
new file mode 100644
index 0000000..06f8110
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/test_get_required_use_flags.py
@@ -0,0 +1,42 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import get_required_use_flags
+from portage.exception import InvalidDependString
+
+class TestCheckRequiredUse(TestCase):
+
+ def testCheckRequiredUse(self):
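+ # get_required_use_flags() should return the set of all USE flag
+ # names referenced in a REQUIRED_USE string; for example the
+ # input "a? ( ^^ ( b c ) )" yields set(["a", "b", "c"]).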
+ test_cases = (
+ ("a b c", ["a", "b", "c"]),
+
+ ("|| ( a b c )", ["a", "b", "c"]),
+ ("^^ ( a b c )", ["a", "b", "c"]),
+
+ ("|| ( a b ^^ ( d e f ) )", ["a", "b", "d", "e", "f"]),
+ ("^^ ( a b || ( d e f ) )", ["a", "b", "d", "e", "f"]),
+
+ ("( ^^ ( a ( b ) ( || ( ( d e ) ( f ) ) ) ) )", ["a", "b", "d", "e", "f"]),
+
+ ("a? ( ^^ ( b c ) )", ["a", "b", "c"]),
+ ("a? ( ^^ ( !b !d? ( c ) ) )", ["a", "b", "c", "d"]),
+ )
+
+ test_cases_xfail = (
+ ("^^ ( || ( a b ) ^^ ( b c )"),
+ ("^^( || ( a b ) ^^ ( b c ) )"),
+ ("^^ || ( a b ) ^^ ( b c )"),
+ ("^^ ( ( || ) ( a b ) ^^ ( b c ) )"),
+ ("^^ ( || ( a b ) ) ^^ ( b c ) )"),
+ )
+
+ for required_use, expected in test_cases:
+ result = get_required_use_flags(required_use)
+ expected = set(expected)
+ self.assertEqual(result, expected, \
+ "REQUIRED_USE: '%s', expected: '%s', got: '%s'" % (required_use, expected, result))
+
+ for required_use in test_cases_xfail:
+ self.assertRaisesMsg("REQUIRED_USE: '%s'" % (required_use,), \
+ InvalidDependString, get_required_use_flags, required_use)
diff --git a/portage_with_autodep/pym/portage/tests/dep/test_isjustname.py b/portage_with_autodep/pym/portage/tests/dep/test_isjustname.py
new file mode 100644
index 0000000..c16fb54
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/test_isjustname.py
@@ -0,0 +1,24 @@
+# test_isjustname.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import isjustname
+
+class IsJustName(TestCase):
+
+ def testIsJustName(self):
+
+ cats = ( "", "sys-apps/", "foo/", "virtual/" )
+ pkgs = ( "portage", "paludis", "pkgcore", "notARealPkg" )
+ vers = ( "", "-2.0-r3", "-1.0_pre2", "-3.1b" )
+
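+ # isjustname() should be True only for a bare category/package name:
+ # e.g. isjustname("sys-apps/portage") -> True, while appending a
+ # version like "sys-apps/portage-2.0-r3" makes it False.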
+ for pkg in pkgs:
+ for cat in cats:
+ for ver in vers:
+ if len(ver):
+ self.assertFalse( isjustname( cat + pkg + ver ),
+ msg="isjustname(%s) is True!" % (cat + pkg + ver) )
+ else:
+ self.assertTrue( isjustname( cat + pkg + ver ),
+ msg="isjustname(%s) is False!" % (cat + pkg + ver) )
diff --git a/portage_with_autodep/pym/portage/tests/dep/test_isvalidatom.py b/portage_with_autodep/pym/portage/tests/dep/test_isvalidatom.py
new file mode 100644
index 0000000..173ab0d
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/test_isvalidatom.py
@@ -0,0 +1,146 @@
+# Copyright 2006-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import isvalidatom
+
+class IsValidAtomTestCase(object):
+ def __init__(self, atom, expected, allow_wildcard=False, allow_repo=False):
+ self.atom = atom
+ self.expected = expected
+ self.allow_wildcard = allow_wildcard
+ self.allow_repo = allow_repo
+
+class IsValidAtom(TestCase):
+
+ def testIsValidAtom(self):
+
+ test_cases = (
+ IsValidAtomTestCase("sys-apps/portage", True),
+ IsValidAtomTestCase("=sys-apps/portage-2.1", True),
+ IsValidAtomTestCase("=sys-apps/portage-2.1*", True),
+ IsValidAtomTestCase(">=sys-apps/portage-2.1", True),
+ IsValidAtomTestCase("<=sys-apps/portage-2.1", True),
+ IsValidAtomTestCase(">sys-apps/portage-2.1", True),
+ IsValidAtomTestCase("<sys-apps/portage-2.1", True),
+ IsValidAtomTestCase("~sys-apps/portage-2.1", True),
+ IsValidAtomTestCase("sys-apps/portage:foo", True),
+ IsValidAtomTestCase("sys-apps/portage-2.1:foo", False),
+ IsValidAtomTestCase( "sys-apps/portage-2.1:", False),
+ IsValidAtomTestCase("sys-apps/portage-2.1:", False),
+ IsValidAtomTestCase("sys-apps/portage-2.1:[foo]", False),
+ IsValidAtomTestCase("sys-apps/portage", True),
+ IsValidAtomTestCase("sys-apps/portage", True),
+ IsValidAtomTestCase("sys-apps/portage", True),
+ IsValidAtomTestCase("sys-apps/portage", True),
+ IsValidAtomTestCase("sys-apps/portage", True),
+ IsValidAtomTestCase("sys-apps/portage", True),
+ IsValidAtomTestCase("sys-apps/portage", True),
+
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[bar?,!baz?,!doc=,build=]", True),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[doc?]", True),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[!doc?]", True),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[doc=]", True),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[!doc=]", True),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[!doc]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[!-doc]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[!-doc=]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[!-doc?]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[-doc?]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[-doc=]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[-doc!=]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[-doc=]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[bar][-baz][doc?][!build?]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[bar,-baz,doc?,!build?]", True),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[bar,-baz,doc?,!build?,]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[,bar,-baz,doc?,!build?]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[bar,-baz][doc?,!build?]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[bar][doc,build]", False),
+ IsValidAtomTestCase(">~cate-gory/foo-1.0", False),
+ IsValidAtomTestCase(">~category/foo-1.0", False),
+ IsValidAtomTestCase("<~category/foo-1.0", False),
+ IsValidAtomTestCase("###cat/foo-1.0", False),
+ IsValidAtomTestCase("~sys-apps/portage", False),
+ IsValidAtomTestCase("portage", False),
+ IsValidAtomTestCase("=portage", False),
+ IsValidAtomTestCase(">=portage-2.1", False),
+ IsValidAtomTestCase("~portage-2.1", False),
+ IsValidAtomTestCase("=portage-2.1*", False),
+ IsValidAtomTestCase("null/portage", True),
+ IsValidAtomTestCase("null/portage*:0", False),
+ IsValidAtomTestCase(">=null/portage-2.1", True),
+ IsValidAtomTestCase(">=null/portage", False),
+ IsValidAtomTestCase(">null/portage", False),
+ IsValidAtomTestCase("=null/portage*", False),
+ IsValidAtomTestCase("=null/portage", False),
+ IsValidAtomTestCase("~null/portage", False),
+ IsValidAtomTestCase("<=null/portage", False),
+ IsValidAtomTestCase("<null/portage", False),
+ IsValidAtomTestCase("~null/portage-2.1", True),
+ IsValidAtomTestCase("=null/portage-2.1*", True),
+ IsValidAtomTestCase("null/portage-2.1*", False),
+ IsValidAtomTestCase("app-doc/php-docs-20071125", False),
+ IsValidAtomTestCase("app-doc/php-docs-20071125-r2", False),
+ IsValidAtomTestCase("=foo/bar-1-r1-1-r1", False),
+ IsValidAtomTestCase("foo/-z-1", False),
+
+ # These are invalid because pkg name must not end in hyphen
+ # followed by numbers
+ IsValidAtomTestCase("=foo/bar-1-r1-1-r1", False),
+ IsValidAtomTestCase("=foo/bar-123-1", False),
+ IsValidAtomTestCase("=foo/bar-123-1*", False),
+ IsValidAtomTestCase("foo/bar-123", False),
+ IsValidAtomTestCase("=foo/bar-123-1-r1", False),
+ IsValidAtomTestCase("=foo/bar-123-1-r1*", False),
+ IsValidAtomTestCase("foo/bar-123-r1", False),
+ IsValidAtomTestCase("foo/bar-1", False),
+
+ IsValidAtomTestCase("=foo/bar--baz-1-r1", True),
+ IsValidAtomTestCase("=foo/bar-baz--1-r1", True),
+ IsValidAtomTestCase("=foo/bar-baz---1-r1", True),
+ IsValidAtomTestCase("=foo/bar-baz---1", True),
+ IsValidAtomTestCase("=foo/bar-baz-1--r1", False),
+ IsValidAtomTestCase("games-strategy/ufo2000", True),
+ IsValidAtomTestCase("~games-strategy/ufo2000-0.1", True),
+ IsValidAtomTestCase("=media-libs/x264-20060810", True),
+ IsValidAtomTestCase("foo/b", True),
+ IsValidAtomTestCase("app-text/7plus", True),
+ IsValidAtomTestCase("foo/666", True),
+ IsValidAtomTestCase("=dev-libs/poppler-qt3-0.11*", True),
+
+ #Testing atoms with repositories
+ IsValidAtomTestCase("sys-apps/portage::repo_123-name", True, allow_repo=True),
+ IsValidAtomTestCase("=sys-apps/portage-2.1::repo", True, allow_repo=True),
+ IsValidAtomTestCase("=sys-apps/portage-2.1*::repo", True, allow_repo=True),
+ IsValidAtomTestCase("sys-apps/portage:foo::repo", True, allow_repo=True),
+ IsValidAtomTestCase("sys-apps/portage-2.1:foo::repo", False, allow_repo=True),
+ IsValidAtomTestCase("sys-apps/portage-2.1:::repo", False, allow_repo=True),
+ IsValidAtomTestCase("sys-apps/portage-2.1:::repo[foo]", False, allow_repo=True),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo::repo[bar?,!baz?,!doc=,build=]", True, allow_repo=True),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo::repo[doc?]", True, allow_repo=True),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo::repo[!doc]", False, allow_repo=True),
+ IsValidAtomTestCase("###cat/foo-1.0::repo", False, allow_repo=True),
+ IsValidAtomTestCase("~sys-apps/portage::repo", False, allow_repo=True),
+ IsValidAtomTestCase("portage::repo", False, allow_repo=True),
+ IsValidAtomTestCase("=portage::repo", False, allow_repo=True),
+ IsValidAtomTestCase("null/portage::repo", True, allow_repo=True),
+ IsValidAtomTestCase("app-doc/php-docs-20071125::repo", False, allow_repo=True),
+ IsValidAtomTestCase("=foo/bar-1-r1-1-r1::repo", False, allow_repo=True),
+
+ IsValidAtomTestCase("sys-apps/portage::repo_123-name", False, allow_repo=False),
+ IsValidAtomTestCase("=sys-apps/portage-2.1::repo", False, allow_repo=False),
+ IsValidAtomTestCase("=sys-apps/portage-2.1*::repo", False, allow_repo=False),
+ IsValidAtomTestCase("sys-apps/portage:foo::repo", False, allow_repo=False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo::repo[bar?,!baz?,!doc=,build=]", False, allow_repo=False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo::repo[doc?]", False, allow_repo=False),
+ IsValidAtomTestCase("null/portage::repo", False, allow_repo=False),
+ )
+
+ for test_case in test_cases:
+ if test_case.expected:
+ atom_type = "valid"
+ else:
+ atom_type = "invalid"
+ self.assertEqual( bool(isvalidatom(test_case.atom, allow_wildcard=test_case.allow_wildcard, \
+ allow_repo=test_case.allow_repo)), test_case.expected,
+ msg="isvalidatom(%s) != %s" % ( test_case.atom, test_case.expected ) )
diff --git a/portage_with_autodep/pym/portage/tests/dep/test_match_from_list.py b/portage_with_autodep/pym/portage/tests/dep/test_match_from_list.py
new file mode 100644
index 0000000..afba414
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/test_match_from_list.py
@@ -0,0 +1,108 @@
+# Copyright 2006, 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+from portage.tests import TestCase
+from portage.dep import Atom, match_from_list, _repo_separator
+from portage.versions import catpkgsplit
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+class Package(object):
+ """
+ Provides a minimal subset of attributes of _emerge.Package.Package
+ """
+ def __init__(self, atom):
+ atom = Atom(atom, allow_repo=True)
+ self.cp = atom.cp
+ self.cpv = atom.cpv
+ self.cpv_split = catpkgsplit(self.cpv)
+ self.slot = atom.slot
+ self.repo = atom.repo
+ if atom.use:
+ self.use = self._use_class(atom.use.enabled)
+ self.iuse = self._iuse_class(atom.use.required)
+ else:
+ self.use = self._use_class([])
+ self.iuse = self._iuse_class([])
+
+ class _use_class(object):
+ def __init__(self, use):
+ self.enabled = frozenset(use)
+
+ class _iuse_class(object):
+ def __init__(self, iuse):
+ self.all = frozenset(iuse)
+
+ def is_valid_flag(self, flags):
+ if isinstance(flags, basestring):
+ flags = [flags]
+ for flag in flags:
+ if flag not in self.all:
+ return False
+ return True
+
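+ # match_from_list(atom, candidates) should return the candidates
+ # (plain cpv strings or Package objects) that satisfy the atom,
+ # honoring version operators, slots, USE deps and ::repo when given.
+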
+class Test_match_from_list(TestCase):
+
+ def testMatch_from_list(self):
+ tests = (
+ ("=sys-apps/portage-45*", [], [] ),
+ ("=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
+ ("!=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
+ ("!!=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
+ ("=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
+ ("=sys-apps/portage-045", ["sys-apps/portage-046"], [] ),
+ ("~sys-apps/portage-045", ["sys-apps/portage-045-r1"], ["sys-apps/portage-045-r1"] ),
+ ("~sys-apps/portage-045", ["sys-apps/portage-046-r1"], [] ),
+ ("<=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
+ ("<=sys-apps/portage-045", ["sys-apps/portage-046"], [] ),
+ ("<sys-apps/portage-046", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
+ ("<sys-apps/portage-046", ["sys-apps/portage-046"], [] ),
+ (">=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
+ (">=sys-apps/portage-047", ["sys-apps/portage-046-r1"], [] ),
+ (">sys-apps/portage-044", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
+ (">sys-apps/portage-047", ["sys-apps/portage-046-r1"], [] ),
+ ("sys-apps/portage:0", [Package("=sys-apps/portage-045:0")], ["sys-apps/portage-045"] ),
+ ("sys-apps/portage:0", [Package("=sys-apps/portage-045:1")], [] ),
+ ("=sys-fs/udev-1*", ["sys-fs/udev-123"], ["sys-fs/udev-123"]),
+ ("=sys-fs/udev-4*", ["sys-fs/udev-456"], ["sys-fs/udev-456"] ),
+ ("*/*", ["sys-fs/udev-456"], ["sys-fs/udev-456"] ),
+ ("sys-fs/*", ["sys-fs/udev-456"], ["sys-fs/udev-456"] ),
+ ("*/udev", ["sys-fs/udev-456"], ["sys-fs/udev-456"] ),
+ ("=sys-apps/portage-2*", ["sys-apps/portage-2.1"], ["sys-apps/portage-2.1"] ),
+ ("=sys-apps/portage-2.1*", ["sys-apps/portage-2.1.2"], ["sys-apps/portage-2.1.2"] ),
+ ("dev-libs/*", ["sys-apps/portage-2.1.2"], [] ),
+ ("*/tar", ["sys-apps/portage-2.1.2"], [] ),
+ ("*/*", ["dev-libs/A-1", "dev-libs/B-1"], ["dev-libs/A-1", "dev-libs/B-1"] ),
+ ("dev-libs/*", ["dev-libs/A-1", "sci-libs/B-1"], ["dev-libs/A-1"] ),
+
+ ("dev-libs/A[foo]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")], ["dev-libs/A-1"] ),
+ ("dev-libs/A[-foo]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")], ["dev-libs/A-2"] ),
+ ("dev-libs/A[-foo]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2")], [] ),
+ ("dev-libs/A[foo,bar]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")], [] ),
+ ("dev-libs/A[foo,bar]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo,bar]")], [] ),
+ ("dev-libs/A[foo,bar]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[foo,bar]")], ["dev-libs/A-2"] ),
+ ("dev-libs/A[foo,bar(+)]", [Package("=dev-libs/A-1[-foo]"), Package("=dev-libs/A-2[foo]")], ["dev-libs/A-2"] ),
+ ("dev-libs/A[foo,bar(-)]", [Package("=dev-libs/A-1[-foo]"), Package("=dev-libs/A-2[foo]")], [] ),
+ ("dev-libs/A[foo,-bar(-)]", [Package("=dev-libs/A-1[-foo,bar]"), Package("=dev-libs/A-2[foo]")], ["dev-libs/A-2"] ),
+
+ ("dev-libs/A::repo1", [Package("=dev-libs/A-1::repo1"), Package("=dev-libs/A-1::repo2")], ["dev-libs/A-1::repo1"] ),
+ ("dev-libs/A::repo2", [Package("=dev-libs/A-1::repo1"), Package("=dev-libs/A-1::repo2")], ["dev-libs/A-1::repo2"] ),
+ ("dev-libs/A::repo2[foo]", [Package("=dev-libs/A-1::repo1[foo]"), Package("=dev-libs/A-1::repo2[-foo]")], [] ),
+ ("dev-libs/A::repo2[foo]", [Package("=dev-libs/A-1::repo1[-foo]"), Package("=dev-libs/A-1::repo2[foo]")], ["dev-libs/A-1::repo2"] ),
+ ("dev-libs/A:1::repo2[foo]", [Package("=dev-libs/A-1:1::repo1"), Package("=dev-libs/A-1:2::repo2")], [] ),
+ ("dev-libs/A:1::repo2[foo]", [Package("=dev-libs/A-1:2::repo1"), Package("=dev-libs/A-1:1::repo2[foo]")], ["dev-libs/A-1::repo2"] ),
+ )
+
+ for atom, cpv_list, expected_result in tests:
+ result = []
+ for pkg in match_from_list( atom, cpv_list ):
+ if isinstance(pkg, Package):
+ if pkg.repo:
+ result.append(pkg.cpv + _repo_separator + pkg.repo)
+ else:
+ result.append(pkg.cpv)
+ else:
+ result.append(pkg)
+ self.assertEqual( result, expected_result )
diff --git a/portage_with_autodep/pym/portage/tests/dep/test_paren_reduce.py b/portage_with_autodep/pym/portage/tests/dep/test_paren_reduce.py
new file mode 100644
index 0000000..9a147a0
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/test_paren_reduce.py
@@ -0,0 +1,66 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import paren_reduce
+from portage.exception import InvalidDependString
+
+class TestParenReduce(TestCase):
+
+ def testParenReduce(self):
+
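+ # paren_reduce() should turn a dependency string into a nested list
+ # structure, e.g. paren_reduce("|| ( A B )") == ["||", ["A", "B"]],
+ # while rejecting malformed strings with InvalidDependString.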
+ test_cases = (
+ ( "A", ["A"]),
+ ( "( A )", ["A"]),
+ ( "|| ( A B )", [ "||", ["A", "B"] ]),
+ ( "|| ( A || ( B C ) )", [ "||", ["A", "||", ["B", "C"]]]),
+ ( "|| ( A || ( B C D ) )", [ "||", ["A", "||", ["B", "C", "D"]] ]),
+ ( "|| ( A || ( B || ( C D ) E ) )", [ "||", ["A", "||", ["B", "||", ["C", "D"], "E"]] ]),
+ ( "a? ( A )", ["a?", ["A"]]),
+
+ ( "( || ( ( ( A ) B ) ) )", ["A", "B"]),
+ ( "( || ( || ( ( A ) B ) ) )", [ "||", ["A", "B"] ]),
+ ( "|| ( A )", ["A"]),
+ ( "( || ( || ( || ( A ) foo? ( B ) ) ) )", [ "||", ["A", "foo?", ["B"] ]]),
+ ( "( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )", [ "||", ["bar?", ["A"], "foo?", ["B"] ]]),
+ ( "A || ( ) foo? ( ) B", ["A", "B"]),
+
+ ( "|| ( A ) || ( B )", ["A", "B"]),
+ ( "foo? ( A ) foo? ( B )", ["foo?", ["A"], "foo?", ["B"]]),
+
+ ( "|| ( ( A B ) C )", [ "||", [ ["A", "B"], "C"] ]),
+ ( "|| ( ( A B ) ( C ) )", [ "||", [ ["A", "B"], "C"] ]),
+ # test USE dep defaults for bug #354003
+ ( ">=dev-lang/php-5.2[pcre(+)]", [ ">=dev-lang/php-5.2[pcre(+)]" ]),
+ )
+
+ test_cases_xfail = (
+ "( A",
+ "A )",
+
+ "||( A B )",
+ "|| (A B )",
+ "|| ( A B)",
+ "|| ( A B",
+ "|| A B )",
+
+ "|| A B",
+ "|| ( A B ) )",
+ "|| || B C",
+
+ "|| ( A B || )",
+
+ "a? A",
+
+ ( "( || ( || || ( A ) foo? ( B ) ) )"),
+ ( "( || ( || bar? ( A ) foo? ( B ) ) )"),
+ )
+
+ for dep_str, expected_result in test_cases:
+ self.assertEqual(paren_reduce(dep_str), expected_result,
+ "input: '%s' result: %s != %s" % (dep_str,
+ paren_reduce(dep_str), expected_result))
+
+ for dep_str in test_cases_xfail:
+ self.assertRaisesMsg(dep_str,
+ InvalidDependString, paren_reduce, dep_str)
diff --git a/portage_with_autodep/pym/portage/tests/dep/test_use_reduce.py b/portage_with_autodep/pym/portage/tests/dep/test_use_reduce.py
new file mode 100644
index 0000000..1618430
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/test_use_reduce.py
@@ -0,0 +1,627 @@
+# Copyright 2009-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.exception import InvalidDependString
+from portage.dep import Atom, use_reduce
+
+class UseReduceTestCase(object):
+ def __init__(self, deparray, uselist=[], masklist=[], \
+ matchall=0, excludeall=[], is_src_uri=False, \
+ eapi="0", opconvert=False, flat=False, expected_result=None, \
+ is_valid_flag=None, token_class=None):
+ self.deparray = deparray
+ self.uselist = uselist
+ self.masklist = masklist
+ self.matchall = matchall
+ self.excludeall = excludeall
+ self.is_src_uri = is_src_uri
+ self.eapi = eapi
+ self.opconvert = opconvert
+ self.flat = flat
+ self.is_valid_flag = is_valid_flag
+ self.token_class = token_class
+ self.expected_result = expected_result
+
+ def run(self):
+ try:
+ return use_reduce(self.deparray, self.uselist, self.masklist, \
+ self.matchall, self.excludeall, self.is_src_uri, self.eapi, \
+ self.opconvert, self.flat, self.is_valid_flag, self.token_class)
+ except InvalidDependString as e:
+ raise InvalidDependString("%s: %s" % (e, self.deparray))
+
+class UseReduce(TestCase):
+
+ def always_true(self, unused_parameter):
+ return True
+
+ def always_false(self, unused_parameter):
+ return False
+
+ def testUseReduce(self):
+
+ EAPI_WITH_SRC_URI_ARROWS = "2"
+ EAPI_WITHOUT_SRC_URI_ARROWS = "0"
+
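+ # use_reduce() evaluates USE conditionals and flattens redundant
+ # parentheses; e.g. use_reduce("a? ( A ) b? ( B )", uselist=["a"])
+ # should yield ["A"], since the unset "b" branch is dropped.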
+ test_cases = (
+ UseReduceTestCase(
+ "a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+ uselist = ["a", "b", "c", "d"],
+ expected_result = ["A", "B"]
+ ),
+ UseReduceTestCase(
+ "a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+ uselist = ["a", "b", "c"],
+ expected_result = ["A", "B", "D"]
+ ),
+ UseReduceTestCase(
+ "a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+ uselist = ["b", "c"],
+ expected_result = ["B", "D"]
+ ),
+
+ UseReduceTestCase(
+ "a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+ matchall = True,
+ expected_result = ["A", "B", "C", "D"]
+ ),
+ UseReduceTestCase(
+ "a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+ masklist = ["a", "c"],
+ expected_result = ["C", "D"]
+ ),
+ UseReduceTestCase(
+ "a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+ matchall = True,
+ masklist = ["a", "c"],
+ expected_result = ["B", "C", "D"]
+ ),
+ UseReduceTestCase(
+ "a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+ uselist = ["a", "b"],
+ masklist = ["a", "c"],
+ expected_result = ["B", "C", "D"]
+ ),
+ UseReduceTestCase(
+ "a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+ excludeall = ["a", "c"],
+ expected_result = ["D"]
+ ),
+ UseReduceTestCase(
+ "a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+ uselist = ["b"],
+ excludeall = ["a", "c"],
+ expected_result = ["B", "D"]
+ ),
+ UseReduceTestCase(
+ "a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+ matchall = True,
+ excludeall = ["a", "c"],
+ expected_result = ["A", "B", "D"]
+ ),
+ UseReduceTestCase(
+ "a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+ matchall = True,
+ excludeall = ["a", "c"],
+ masklist = ["b"],
+ expected_result = ["A", "D"]
+ ),
+
+
+ UseReduceTestCase(
+ "a? ( b? ( AB ) )",
+ uselist = ["a", "b"],
+ expected_result = ["AB"]
+ ),
+ UseReduceTestCase(
+ "a? ( b? ( AB ) C )",
+ uselist = ["a"],
+ expected_result = ["C"]
+ ),
+ UseReduceTestCase(
+ "a? ( b? ( || ( AB CD ) ) )",
+ uselist = ["a", "b"],
+ expected_result = ["||", ["AB", "CD"]]
+ ),
+ UseReduceTestCase(
+ "|| ( || ( a? ( A ) b? ( B ) ) )",
+ uselist = ["a", "b"],
+ expected_result = ["||", ["A", "B"]]
+ ),
+ UseReduceTestCase(
+ "|| ( || ( a? ( A ) b? ( B ) ) )",
+ uselist = ["a"],
+ expected_result = ["A"]
+ ),
+ UseReduceTestCase(
+ "|| ( || ( a? ( A ) b? ( B ) ) )",
+ uselist = [],
+ expected_result = []
+ ),
+ UseReduceTestCase(
+ "|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
+ uselist = [],
+ expected_result = []
+ ),
+ UseReduceTestCase(
+ "|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
+ uselist = ["a"],
+ expected_result = ["A"]
+ ),
+ UseReduceTestCase(
+ "|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
+ uselist = ["b"],
+ expected_result = ["B"]
+ ),
+ UseReduceTestCase(
+ "|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
+ uselist = ["c"],
+ expected_result = []
+ ),
+ UseReduceTestCase(
+ "|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
+ uselist = ["a", "c"],
+ expected_result = ["||", [ "A", "C"]]
+ ),
+
+ #paren_reduce tests
+ UseReduceTestCase(
+ "A",
+ expected_result = ["A"]),
+ UseReduceTestCase(
+ "( A )",
+ expected_result = ["A"]),
+ UseReduceTestCase(
+ "|| ( A B )",
+ expected_result = [ "||", ["A", "B"] ]),
+ UseReduceTestCase(
+ "|| ( ( A B ) C )",
+ expected_result = [ "||", [ ["A", "B"], "C"] ]),
+ UseReduceTestCase(
+ "|| ( ( A B ) ( C ) )",
+ expected_result = [ "||", [ ["A", "B"], "C"] ]),
+ UseReduceTestCase(
+ "|| ( A || ( B C ) )",
+ expected_result = [ "||", ["A", "B", "C"]]),
+ UseReduceTestCase(
+ "|| ( A || ( B C D ) )",
+ expected_result = [ "||", ["A", "B", "C", "D"] ]),
+ UseReduceTestCase(
+ "|| ( A || ( B || ( C D ) E ) )",
+ expected_result = [ "||", ["A", "B", "C", "D", "E"] ]),
+ UseReduceTestCase(
+ "( || ( ( ( A ) B ) ) )",
+ expected_result = ["A", "B"] ),
+ UseReduceTestCase(
+ "( || ( || ( ( A ) B ) ) )",
+ expected_result = [ "||", ["A", "B"] ]),
+ UseReduceTestCase(
+ "|| ( A )",
+ expected_result = ["A"]),
+ UseReduceTestCase(
+ "( || ( || ( || ( A ) foo? ( B ) ) ) )",
+ expected_result = ["A"]),
+ UseReduceTestCase(
+ "( || ( || ( || ( A ) foo? ( B ) ) ) )",
+ uselist = ["foo"],
+ expected_result = [ "||", ["A", "B"] ]),
+ UseReduceTestCase(
+ "( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
+ expected_result = []),
+ UseReduceTestCase(
+ "( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
+ uselist = ["foo", "bar"],
+ expected_result = [ "||", [ "A", "B" ] ]),
+ UseReduceTestCase(
+ "A || ( bar? ( C ) ) foo? ( bar? ( C ) ) B",
+ expected_result = ["A", "B"]),
+ UseReduceTestCase(
+ "|| ( A ) || ( B )",
+ expected_result = ["A", "B"]),
+ UseReduceTestCase(
+ "foo? ( A ) foo? ( B )",
+ expected_result = []),
+ UseReduceTestCase(
+ "foo? ( A ) foo? ( B )",
+ uselist = ["foo"],
+ expected_result = ["A", "B"]),
+ UseReduceTestCase(
+ "|| ( A B ) C",
+ expected_result = ['||', ['A', 'B'], 'C']),
+ UseReduceTestCase(
+ "A || ( B C )",
+ expected_result = ['A', '||', ['B', 'C']]),
+
+ #SRC_URI stuff
+ UseReduceTestCase(
+ "http://foo/bar -> blah.tbz2",
+ is_src_uri = True,
+ eapi = EAPI_WITH_SRC_URI_ARROWS,
+ expected_result = ["http://foo/bar", "->", "blah.tbz2"]),
+ UseReduceTestCase(
+ "foo? ( http://foo/bar -> blah.tbz2 )",
+ uselist = [],
+ is_src_uri = True,
+ eapi = EAPI_WITH_SRC_URI_ARROWS,
+ expected_result = []),
+ UseReduceTestCase(
+ "foo? ( http://foo/bar -> blah.tbz2 )",
+ uselist = ["foo"],
+ is_src_uri = True,
+ eapi = EAPI_WITH_SRC_URI_ARROWS,
+ expected_result = ["http://foo/bar", "->", "blah.tbz2"]),
+ UseReduceTestCase(
+ "http://foo/bar -> bar.tbz2 foo? ( ftp://foo/a )",
+ uselist = [],
+ is_src_uri = True,
+ eapi = EAPI_WITH_SRC_URI_ARROWS,
+ expected_result = ["http://foo/bar", "->", "bar.tbz2"]),
+ UseReduceTestCase(
+ "http://foo/bar -> bar.tbz2 foo? ( ftp://foo/a )",
+ uselist = ["foo"],
+ is_src_uri = True,
+ eapi = EAPI_WITH_SRC_URI_ARROWS,
+ expected_result = ["http://foo/bar", "->", "bar.tbz2", "ftp://foo/a"]),
+ UseReduceTestCase(
+ "http://foo.com/foo http://foo/bar -> blah.tbz2",
+ uselist = ["foo"],
+ is_src_uri = True,
+ eapi = EAPI_WITH_SRC_URI_ARROWS,
+ expected_result = ["http://foo.com/foo", "http://foo/bar", "->", "blah.tbz2"]),
+
+ #opconvert tests
+ UseReduceTestCase(
+ "A",
+ opconvert = True,
+ expected_result = ["A"]),
+ UseReduceTestCase(
+ "( A )",
+ opconvert = True,
+ expected_result = ["A"]),
+ UseReduceTestCase(
+ "|| ( A B )",
+ opconvert = True,
+ expected_result = [['||', 'A', 'B']]),
+ UseReduceTestCase(
+ "|| ( ( A B ) C )",
+ opconvert = True,
+ expected_result = [['||', ['A', 'B'], 'C']]),
+ UseReduceTestCase(
+ "|| ( A || ( B C ) )",
+ opconvert = True,
+ expected_result = [['||', 'A', 'B', 'C']]),
+ UseReduceTestCase(
+ "|| ( A || ( B C D ) )",
+ opconvert = True,
+ expected_result = [['||', 'A', 'B', 'C', 'D']]),
+ UseReduceTestCase(
+ "|| ( A || ( B || ( C D ) E ) )",
+ expected_result = [ "||", ["A", "B", "C", "D", "E"] ]),
+ UseReduceTestCase(
+ "( || ( ( ( A ) B ) ) )",
+ opconvert = True,
+ expected_result = [ "A", "B" ] ),
+ UseReduceTestCase(
+ "( || ( || ( ( A ) B ) ) )",
+ opconvert = True,
+ expected_result = [['||', 'A', 'B']]),
+ UseReduceTestCase(
+ "|| ( A B ) C",
+ opconvert = True,
+ expected_result = [['||', 'A', 'B'], 'C']),
+ UseReduceTestCase(
+ "A || ( B C )",
+ opconvert = True,
+ expected_result = ['A', ['||', 'B', 'C']]),
+ UseReduceTestCase(
+ "A foo? ( || ( B || ( bar? ( || ( C D E ) ) !bar? ( F ) ) ) ) G",
+ uselist = ["foo", "bar"],
+ opconvert = True,
+ expected_result = ['A', ['||', 'B', 'C', 'D', 'E'], 'G']),
+ UseReduceTestCase(
+ "A foo? ( || ( B || ( bar? ( || ( C D E ) ) !bar? ( F ) ) ) ) G",
+ uselist = ["foo", "bar"],
+ opconvert = False,
+ expected_result = ['A', '||', ['B', 'C', 'D', 'E'], 'G']),
+
+ UseReduceTestCase(
+ "|| ( A )",
+ opconvert = True,
+ expected_result = ["A"]),
+ UseReduceTestCase(
+ "( || ( || ( || ( A ) foo? ( B ) ) ) )",
+ expected_result = ["A"]),
+ UseReduceTestCase(
+ "( || ( || ( || ( A ) foo? ( B ) ) ) )",
+ uselist = ["foo"],
+ opconvert = True,
+ expected_result = [['||', 'A', 'B']]),
+ UseReduceTestCase(
+ "( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
+ opconvert = True,
+ expected_result = []),
+ UseReduceTestCase(
+ "( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
+ uselist = ["foo", "bar"],
+ opconvert = True,
+ expected_result = [['||', 'A', 'B']]),
+ UseReduceTestCase(
+ "A || ( bar? ( C ) ) foo? ( bar? ( C ) ) B",
+ opconvert = True,
+ expected_result = ["A", "B"]),
+ UseReduceTestCase(
+ "|| ( A ) || ( B )",
+ opconvert = True,
+ expected_result = ["A", "B"]),
+ UseReduceTestCase(
+ "foo? ( A ) foo? ( B )",
+ opconvert = True,
+ expected_result = []),
+ UseReduceTestCase(
+ "foo? ( A ) foo? ( B )",
+ uselist = ["foo"],
+ opconvert = True,
+ expected_result = ["A", "B"]),
+ UseReduceTestCase(
+ "|| ( foo? ( || ( A B ) ) )",
+ uselist = ["foo"],
+ opconvert = True,
+ expected_result = [['||', 'A', 'B']]),
+
+ UseReduceTestCase(
+ "|| ( ( A B ) foo? ( || ( C D ) ) )",
+ uselist = ["foo"],
+ opconvert = True,
+ expected_result = [['||', ['A', 'B'], 'C', 'D']]),
+
+ UseReduceTestCase(
+ "|| ( ( A B ) foo? ( || ( C D ) ) )",
+ uselist = ["foo"],
+ opconvert = False,
+ expected_result = ['||', [['A', 'B'], 'C', 'D']]),
+
+ UseReduceTestCase(
+ "|| ( ( A B ) || ( C D ) )",
+ expected_result = ['||', [['A', 'B'], 'C', 'D']]),
+
+ UseReduceTestCase(
+ "|| ( ( A B ) || ( C D || ( E ( F G ) || ( H ) ) ) )",
+ expected_result = ['||', [['A', 'B'], 'C', 'D', 'E', ['F', 'G'], 'H']]),
+
+ UseReduceTestCase(
+ "|| ( ( A B ) || ( C D || ( E ( F G ) || ( H ) ) ) )",
+ opconvert = True,
+ expected_result = [['||', ['A', 'B'], 'C', 'D', 'E', ['F', 'G'], 'H']]),
+
+ UseReduceTestCase(
+ "|| ( foo? ( A B ) )",
+ uselist = ["foo"],
+ expected_result = ['A', 'B']),
+
+ UseReduceTestCase(
+ "|| ( || ( foo? ( A B ) ) )",
+ uselist = ["foo"],
+ expected_result = ['A', 'B']),
+
+ UseReduceTestCase(
+ "|| ( || ( || ( a? ( b? ( c? ( || ( || ( || ( d? ( e? ( f? ( A B ) ) ) ) ) ) ) ) ) ) ) )",
+ uselist = ["a", "b", "c", "d", "e", "f"],
+ expected_result = ['A', 'B']),
+
+ UseReduceTestCase(
+ "|| ( || ( ( || ( a? ( ( b? ( c? ( || ( || ( || ( ( d? ( e? ( f? ( A B ) ) ) ) ) ) ) ) ) ) ) ) ) ) )",
+ uselist = ["a", "b", "c", "d", "e", "f"],
+ expected_result = ['A', 'B']),
+
+ UseReduceTestCase(
+ "|| ( ( A ( || ( B ) ) ) )",
+ expected_result = ['A', 'B']),
+
+ UseReduceTestCase(
+ "|| ( ( A B ) || ( foo? ( bar? ( ( C D || ( baz? ( E ) ( F G ) || ( H ) ) ) ) ) ) )",
+ uselist = ["foo", "bar", "baz"],
+ expected_result = ['||', [['A', 'B'], ['C', 'D', '||', ['E', ['F', 'G'], 'H']]]]),
+
+ UseReduceTestCase(
+ "|| ( ( A B ) || ( foo? ( bar? ( ( C D || ( baz? ( E ) ( F G ) || ( H ) ) ) ) ) ) )",
+ uselist = ["foo", "bar", "baz"],
+ opconvert = True,
+ expected_result = [['||', ['A', 'B'], ['C', 'D', ['||', 'E', ['F', 'G'], 'H']]]]),
+
+ UseReduceTestCase(
+ "|| ( foo? ( A B ) )",
+ uselist = ["foo"],
+ opconvert=True,
+ expected_result = ['A', 'B']),
+
+ UseReduceTestCase(
+ "|| ( || ( foo? ( A B ) ) )",
+ uselist = ["foo"],
+ opconvert=True,
+ expected_result = ['A', 'B']),
+
+ UseReduceTestCase(
+ "|| ( || ( || ( a? ( b? ( c? ( || ( || ( || ( d? ( e? ( f? ( A B ) ) ) ) ) ) ) ) ) ) ) )",
+ uselist = ["a", "b", "c", "d", "e", "f"],
+ opconvert=True,
+ expected_result = ['A', 'B']),
+
+ #flat test
+ UseReduceTestCase(
+ "A",
+ flat = True,
+ expected_result = ["A"]),
+ UseReduceTestCase(
+ "( A )",
+ flat = True,
+ expected_result = ["A"]),
+ UseReduceTestCase(
+ "|| ( A B )",
+ flat = True,
+ expected_result = [ "||", "A", "B" ] ),
+ UseReduceTestCase(
+ "|| ( A || ( B C ) )",
+ flat = True,
+ expected_result = [ "||", "A", "||", "B", "C" ]),
+ UseReduceTestCase(
+ "|| ( A || ( B C D ) )",
+ flat = True,
+ expected_result = [ "||", "A", "||", "B", "C", "D" ]),
+ UseReduceTestCase(
+ "|| ( A || ( B || ( C D ) E ) )",
+ flat = True,
+ expected_result = [ "||", "A", "||", "B", "||", "C", "D", "E" ]),
+ UseReduceTestCase(
+ "( || ( ( ( A ) B ) ) )",
+ flat = True,
+ expected_result = [ "||", "A", "B"] ),
+ UseReduceTestCase(
+ "( || ( || ( ( A ) B ) ) )",
+ flat = True,
+ expected_result = [ "||", "||", "A", "B" ]),
+ UseReduceTestCase(
+ "|| ( A )",
+ flat = True,
+ expected_result = ["||", "A"]),
+ UseReduceTestCase(
+ "( || ( || ( || ( A ) foo? ( B ) ) ) )",
+ expected_result = ["A"]),
+ UseReduceTestCase(
+ "( || ( || ( || ( A ) foo? ( B ) ) ) )",
+ uselist = ["foo"],
+ flat = True,
+ expected_result = [ "||", "||","||", "A", "B" ]),
+ UseReduceTestCase(
+ "( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
+ flat = True,
+ expected_result = ["||", "||","||"]),
+ UseReduceTestCase(
+ "( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
+ uselist = ["foo", "bar"],
+ flat = True,
+ expected_result = [ "||", "||", "A", "||", "B" ]),
+ UseReduceTestCase(
+ "A || ( bar? ( C ) ) foo? ( bar? ( C ) ) B",
+ flat = True,
+ expected_result = ["A", "||", "B"]),
+ UseReduceTestCase(
+ "|| ( A ) || ( B )",
+ flat = True,
+ expected_result = ["||", "A", "||", "B"]),
+ UseReduceTestCase(
+ "foo? ( A ) foo? ( B )",
+ flat = True,
+ expected_result = []),
+ UseReduceTestCase(
+ "foo? ( A ) foo? ( B )",
+ uselist = ["foo"],
+ flat = True,
+ expected_result = ["A", "B"]),
+
+ #use flag validation
+ UseReduceTestCase(
+ "foo? ( A )",
+ uselist = ["foo"],
+ is_valid_flag = self.always_true,
+ expected_result = ["A"]),
+ UseReduceTestCase(
+ "foo? ( A )",
+ is_valid_flag = self.always_true,
+ expected_result = []),
+
+ #token_class
+ UseReduceTestCase(
+ "foo? ( dev-libs/A )",
+ uselist = ["foo"],
+ token_class=Atom,
+ expected_result = ["dev-libs/A"]),
+ UseReduceTestCase(
+ "foo? ( dev-libs/A )",
+ token_class=Atom,
+ expected_result = []),
+ )
+
+ test_cases_xfail = (
+ UseReduceTestCase("? ( A )"),
+ UseReduceTestCase("!? ( A )"),
+ UseReduceTestCase("( A"),
+ UseReduceTestCase("A )"),
+ UseReduceTestCase("||( A B )"),
+ UseReduceTestCase("|| (A B )"),
+ UseReduceTestCase("|| ( A B)"),
+ UseReduceTestCase("|| ( A B"),
+ UseReduceTestCase("|| A B )"),
+ UseReduceTestCase("|| A B"),
+ UseReduceTestCase("|| ( A B ) )"),
+ UseReduceTestCase("|| || B C"),
+ UseReduceTestCase("|| ( A B || )"),
+ UseReduceTestCase("a? A"),
+ UseReduceTestCase("( || ( || || ( A ) foo? ( B ) ) )"),
+ UseReduceTestCase("( || ( || bar? ( A ) foo? ( B ) ) )"),
+ UseReduceTestCase("foo?"),
+ UseReduceTestCase("foo? || ( A )"),
+ UseReduceTestCase("|| ( )"),
+ UseReduceTestCase("foo? ( )"),
+
+ #SRC_URI stuff
+ UseReduceTestCase("http://foo/bar -> blah.tbz2", is_src_uri = True, eapi = EAPI_WITHOUT_SRC_URI_ARROWS),
+ UseReduceTestCase("|| ( http://foo/bar -> blah.tbz2 )", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("http://foo/bar -> foo? ( ftp://foo/a )", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("http://foo/bar blah.tbz2 ->", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("-> http://foo/bar blah.tbz2 )", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("http://foo/bar ->", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("http://foo/bar -> foo? ( http://foo.com/foo )", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("foo? ( http://foo/bar -> ) blah.tbz2", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("http://foo/bar -> foo/blah.tbz2", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("http://foo/bar -> -> bar.tbz2 foo? ( ftp://foo/a )", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
+
+ UseReduceTestCase("http://foo/bar -> bar.tbz2 foo? ( ftp://foo/a )", is_src_uri = False, eapi = EAPI_WITH_SRC_URI_ARROWS),
+
+ UseReduceTestCase(
+ "A",
+ opconvert = True,
+ flat = True),
+
+ #use flag validation
+ UseReduceTestCase("1.0? ( A )"),
+ UseReduceTestCase("!1.0? ( A )"),
+ UseReduceTestCase("!? ( A )"),
+ UseReduceTestCase("!?? ( A )"),
+ UseReduceTestCase(
+ "foo? ( A )",
+ is_valid_flag = self.always_false,
+ ),
+ UseReduceTestCase(
+ "foo? ( A )",
+ uselist = ["foo"],
+ is_valid_flag = self.always_false,
+ ),
+
+ #token_class
+ UseReduceTestCase(
+ "foo? ( A )",
+ uselist = ["foo"],
+ token_class=Atom),
+ UseReduceTestCase(
+ "A(B",
+ token_class=Atom),
+ )
+
+ for test_case in test_cases:
+ # If it fails then show the input, since lots of our
+ # test cases have the same output but different input,
+ # making it difficult to deduce which test has failed.
+ self.assertEqual(test_case.run(), test_case.expected_result,
+ "input: '%s' result: %s != %s" % (test_case.deparray,
+ test_case.run(), test_case.expected_result))
+
+ for test_case in test_cases_xfail:
+ self.assertRaisesMsg(test_case.deparray, (InvalidDependString, ValueError), test_case.run)
diff --git a/portage_with_autodep/pym/portage/tests/ebuild/__init__.py b/portage_with_autodep/pym/portage/tests/ebuild/__init__.py
new file mode 100644
index 0000000..e2d487e
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/ebuild/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 1998-2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/portage_with_autodep/pym/portage/tests/ebuild/__test__ b/portage_with_autodep/pym/portage/tests/ebuild/__test__
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/ebuild/__test__
diff --git a/portage_with_autodep/pym/portage/tests/ebuild/test_array_fromfile_eof.py b/portage_with_autodep/pym/portage/tests/ebuild/test_array_fromfile_eof.py
new file mode 100644
index 0000000..d8277f2
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/ebuild/test_array_fromfile_eof.py
@@ -0,0 +1,43 @@
+# Copyright 2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import array
+import tempfile
+
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.tests import TestCase
+
+class ArrayFromfileEofTestCase(TestCase):
+
+ def testArrayFromfileEof(self):
+ # This tests if the following python issue is fixed
+ # in the currently running version of python:
+ # http://bugs.python.org/issue5334
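+ # (Under that bug, array.fromfile() raised EOFError but silently
+ # dropped the bytes it had already read, so the reassembled data
+ # would not match input_data.)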
+
+ input_data = "an arbitrary string"
+ input_bytes = _unicode_encode(input_data,
+ encoding='utf_8', errors='strict')
+ f = tempfile.TemporaryFile()
+ f.write(input_bytes)
+
+ f.seek(0)
+ data = []
+ eof = False
+ while not eof:
+ a = array.array('B')
+ try:
+ a.fromfile(f, len(input_bytes) + 1)
+ except (EOFError, IOError):
+ # python-3.0 lost data here
+ eof = True
+
+ if not a:
+ eof = True
+ else:
+ data.append(_unicode_decode(a.tostring(),
+ encoding='utf_8', errors='strict'))
+
+ f.close()
+
+ self.assertEqual(input_data, ''.join(data))
diff --git a/portage_with_autodep/pym/portage/tests/ebuild/test_config.py b/portage_with_autodep/pym/portage/tests/ebuild/test_config.py
new file mode 100644
index 0000000..7bec8c6
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/ebuild/test_config.py
@@ -0,0 +1,198 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from portage.package.ebuild.config import config
+from portage.package.ebuild._config.LicenseManager import LicenseManager
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class ConfigTestCase(TestCase):
+
+ def testClone(self):
+ """
+ Test the clone via constructor.
+ """
+
+ ebuilds = {
+ "dev-libs/A-1": { },
+ }
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ settings = config(clone=playground.settings)
+ result = playground.run(["=dev-libs/A-1"])
+ pkg, existing_node = result.depgraph._select_package(
+ playground.root, "=dev-libs/A-1")
+ settings.setcpv(pkg)
+
+ # clone after setcpv tests deepcopy of LazyItemsDict
+ settings2 = config(clone=settings)
+ finally:
+ playground.cleanup()
+
+ def testFeaturesMutation(self):
+ """
+ Test whether mutation of config.features updates the FEATURES
+ variable and persists through config.regenerate() calls. Also
+ verify that features_set._prune_overrides() works correctly.
+ """
+ playground = ResolverPlayground()
+ try:
+ settings = config(clone=playground.settings)
+
+ settings.features.add('noclean')
+ self.assertEqual('noclean' in settings['FEATURES'].split(), True)
+ settings.regenerate()
+ self.assertEqual('noclean' in settings['FEATURES'].split(), True)
+
+ settings.features.discard('noclean')
+ self.assertEqual('noclean' in settings['FEATURES'].split(), False)
+ settings.regenerate()
+ self.assertEqual('noclean' in settings['FEATURES'].split(), False)
+
+ settings.features.add('noclean')
+ self.assertEqual('noclean' in settings['FEATURES'].split(), True)
+ settings.regenerate()
+ self.assertEqual('noclean' in settings['FEATURES'].split(), True)
+
+ # before: ['noclean', '-noclean', 'noclean']
+ settings.features._prune_overrides()
+ # after: ['noclean']
+ self.assertEqual(settings._features_overrides.count('noclean'), 1)
+ self.assertEqual(settings._features_overrides.count('-noclean'), 0)
+
+ settings.features.remove('noclean')
+
+ # before: ['noclean', '-noclean']
+ settings.features._prune_overrides()
+ # after: ['-noclean']
+ self.assertEqual(settings._features_overrides.count('noclean'), 0)
+ self.assertEqual(settings._features_overrides.count('-noclean'), 1)
+ finally:
+ playground.cleanup()
+
+ def testLicenseManager(self):
+
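+ # _getPkgAcceptLicense() should start from the global accept string
+ # ("TEST TEST2" here) and append the tokens of every package.license
+ # atom matching the package, from least to most specific.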
+ user_config = {
+ "package.license":
+ (
+ "dev-libs/* TEST",
+ "dev-libs/A -TEST2",
+ "=dev-libs/A-2 TEST3 @TEST",
+ "*/* @EULA TEST2",
+ "=dev-libs/C-1 *",
+ "=dev-libs/C-2 -*",
+ ),
+ }
+
+ playground = ResolverPlayground(user_config=user_config)
+ try:
+ portage.util.noiselimit = -2
+
+ license_group_locations = (os.path.join(playground.portdir, "profiles"),)
+ pkg_license = os.path.join(playground.eroot, "etc", "portage")
+
+ lic_man = LicenseManager(license_group_locations, pkg_license)
+
+ self.assertEqual(lic_man._accept_license_str, None)
+ self.assertEqual(lic_man._accept_license, None)
+ self.assertEqual(lic_man._license_groups, {"EULA": frozenset(["TEST"])})
+ self.assertEqual(lic_man._undef_lic_groups, set(["TEST"]))
+
+ self.assertEqual(lic_man.extract_global_changes(), "TEST TEST2")
+ self.assertEqual(lic_man.extract_global_changes(), "")
+
+ lic_man.set_accept_license_str("TEST TEST2")
+ self.assertEqual(lic_man._getPkgAcceptLicense("dev-libs/B-1", "0", None), ["TEST", "TEST2", "TEST"])
+ self.assertEqual(lic_man._getPkgAcceptLicense("dev-libs/A-1", "0", None), ["TEST", "TEST2", "TEST", "-TEST2"])
+ self.assertEqual(lic_man._getPkgAcceptLicense("dev-libs/A-2", "0", None), ["TEST", "TEST2", "TEST", "-TEST2", "TEST3", "@TEST"])
+
+ self.assertEqual(lic_man.get_prunned_accept_license("dev-libs/B-1", [], "TEST", "0", None), "TEST")
+ self.assertEqual(lic_man.get_prunned_accept_license("dev-libs/A-1", [], "-TEST2", "0", None), "")
+ self.assertEqual(lic_man.get_prunned_accept_license("dev-libs/A-2", [], "|| ( TEST TEST2 )", "0", None), "TEST")
+ self.assertEqual(lic_man.get_prunned_accept_license("dev-libs/C-1", [], "TEST5", "0", None), "TEST5")
+ self.assertEqual(lic_man.get_prunned_accept_license("dev-libs/C-2", [], "TEST2", "0", None), "")
+
+ self.assertEqual(lic_man.getMissingLicenses("dev-libs/B-1", [], "TEST", "0", None), [])
+ self.assertEqual(lic_man.getMissingLicenses("dev-libs/A-1", [], "-TEST2", "0", None), ["-TEST2"])
+ self.assertEqual(lic_man.getMissingLicenses("dev-libs/A-2", [], "|| ( TEST TEST2 )", "0", None), [])
+ self.assertEqual(lic_man.getMissingLicenses("dev-libs/A-3", [], "|| ( TEST2 || ( TEST3 TEST4 ) )", "0", None), ["TEST2", "TEST3", "TEST4"])
+ self.assertEqual(lic_man.getMissingLicenses("dev-libs/C-1", [], "TEST5", "0", None), [])
+ self.assertEqual(lic_man.getMissingLicenses("dev-libs/C-2", [], "TEST2", "0", None), ["TEST2"])
+ self.assertEqual(lic_man.getMissingLicenses("dev-libs/D-1", [], "", "0", None), [])
+ finally:
+ portage.util.noiselimit = 0
+ playground.cleanup()
+
+ def testPackageMaskOrder(self):
+
+ ebuilds = {
+ "dev-libs/A-1": { },
+ "dev-libs/B-1": { },
+ "dev-libs/C-1": { },
+ "dev-libs/D-1": { },
+ "dev-libs/E-1": { },
+ }
+
+ repo_configs = {
+ "test_repo": {
+ "package.mask":
+ (
+ "dev-libs/A",
+ "dev-libs/C",
+ ),
+ }
+ }
+
+ profile = {
+ "package.mask":
+ (
+ "-dev-libs/A",
+ "dev-libs/B",
+ "-dev-libs/B",
+ "dev-libs/D",
+ ),
+ }
+
+ user_config = {
+ "package.mask":
+ (
+ "-dev-libs/C",
+ "-dev-libs/D",
+ "dev-libs/E",
+ ),
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = { "--autounmask": 'n' },
+ success = False),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ success = True,
+ mergelist = ["dev-libs/B-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ success = True,
+ mergelist = ["dev-libs/C-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/D"],
+ success = True,
+ mergelist = ["dev-libs/D-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/E"],
+ options = { "--autounmask": 'n' },
+ success = False),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, repo_configs=repo_configs, \
+ profile=profile, user_config=user_config)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/portage_with_autodep/pym/portage/tests/ebuild/test_doebuild_spawn.py b/portage_with_autodep/pym/portage/tests/ebuild/test_doebuild_spawn.py
new file mode 100644
index 0000000..ed08b2a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/ebuild/test_doebuild_spawn.py
@@ -0,0 +1,82 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage import _python_interpreter
+from portage import _shell_quote
+from portage.const import EBUILD_SH_BINARY
+from portage.package.ebuild.config import config
+from portage.package.ebuild.doebuild import spawn as doebuild_spawn
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
+from _emerge.Package import Package
+from _emerge.PollScheduler import PollScheduler
+
+class DoebuildSpawnTestCase(TestCase):
+ """
+ Invoke portage.package.ebuild.doebuild.spawn() with a
+ minimal environment. This gives coverage to some of
+ the ebuild execution internals, like ebuild.sh,
+ AbstractEbuildProcess, and EbuildIpcDaemon.
+ """
+
+ def testDoebuildSpawn(self):
+ playground = ResolverPlayground()
+ try:
+ settings = config(clone=playground.settings)
+ cpv = 'sys-apps/portage-2.1'
+ metadata = {
+ 'EAPI' : '2',
+ 'INHERITED' : 'python eutils',
+ 'IUSE' : 'build doc epydoc python3 selinux',
+ 'LICENSE' : 'GPL-2',
+ 'PROVIDE' : 'virtual/portage',
+ 'RDEPEND' : '>=app-shells/bash-3.2_p17 >=dev-lang/python-2.6',
+ 'SLOT' : '0',
+ }
+ root_config = playground.trees[playground.root]['root_config']
+ pkg = Package(built=False, cpv=cpv, installed=False,
+ metadata=metadata, root_config=root_config,
+ type_name='ebuild')
+ settings.setcpv(pkg)
+ settings['PORTAGE_PYTHON'] = _python_interpreter
+ settings['PORTAGE_BUILDDIR'] = os.path.join(
+ settings['PORTAGE_TMPDIR'], cpv)
+ settings['T'] = os.path.join(
+ settings['PORTAGE_BUILDDIR'], 'temp')
+ for x in ('PORTAGE_BUILDDIR', 'T'):
+ os.makedirs(settings[x])
+ # Create a fake environment, to pretend as if the ebuild
+ # has been sourced already.
+ open(os.path.join(settings['T'], 'environment'), 'wb').close()
+
+ scheduler = PollScheduler().sched_iface
+ for phase in ('_internal_test',):
+
+ # Test EbuildSpawnProcess by calling doebuild.spawn() with
+ # returnpid=False. This case is no longer used by portage
+ # internals since EbuildPhase is used instead and that passes
+ # returnpid=True to doebuild.spawn().
+ rval = doebuild_spawn("%s %s" % (_shell_quote(
+ os.path.join(settings["PORTAGE_BIN_PATH"],
+ os.path.basename(EBUILD_SH_BINARY))), phase),
+ settings, free=1)
+ self.assertEqual(rval, os.EX_OK)
+
+ ebuild_phase = EbuildPhase(background=False,
+ phase=phase, scheduler=scheduler,
+ settings=settings)
+ ebuild_phase.start()
+ ebuild_phase.wait()
+ self.assertEqual(ebuild_phase.returncode, os.EX_OK)
+
+ ebuild_phase = MiscFunctionsProcess(background=False,
+ commands=['success_hooks'],
+ scheduler=scheduler, settings=settings)
+ ebuild_phase.start()
+ ebuild_phase.wait()
+ self.assertEqual(ebuild_phase.returncode, os.EX_OK)
+ finally:
+ playground.cleanup()
diff --git a/portage_with_autodep/pym/portage/tests/ebuild/test_ipc_daemon.py b/portage_with_autodep/pym/portage/tests/ebuild/test_ipc_daemon.py
new file mode 100644
index 0000000..b5b4796
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/ebuild/test_ipc_daemon.py
@@ -0,0 +1,124 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import shutil
+import tempfile
+import time
+from portage import os
+from portage import _python_interpreter
+from portage.tests import TestCase
+from portage.const import PORTAGE_BIN_PATH
+from portage.const import PORTAGE_PYM_PATH
+from portage.const import BASH_BINARY
+from portage.package.ebuild._ipc.ExitCommand import ExitCommand
+from portage.util import ensure_dirs
+from _emerge.SpawnProcess import SpawnProcess
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.EbuildIpcDaemon import EbuildIpcDaemon
+from _emerge.TaskScheduler import TaskScheduler
+
+class IpcDaemonTestCase(TestCase):
+
+ _SCHEDULE_TIMEOUT = 40000 # 40 seconds
+
+ def testIpcDaemon(self):
+ tmpdir = tempfile.mkdtemp()
+ build_dir = None
+ try:
+ env = {}
+
+ # Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
+ # need to be inherited by ebuild subprocesses.
+ if 'PORTAGE_USERNAME' in os.environ:
+ env['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
+ if 'PORTAGE_GRPNAME' in os.environ:
+ env['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']
+
+ env['PORTAGE_PYTHON'] = _python_interpreter
+ env['PORTAGE_BIN_PATH'] = PORTAGE_BIN_PATH
+ env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH
+ env['PORTAGE_BUILDDIR'] = os.path.join(tmpdir, 'cat', 'pkg-1')
+
+ task_scheduler = TaskScheduler(max_jobs=2)
+ build_dir = EbuildBuildDir(
+ scheduler=task_scheduler.sched_iface,
+ settings=env)
+ build_dir.lock()
+ ensure_dirs(env['PORTAGE_BUILDDIR'])
+
+ input_fifo = os.path.join(env['PORTAGE_BUILDDIR'], '.ipc_in')
+ output_fifo = os.path.join(env['PORTAGE_BUILDDIR'], '.ipc_out')
+ os.mkfifo(input_fifo)
+ os.mkfifo(output_fifo)
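+
+ # ebuild-ipc and EbuildIpcDaemon communicate over these two named
+ # pipes; a completed 'exit' round trip should invoke
+ # exit_command.reply_hook (exit_command_callback below).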
+
+ for exitcode in (0, 1, 2):
+ exit_command = ExitCommand()
+ commands = {'exit' : exit_command}
+ daemon = EbuildIpcDaemon(commands=commands,
+ input_fifo=input_fifo,
+ output_fifo=output_fifo,
+ scheduler=task_scheduler.sched_iface)
+ proc = SpawnProcess(
+ args=[BASH_BINARY, "-c",
+ '"$PORTAGE_BIN_PATH"/ebuild-ipc exit %d' % exitcode],
+ env=env, scheduler=task_scheduler.sched_iface)
+
+ self.received_command = False
+ def exit_command_callback():
+ self.received_command = True
+ proc.cancel()
+ daemon.cancel()
+
+ exit_command.reply_hook = exit_command_callback
+ task_scheduler.add(daemon)
+ task_scheduler.add(proc)
+ start_time = time.time()
+ task_scheduler.run(timeout=self._SCHEDULE_TIMEOUT)
+ task_scheduler.clear()
+
+ self.assertEqual(self.received_command, True,
+ "command not received after %d seconds" % \
+ (time.time() - start_time,))
+ self.assertEqual(proc.isAlive(), False)
+ self.assertEqual(daemon.isAlive(), False)
+ self.assertEqual(exit_command.exitcode, exitcode)
+
+ # Intentionally short timeout test for QueueScheduler.run()
+ sleep_time_s = 10 # 10.000 seconds
+ short_timeout_ms = 10 # 0.010 seconds
+
+ for i in range(3):
+ exit_command = ExitCommand()
+ commands = {'exit' : exit_command}
+ daemon = EbuildIpcDaemon(commands=commands,
+ input_fifo=input_fifo,
+ output_fifo=output_fifo,
+ scheduler=task_scheduler.sched_iface)
+ proc = SpawnProcess(
+ args=[BASH_BINARY, "-c", 'exec sleep %d' % sleep_time_s],
+ env=env, scheduler=task_scheduler.sched_iface)
+
+ self.received_command = False
+ def exit_command_callback():
+ self.received_command = True
+ proc.cancel()
+ daemon.cancel()
+
+ exit_command.reply_hook = exit_command_callback
+ task_scheduler.add(daemon)
+ task_scheduler.add(proc)
+ start_time = time.time()
+ task_scheduler.run(timeout=short_timeout_ms)
+ task_scheduler.clear()
+
+ self.assertEqual(self.received_command, False,
+ "command received after %d seconds" % \
+ (time.time() - start_time,))
+ self.assertEqual(proc.isAlive(), False)
+ self.assertEqual(daemon.isAlive(), False)
+ self.assertEqual(proc.returncode == os.EX_OK, False)
+
+ finally:
+ if build_dir is not None:
+ build_dir.unlock()
+ shutil.rmtree(tmpdir)
diff --git a/portage_with_autodep/pym/portage/tests/ebuild/test_pty_eof.py b/portage_with_autodep/pym/portage/tests/ebuild/test_pty_eof.py
new file mode 100644
index 0000000..4b6ff21
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/ebuild/test_pty_eof.py
@@ -0,0 +1,32 @@
+# Copyright 2009-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.util._pty import _can_test_pty_eof, _test_pty_eof
+
+class PtyEofFdopenBufferedTestCase(TestCase):
+
+ def testPtyEofFdopenBuffered(self):
+ # This tests if the following python issue is fixed yet:
+ # http://bugs.python.org/issue5380
+ # Since it might not be fixed, mark as todo.
+ self.todo = True
+ # The result is only valid if openpty does not raise EnvironmentError.
+ if _can_test_pty_eof():
+ try:
+ self.assertEqual(_test_pty_eof(fdopen_buffered=True), True)
+ except EnvironmentError:
+ pass
+
+class PtyEofFdopenUnBufferedTestCase(TestCase):
+ def testPtyEofFdopenUnBuffered(self):
+ # New development: It appears that array.fromfile() is usable
+ # with python3 as long as fdopen is called with a bufsize
+ # argument of 0.
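+ # For example, a wrapper like os.fdopen(fd, 'rb', 0) yields an
+ # unbuffered binary file object; presumably _test_pty_eof() sets
+ # up an equivalent default here, in contrast to the buffered
+ # variant exercised above.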
+
+ # The result is only valid if openpty does not raise EnvironmentError.
+ if _can_test_pty_eof():
+ try:
+ self.assertEqual(_test_pty_eof(), True)
+ except EnvironmentError:
+ pass
diff --git a/portage_with_autodep/pym/portage/tests/ebuild/test_spawn.py b/portage_with_autodep/pym/portage/tests/ebuild/test_spawn.py
new file mode 100644
index 0000000..fea4738
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/ebuild/test_spawn.py
@@ -0,0 +1,52 @@
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import io
+import sys
+import tempfile
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage.const import BASH_BINARY
+from portage.tests import TestCase
+from _emerge.SpawnProcess import SpawnProcess
+from _emerge.PollScheduler import PollScheduler
+
+class SpawnTestCase(TestCase):
+
+ def testLogfile(self):
+ logfile = None
+ try:
+ fd, logfile = tempfile.mkstemp()
+ os.close(fd)
+ null_fd = os.open('/dev/null', os.O_RDWR)
+ test_string = 2 * "blah blah blah\n"
+ scheduler = PollScheduler().sched_iface
+ proc = SpawnProcess(
+ args=[BASH_BINARY, "-c",
+ "echo -n '%s'" % test_string],
+ env={}, fd_pipes={0:sys.stdin.fileno(), 1:null_fd, 2:null_fd},
+ scheduler=scheduler,
+ logfile=logfile)
+ proc.start()
+ os.close(null_fd)
+ self.assertEqual(proc.wait(), os.EX_OK)
+ f = io.open(_unicode_encode(logfile,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='strict')
+ log_content = f.read()
+ f.close()
+ # When logging passes through a pty, this comparison will fail
+ # unless the oflag terminal attributes have the termios.OPOST
+ # bit disabled. Otherwise, transformations such as \n -> \r\n
+ # may occur.
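+ # (If a pty were involved, a hypothetical way to clear that bit
+ # would be, after importing the termios module:
+ # attrs = termios.tcgetattr(fd)
+ # attrs[1] &= ~termios.OPOST # index 1 is the oflag field
+ # termios.tcsetattr(fd, termios.TCSANOW, attrs)
+ # No pty is used in this test, so this is illustration only.)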
+ self.assertEqual(test_string, log_content)
+ finally:
+ if logfile:
+ try:
+ os.unlink(logfile)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
diff --git a/portage_with_autodep/pym/portage/tests/env/__init__.py b/portage_with_autodep/pym/portage/tests/env/__init__.py
new file mode 100644
index 0000000..cbeabe5
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/env/__init__.py
@@ -0,0 +1,4 @@
+# tests/portage/env/__init__.py -- Portage Unit Test functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
diff --git a/portage_with_autodep/pym/portage/tests/env/__test__ b/portage_with_autodep/pym/portage/tests/env/__test__
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/env/__test__
diff --git a/portage_with_autodep/pym/portage/tests/env/config/__init__.py b/portage_with_autodep/pym/portage/tests/env/config/__init__.py
new file mode 100644
index 0000000..ef5cc43
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/env/config/__init__.py
@@ -0,0 +1,4 @@
+# tests/portage/env/config/__init__.py -- Portage Unit Test functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
diff --git a/portage_with_autodep/pym/portage/tests/env/config/__test__ b/portage_with_autodep/pym/portage/tests/env/config/__test__
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/env/config/__test__
diff --git a/portage_with_autodep/pym/portage/tests/env/config/test_PackageKeywordsFile.py b/portage_with_autodep/pym/portage/tests/env/config/test_PackageKeywordsFile.py
new file mode 100644
index 0000000..f1e9e98
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/env/config/test_PackageKeywordsFile.py
@@ -0,0 +1,40 @@
+# test_PackageKeywordsFile.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.tests import TestCase
+from portage.env.config import PackageKeywordsFile
+from tempfile import mkstemp
+
+class PackageKeywordsFileTestCase(TestCase):
+
+ cpv = ['sys-apps/portage']
+ keywords = ['~x86', 'amd64', '-mips']
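+ # BuildFile() below writes one package.keywords-style line per
+ # cpv, e.g. "sys-apps/portage ~x86 amd64 -mips".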
+
+ def testPackageKeywordsFile(self):
+ """
+ A simple test to ensure the load works properly
+ """
+
+ self.BuildFile()
+ try:
+ f = PackageKeywordsFile(self.fname)
+ f.load()
+ i = 0
+ for cpv, keyword in f.items():
+ self.assertEqual( cpv, self.cpv[i] )
+ for k in keyword:
+ 	self.assertTrue(k in self.keywords)
+ i = i + 1
+ finally:
+ self.NukeFile()
+
+ def BuildFile(self):
+ fd, self.fname = mkstemp()
+ f = os.fdopen(fd, 'w')
+ for c in self.cpv:
+ f.write("%s %s\n" % (c,' '.join(self.keywords)))
+ f.close()
+
+ def NukeFile(self):
+ os.unlink(self.fname)
diff --git a/portage_with_autodep/pym/portage/tests/env/config/test_PackageMaskFile.py b/portage_with_autodep/pym/portage/tests/env/config/test_PackageMaskFile.py
new file mode 100644
index 0000000..0c5b30f
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/env/config/test_PackageMaskFile.py
@@ -0,0 +1,29 @@
+# test_PackageMaskFile.py -- Portage Unit Testing Functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.env.config import PackageMaskFile
+from portage.tests import TestCase, test_cps
+from tempfile import mkstemp
+
+class PackageMaskFileTestCase(TestCase):
+
+ def testPackageMaskFile(self):
+ self.BuildFile()
+ try:
+ f = PackageMaskFile(self.fname)
+ f.load()
+ for atom in f:
+ self.assertTrue(atom in test_cps)
+ finally:
+ self.NukeFile()
+
+ def BuildFile(self):
+ fd, self.fname = mkstemp()
+ f = os.fdopen(fd, 'w')
+ f.write("\n".join(test_cps))
+ f.close()
+
+ def NukeFile(self):
+ os.unlink(self.fname)
diff --git a/portage_with_autodep/pym/portage/tests/env/config/test_PackageUseFile.py b/portage_with_autodep/pym/portage/tests/env/config/test_PackageUseFile.py
new file mode 100644
index 0000000..7a38067
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/env/config/test_PackageUseFile.py
@@ -0,0 +1,37 @@
+# test_PackageUseFile.py -- Portage Unit Testing Functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.tests import TestCase
+from portage.env.config import PackageUseFile
+from tempfile import mkstemp
+
+
+class PackageUseFileTestCase(TestCase):
+
+ cpv = 'sys-apps/portage'
+ useflags = ['cdrom', 'far', 'boo', 'flag', 'blat']
+
+ def testPackageUseFile(self):
+ """
+ A simple test to ensure the load works properly
+ """
+ self.BuildFile()
+ try:
+ f = PackageUseFile(self.fname)
+ f.load()
+ for cpv, use in f.items():
+ self.assertEqual( cpv, self.cpv )
+ for flag in use:
+ 	self.assertTrue(flag in self.useflags)
+ finally:
+ self.NukeFile()
+
+ def BuildFile(self):
+ fd, self.fname = mkstemp()
+ f = os.fdopen(fd, 'w')
+ f.write("%s %s" % (self.cpv, ' '.join(self.useflags)))
+ f.close()
+
+ def NukeFile(self):
+ os.unlink(self.fname)
diff --git a/portage_with_autodep/pym/portage/tests/env/config/test_PortageModulesFile.py b/portage_with_autodep/pym/portage/tests/env/config/test_PortageModulesFile.py
new file mode 100644
index 0000000..2cd1a8a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/env/config/test_PortageModulesFile.py
@@ -0,0 +1,39 @@
+# Copyright 2006-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.tests import TestCase
+from portage.env.config import PortageModulesFile
+from tempfile import mkstemp
+
+class PortageModulesFileTestCase(TestCase):
+
+ keys = ['foo.bar','baz','bob','extra_key']
+ invalid_keys = ['', '']
+ modules = ['spanky','zmedico','antarus','ricer','5','6']
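+ # BuildFile() below writes one "key=value" line per pair, so the
+ # first entry comes out as "foo.bar=spanky".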
+
+ def setUp(self):
+ self.items = {}
+ for k, v in zip(self.keys + self.invalid_keys,
+ self.modules):
+ self.items[k] = v
+
+ def testPortageModulesFile(self):
+ self.BuildFile()
+ # Clean up the temp file even if an assertion fails, matching
+ # the try/finally pattern of the sibling tests.
+ try:
+ 	f = PortageModulesFile(self.fname)
+ 	f.load()
+ 	for k in self.keys:
+ 		self.assertEqual(f[k], self.items[k])
+ 	for ik in self.invalid_keys:
+ 		self.assertEqual(False, ik in f)
+ finally:
+ 	self.NukeFile()
+
+ def BuildFile(self):
+ fd, self.fname = mkstemp()
+ f = os.fdopen(fd, 'w')
+ for k, v in self.items.items():
+ f.write('%s=%s\n' % (k,v))
+ f.close()
+
+ def NukeFile(self):
+ os.unlink(self.fname)
diff --git a/portage_with_autodep/pym/portage/tests/lafilefixer/__init__.py b/portage_with_autodep/pym/portage/tests/lafilefixer/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/lafilefixer/__init__.py
diff --git a/portage_with_autodep/pym/portage/tests/lafilefixer/__test__ b/portage_with_autodep/pym/portage/tests/lafilefixer/__test__
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/lafilefixer/__test__
diff --git a/portage_with_autodep/pym/portage/tests/lafilefixer/test_lafilefixer.py b/portage_with_autodep/pym/portage/tests/lafilefixer/test_lafilefixer.py
new file mode 100644
index 0000000..0bcffaa
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/lafilefixer/test_lafilefixer.py
@@ -0,0 +1,145 @@
+# test_lafilefixer.py -- Portage Unit Testing Functionality
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.exception import InvalidData
+
+class test_lafilefixer(TestCase):
+
+ def get_test_cases_clean(self):
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' -lm'\n" + \
+ b"current=6\n" + \
+ b"age=0\n" + \
+ b"revision=2\n" + \
+ b"installed=yes\n" + \
+ b"dlopen=''\n" + \
+ b"dlpreopen=''\n" + \
+ b"libdir='/usr/lib64'\n"
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' -lm'\n" + \
+ b"current=6\n" + \
+ b"age=0\n" + \
+ b"revision=2\n" + \
+ b"installed=yes\n" + \
+ b"dlopen=''\n" + \
+ b"dlpreopen=''\n" + \
+ b"libdir='/usr/lib64'\n"
+ yield b"dependency_libs=' liba.la /usr/lib64/bar.la -lc'\n"
+
+ def get_test_cases_update(self):
+ #.la -> -l*
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc'\n", \
+ b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' -L/usr/lib64 -la -lb -lc'\n"
+ #move stuff into inherited_linker_flags
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' /usr/lib64/liba.la -pthread /usr/lib64/libb.la -lc'\n" + \
+ b"inherited_linker_flags=''\n", \
+ b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' -L/usr/lib64 -la -lb -lc'\n" + \
+ b"inherited_linker_flags=' -pthread'\n"
+ #reorder
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' /usr/lib64/liba.la -R/usr/lib64 /usr/lib64/libb.la -lc'\n", \
+ b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' -R/usr/lib64 -L/usr/lib64 -la -lb -lc'\n"
+ #remove duplicates from dependency_libs (the original version didn't do it for inherited_linker_flags)
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libc.la -pthread -mt" + \
+ b" -L/usr/lib -R/usr/lib64 -lc /usr/lib64/libb.la -lc'\n" +\
+ b"inherited_linker_flags=' -pthread -pthread'\n", \
+ b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' -R/usr/lib64 -L/usr/lib64 -L/usr/lib -la -lc -lb'\n" +\
+ b"inherited_linker_flags=' -pthread -pthread -mt'\n"
+ #-L rewriting
+ yield b"dependency_libs=' -L/usr/X11R6/lib'\n", \
+ b"dependency_libs=' -L/usr/lib'\n"
+ yield b"dependency_libs=' -L/usr/local/lib'\n", \
+ b"dependency_libs=' -L/usr/lib'\n"
+ yield b"dependency_libs=' -L/usr/lib64/pkgconfig/../..'\n", \
+ b"dependency_libs=' -L/usr'\n"
+ yield b"dependency_libs=' -L/usr/lib/pkgconfig/..'\n", \
+ b"dependency_libs=' -L/usr/lib'\n"
+ yield b"dependency_libs=' -L/usr/lib/pkgconfig/../.. -L/usr/lib/pkgconfig/..'\n", \
+ b"dependency_libs=' -L/usr -L/usr/lib'\n"
+ #we once got a backtrace on this one
+ yield b"dependency_libs=' /usr/lib64/libMagickCore.la -L/usr/lib64 -llcms2 /usr/lib64/libtiff.la " + \
+ b"-ljbig -lc /usr/lib64/libfreetype.la /usr/lib64/libjpeg.la /usr/lib64/libXext.la " + \
+ b"/usr/lib64/libXt.la /usr/lib64/libSM.la -lICE -luuid /usr/lib64/libICE.la /usr/lib64/libX11.la " + \
+ b"/usr/lib64/libxcb.la /usr/lib64/libXau.la /usr/lib64/libXdmcp.la -lbz2 -lz -lm " + \
+ b"/usr/lib/gcc/x86_64-pc-linux-gnu/4.4.4/libgomp.la -lrt -lpthread /usr/lib64/libltdl.la -ldl " + \
+ b"/usr/lib64/libfpx.la -lstdc++'", \
+ b"dependency_libs=' -L/usr/lib64 -L/usr/lib/gcc/x86_64-pc-linux-gnu/4.4.4 -lMagickCore -llcms2 " + \
+ b"-ltiff -ljbig -lc -lfreetype -ljpeg -lXext -lXt -lSM -lICE -luuid -lX11 -lxcb -lXau -lXdmcp " + \
+ b"-lbz2 -lz -lm -lgomp -lrt -lpthread -lltdl -ldl -lfpx -lstdc++'"
+
+
+ def get_test_cases_broken(self):
+ yield b""
+ #no dependency_libs
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"current=6\n" + \
+ b"age=0\n" + \
+ b"revision=2\n"
+ #broken dependency_libs
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc' \n"
+ #broken dependency_libs
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc\n"
+ #crap in dependency_libs
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc /-lstdc++'\n"
+ #dependency_libs twice
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc /-lstdc++'\n" +\
+ b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc /-lstdc++'\n"
+ #inherited_linker_flags twice
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"inherited_linker_flags=''\n" +\
+ b"inherited_linker_flags=''\n"
+
+ def testlafilefixer(self):
+ from portage.util.lafilefixer import _parse_lafile_contents, rewrite_lafile
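+ # As asserted below, rewrite_lafile(contents) returns (False, None)
+ # when the .la contents need no changes, (True, fixed_contents)
+ # when it rewrote them, and raises InvalidData on malformed input.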
+
+ for clean_contents in self.get_test_cases_clean():
+ self.assertEqual(rewrite_lafile(clean_contents), (False, None))
+
+ for original_contents, fixed_contents in self.get_test_cases_update():
+ self.assertEqual(rewrite_lafile(original_contents), (True, fixed_contents))
+
+ for broken_contents in self.get_test_cases_broken():
+ self.assertRaises(InvalidData, rewrite_lafile, broken_contents)
diff --git a/portage_with_autodep/pym/portage/tests/lazyimport/__init__.py b/portage_with_autodep/pym/portage/tests/lazyimport/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/lazyimport/__init__.py
diff --git a/portage_with_autodep/pym/portage/tests/lazyimport/__test__ b/portage_with_autodep/pym/portage/tests/lazyimport/__test__
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/lazyimport/__test__
diff --git a/portage_with_autodep/pym/portage/tests/lazyimport/test_lazy_import_portage_baseline.py b/portage_with_autodep/pym/portage/tests/lazyimport/test_lazy_import_portage_baseline.py
new file mode 100644
index 0000000..08ccfa7
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/lazyimport/test_lazy_import_portage_baseline.py
@@ -0,0 +1,81 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import re
+import portage
+from portage import os
+from portage.const import PORTAGE_PYM_PATH
+from portage.tests import TestCase
+
+from _emerge.PollScheduler import PollScheduler
+from _emerge.PipeReader import PipeReader
+from _emerge.SpawnProcess import SpawnProcess
+
+class LazyImportPortageBaselineTestCase(TestCase):
+
+ _module_re = re.compile(r'^(portage|repoman|_emerge)\.')
+
+ _baseline_imports = frozenset([
+ 'portage.const', 'portage.localization',
+ 'portage.proxy', 'portage.proxy.lazyimport',
+ 'portage.proxy.objectproxy',
+ 'portage._selinux',
+ ])
+
+ _baseline_import_cmd = [portage._python_interpreter, '-c', '''
+import os
+import sys
+sys.path.insert(0, os.environ["PORTAGE_PYM_PATH"])
+import portage
+sys.stdout.write(" ".join(k for k in sys.modules
+ if sys.modules[k] is not None))
+''']
+
+ def testLazyImportPortageBaseline(self):
+ """
+ Check what modules are imported by a baseline module import.
+ """
+
+ env = os.environ.copy()
+ pythonpath = env.get('PYTHONPATH')
+ if pythonpath is not None and not pythonpath.strip():
+ pythonpath = None
+ if pythonpath is None:
+ pythonpath = ''
+ else:
+ pythonpath = ':' + pythonpath
+ pythonpath = PORTAGE_PYM_PATH + pythonpath
+ env['PYTHONPATH'] = pythonpath
+
+ # If python is patched to insert the path of the
+ # currently installed portage module into sys.path,
+ # then the above PYTHONPATH override doesn't help.
+ env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH
+
+ scheduler = PollScheduler().sched_iface
+ master_fd, slave_fd = os.pipe()
+ master_file = os.fdopen(master_fd, 'rb', 0)
+ slave_file = os.fdopen(slave_fd, 'wb')
+ producer = SpawnProcess(
+ args=self._baseline_import_cmd,
+ env=env, fd_pipes={1:slave_fd},
+ scheduler=scheduler)
+ producer.start()
+ slave_file.close()
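+ # Closing the parent's copy of the write end ensures that
+ # PipeReader sees EOF as soon as the child process exits.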
+
+ consumer = PipeReader(
+ input_files={"producer" : master_file},
+ scheduler=scheduler)
+
+ consumer.start()
+ consumer.wait()
+ self.assertEqual(producer.wait(), os.EX_OK)
+ self.assertEqual(consumer.wait(), os.EX_OK)
+
+ output = consumer.getvalue().decode('ascii', 'replace').split()
+
+ unexpected_modules = " ".join(sorted(x for x in output \
+ if self._module_re.match(x) is not None and \
+ x not in self._baseline_imports))
+
+ self.assertEqual("", unexpected_modules)
diff --git a/portage_with_autodep/pym/portage/tests/lazyimport/test_preload_portage_submodules.py b/portage_with_autodep/pym/portage/tests/lazyimport/test_preload_portage_submodules.py
new file mode 100644
index 0000000..9d20eba
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/lazyimport/test_preload_portage_submodules.py
@@ -0,0 +1,16 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage.tests import TestCase
+
+class PreloadPortageSubmodulesTestCase(TestCase):
+
+ def testPreloadPortageSubmodules(self):
+ """
+ Verify that _preload_portage_submodules() doesn't leave any
+ remaining proxies that refer to the portage.* namespace.
+ """
+ portage.proxy.lazyimport._preload_portage_submodules()
+ for name in portage.proxy.lazyimport._module_proxies:
+ self.assertEqual(name.startswith('portage.'), False)
diff --git a/portage_with_autodep/pym/portage/tests/lint/__init__.py b/portage_with_autodep/pym/portage/tests/lint/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/lint/__init__.py
diff --git a/portage_with_autodep/pym/portage/tests/lint/__test__ b/portage_with_autodep/pym/portage/tests/lint/__test__
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/lint/__test__
diff --git a/portage_with_autodep/pym/portage/tests/lint/test_bash_syntax.py b/portage_with_autodep/pym/portage/tests/lint/test_bash_syntax.py
new file mode 100644
index 0000000..aef8d74
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/lint/test_bash_syntax.py
@@ -0,0 +1,42 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import stat
+
+from portage.const import BASH_BINARY, PORTAGE_BIN_PATH
+from portage.tests import TestCase
+from portage import os
+from portage import subprocess_getstatusoutput
+from portage import _encodings
+from portage import _shell_quote
+from portage import _unicode_decode, _unicode_encode
+
+class BashSyntaxTestCase(TestCase):
+
+ def testBashSyntax(self):
+ for parent, dirs, files in os.walk(PORTAGE_BIN_PATH):
+ parent = _unicode_decode(parent,
+ encoding=_encodings['fs'], errors='strict')
+ for x in files:
+ x = _unicode_decode(x,
+ encoding=_encodings['fs'], errors='strict')
+ ext = x.split('.')[-1]
+ if ext in ('py', 'pyc', 'pyo'):
+ continue
+ x = os.path.join(parent, x)
+ st = os.lstat(x)
+ if not stat.S_ISREG(st.st_mode):
+ continue
+
+ # Check for bash shebang
+ f = open(_unicode_encode(x,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ line = _unicode_decode(f.readline(),
+ encoding=_encodings['content'], errors='replace')
+ f.close()
+ if line[:2] == '#!' and \
+ 'bash' in line:
+ cmd = "%s -n %s" % (_shell_quote(BASH_BINARY), _shell_quote(x))
+ status, output = subprocess_getstatusoutput(cmd)
+ self.assertEqual(os.WIFEXITED(status) and \
+ os.WEXITSTATUS(status) == os.EX_OK, True, msg=output)
diff --git a/portage_with_autodep/pym/portage/tests/lint/test_compile_modules.py b/portage_with_autodep/pym/portage/tests/lint/test_compile_modules.py
new file mode 100644
index 0000000..f90a666
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/lint/test_compile_modules.py
@@ -0,0 +1,46 @@
+# Copyright 2009-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import itertools
+import stat
+
+from portage.const import PORTAGE_BIN_PATH, PORTAGE_PYM_PATH
+from portage.tests import TestCase
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode, _unicode_encode
+
+import py_compile
+
+class CompileModulesTestCase(TestCase):
+
+ def testCompileModules(self):
+ for parent, dirs, files in itertools.chain(
+ os.walk(PORTAGE_BIN_PATH),
+ os.walk(PORTAGE_PYM_PATH)):
+ parent = _unicode_decode(parent,
+ encoding=_encodings['fs'], errors='strict')
+ for x in files:
+ x = _unicode_decode(x,
+ encoding=_encodings['fs'], errors='strict')
+ if x[-4:] in ('.pyc', '.pyo'):
+ continue
+ x = os.path.join(parent, x)
+ st = os.lstat(x)
+ if not stat.S_ISREG(st.st_mode):
+ continue
+ do_compile = False
+ if x[-3:] == '.py':
+ do_compile = True
+ else:
+ # Check for python shebang
+ f = open(_unicode_encode(x,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ line = _unicode_decode(f.readline(),
+ encoding=_encodings['content'], errors='replace')
+ f.close()
+ if line[:2] == '#!' and \
+ 'python' in line:
+ do_compile = True
+ if do_compile:
+ py_compile.compile(x, cfile='/dev/null', doraise=True)
diff --git a/portage_with_autodep/pym/portage/tests/lint/test_import_modules.py b/portage_with_autodep/pym/portage/tests/lint/test_import_modules.py
new file mode 100644
index 0000000..8d257c5
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/lint/test_import_modules.py
@@ -0,0 +1,40 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.const import PORTAGE_PYM_PATH
+from portage.tests import TestCase
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+
+class ImportModulesTestCase(TestCase):
+
+ def testImportModules(self):
+ expected_failures = frozenset((
+ ))
+
+ for mod in self._iter_modules(PORTAGE_PYM_PATH):
+ try:
+ __import__(mod)
+ except ImportError as e:
+ if mod not in expected_failures:
+ self.assertTrue(False, "failed to import '%s': %s" % (mod, e))
+ del e
+
+ def _iter_modules(self, base_dir):
+ for parent, dirs, files in os.walk(base_dir):
+ parent = _unicode_decode(parent,
+ encoding=_encodings['fs'], errors='strict')
+ parent_mod = parent[len(PORTAGE_PYM_PATH)+1:]
+ parent_mod = parent_mod.replace("/", ".")
+ for x in files:
+ x = _unicode_decode(x,
+ encoding=_encodings['fs'], errors='strict')
+ if x[-3:] != '.py':
+ continue
+ x = x[:-3]
+ if x[-8:] == '__init__':
+ x = parent_mod
+ else:
+ x = parent_mod + "." + x
+ yield x
diff --git a/portage_with_autodep/pym/portage/tests/locks/__init__.py b/portage_with_autodep/pym/portage/tests/locks/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/locks/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/portage_with_autodep/pym/portage/tests/locks/__test__ b/portage_with_autodep/pym/portage/tests/locks/__test__
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/locks/__test__
diff --git a/portage_with_autodep/pym/portage/tests/locks/test_asynchronous_lock.py b/portage_with_autodep/pym/portage/tests/locks/test_asynchronous_lock.py
new file mode 100644
index 0000000..8946caf
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/locks/test_asynchronous_lock.py
@@ -0,0 +1,124 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import shutil
+import signal
+import tempfile
+
+from portage import os
+from portage.tests import TestCase
+from _emerge.AsynchronousLock import AsynchronousLock
+from _emerge.PollScheduler import PollScheduler
+
+class AsynchronousLockTestCase(TestCase):
+
+ def testAsynchronousLock(self):
+ scheduler = PollScheduler().sched_iface
+ tempdir = tempfile.mkdtemp()
+ try:
+ path = os.path.join(tempdir, 'lock_me')
+ for force_async in (True, False):
+ for force_dummy in (True, False):
+ async_lock = AsynchronousLock(path=path,
+ scheduler=scheduler, _force_async=force_async,
+ _force_thread=True,
+ _force_dummy=force_dummy)
+ async_lock.start()
+ self.assertEqual(async_lock.wait(), os.EX_OK)
+ self.assertEqual(async_lock.returncode, os.EX_OK)
+ async_lock.unlock()
+
+ async_lock = AsynchronousLock(path=path,
+ scheduler=scheduler, _force_async=force_async,
+ _force_process=True)
+ async_lock.start()
+ self.assertEqual(async_lock.wait(), os.EX_OK)
+ self.assertEqual(async_lock.returncode, os.EX_OK)
+ async_lock.unlock()
+
+ finally:
+ shutil.rmtree(tempdir)
+
+ def testAsynchronousLockWait(self):
+ scheduler = PollScheduler().sched_iface
+ tempdir = tempfile.mkdtemp()
+ try:
+ path = os.path.join(tempdir, 'lock_me')
+ lock1 = AsynchronousLock(path=path, scheduler=scheduler)
+ lock1.start()
+ self.assertEqual(lock1.wait(), os.EX_OK)
+ self.assertEqual(lock1.returncode, os.EX_OK)
+
+ # lock2 requires _force_async=True since the portage.locks
+ # module is not designed to work as intended here if the
+ # same process tries to lock the same file more than
+ # one time concurrently.
+ lock2 = AsynchronousLock(path=path, scheduler=scheduler,
+ _force_async=True, _force_process=True)
+ lock2.start()
+ # lock2 should be waiting for lock1 to release
+ self.assertEqual(lock2.poll(), None)
+ self.assertEqual(lock2.returncode, None)
+
+ lock1.unlock()
+ self.assertEqual(lock2.wait(), os.EX_OK)
+ self.assertEqual(lock2.returncode, os.EX_OK)
+ lock2.unlock()
+ finally:
+ shutil.rmtree(tempdir)
+
+ def testAsynchronousLockWaitCancel(self):
+ scheduler = PollScheduler().sched_iface
+ tempdir = tempfile.mkdtemp()
+ try:
+ path = os.path.join(tempdir, 'lock_me')
+ lock1 = AsynchronousLock(path=path, scheduler=scheduler)
+ lock1.start()
+ self.assertEqual(lock1.wait(), os.EX_OK)
+ self.assertEqual(lock1.returncode, os.EX_OK)
+ lock2 = AsynchronousLock(path=path, scheduler=scheduler,
+ _force_async=True, _force_process=True)
+ lock2.start()
+ # lock2 should be waiting for lock1 to release
+ self.assertEqual(lock2.poll(), None)
+ self.assertEqual(lock2.returncode, None)
+
+ # Cancel lock2 and then check wait() and returncode results.
+ lock2.cancel()
+ self.assertEqual(lock2.wait() == os.EX_OK, False)
+ self.assertEqual(lock2.returncode == os.EX_OK, False)
+ self.assertEqual(lock2.returncode is None, False)
+ lock1.unlock()
+ finally:
+ shutil.rmtree(tempdir)
+
+ def testAsynchronousLockWaitKill(self):
+ scheduler = PollScheduler().sched_iface
+ tempdir = tempfile.mkdtemp()
+ try:
+ path = os.path.join(tempdir, 'lock_me')
+ lock1 = AsynchronousLock(path=path, scheduler=scheduler)
+ lock1.start()
+ self.assertEqual(lock1.wait(), os.EX_OK)
+ self.assertEqual(lock1.returncode, os.EX_OK)
+ lock2 = AsynchronousLock(path=path, scheduler=scheduler,
+ _force_async=True, _force_process=True)
+ lock2.start()
+ # lock2 should be waiting for lock1 to release
+ self.assertEqual(lock2.poll(), None)
+ self.assertEqual(lock2.returncode, None)
+
+ # Kill lock2's process and then check wait() and
+ # returncode results. This is intended to simulate
+ # a SIGINT sent via the controlling tty.
+ self.assertEqual(lock2._imp is not None, True)
+ self.assertEqual(lock2._imp._proc is not None, True)
+ self.assertEqual(lock2._imp._proc.pid is not None, True)
+ lock2._imp._kill_test = True
+ os.kill(lock2._imp._proc.pid, signal.SIGTERM)
+ self.assertEqual(lock2.wait() == os.EX_OK, False)
+ self.assertEqual(lock2.returncode == os.EX_OK, False)
+ self.assertEqual(lock2.returncode is None, False)
+ lock1.unlock()
+ finally:
+ shutil.rmtree(tempdir)
diff --git a/portage_with_autodep/pym/portage/tests/locks/test_lock_nonblock.py b/portage_with_autodep/pym/portage/tests/locks/test_lock_nonblock.py
new file mode 100644
index 0000000..d5748ad
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/locks/test_lock_nonblock.py
@@ -0,0 +1,46 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import shutil
+import tempfile
+import traceback
+
+import portage
+from portage import os
+from portage.tests import TestCase
+
+class LockNonblockTestCase(TestCase):
+
+ def testLockNonblock(self):
+ tempdir = tempfile.mkdtemp()
+ try:
+ path = os.path.join(tempdir, 'lock_me')
+ lock1 = portage.locks.lockfile(path)
+ pid = os.fork()
+ if pid == 0:
+ portage.process._setup_pipes({0:0, 1:1, 2:2})
+ rval = 2
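+ # Child exit codes: os.EX_OK if the non-blocking lock attempt
+ # raises TryAgain as expected, 1 if the lock is unexpectedly
+ # acquired, 2 on any other error.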
+ try:
+ try:
+ lock2 = portage.locks.lockfile(path, flags=os.O_NONBLOCK)
+ except portage.exception.TryAgain:
+ rval = os.EX_OK
+ else:
+ rval = 1
+ portage.locks.unlockfile(lock2)
+ except SystemExit:
+ raise
+ except:
+ traceback.print_exc()
+ finally:
+ os._exit(rval)
+
+ self.assertEqual(pid > 0, True)
+ pid, status = os.waitpid(pid, 0)
+ self.assertEqual(os.WIFEXITED(status), True)
+ self.assertEqual(os.WEXITSTATUS(status), os.EX_OK)
+
+ portage.locks.unlockfile(lock1)
+ finally:
+ shutil.rmtree(tempdir)
+
diff --git a/portage_with_autodep/pym/portage/tests/news/__init__.py b/portage_with_autodep/pym/portage/tests/news/__init__.py
new file mode 100644
index 0000000..28a753f
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/news/__init__.py
@@ -0,0 +1,3 @@
+# tests/portage.news/__init__.py -- Portage Unit Test functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/portage_with_autodep/pym/portage/tests/news/__test__ b/portage_with_autodep/pym/portage/tests/news/__test__
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/news/__test__
diff --git a/portage_with_autodep/pym/portage/tests/news/test_NewsItem.py b/portage_with_autodep/pym/portage/tests/news/test_NewsItem.py
new file mode 100644
index 0000000..a4e76f3
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/news/test_NewsItem.py
@@ -0,0 +1,95 @@
+# test_NewsItem.py -- Portage Unit Testing Functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.tests import TestCase
+from portage.news import NewsItem
+from portage.dbapi.virtual import testdbapi
+from tempfile import mkstemp
+# TODO(antarus) Make newsitem use a loader so we can load using a string instead of a tempfile
+
+class NewsItemTestCase(TestCase):
+ """These tests suck: they use your running config instead of making their own"""
+ fakeItem = """
+Title: YourSQL Upgrades from 4.0 to 4.1
+Author: Ciaran McCreesh <ciaranm@gentoo.org>
+Content-Type: text/plain
+Posted: 01-Nov-2005
+Revision: 1
+#Display-If-Installed:
+#Display-If-Profile:
+#Display-If-Arch:
+
+YourSQL databases created using YourSQL version 4.0 are incompatible
+with YourSQL version 4.1 or later. There is no reliable way to
+automate the database format conversion, so action from the system
+administrator is required before an upgrade can take place.
+
+Please see the Gentoo YourSQL Upgrade Guide for instructions:
+
+ http://www.gentoo.org/doc/en/yoursql-upgrading.xml
+
+Also see the official YourSQL documentation:
+
+ http://dev.yoursql.com/doc/refman/4.1/en/upgrading-from-4-0.html
+
+After upgrading, you should also recompile any packages which link
+against YourSQL:
+
+ revdep-rebuild --library=libyoursqlclient.so.12
+
+The revdep-rebuild tool is provided by app-portage/gentoolkit.
+"""
+ def setUp(self):
+ self.profile = "/usr/portage/profiles/default-linux/x86/2007.0/"
+ self.keywords = "x86"
+ # Use fake/test dbapi to avoid slow tests
+ self.vardb = testdbapi()
+ # self.vardb.inject_cpv('sys-apps/portage-2.0', { 'SLOT' : 0 })
+ # Consumers only use ARCH, so avoid portage.settings by using a dict
+ self.settings = { 'ARCH' : 'x86' }
+
+ def testDisplayIfProfile(self):
+ tmpItem = self.fakeItem[:].replace("#Display-If-Profile:", "Display-If-Profile: %s" %
+ self.profile)
+
+ item = self._processItem(tmpItem)
+ try:
+ self.assertTrue(item.isRelevant(self.vardb, self.settings, self.profile),
+ msg="Expected %s to be relevant, but it was not!" % tmpItem)
+ finally:
+ os.unlink(item.path)
+
+ def testDisplayIfInstalled(self):
+ tmpItem = self.fakeItem[:].replace("#Display-If-Installed:", "Display-If-Installed: %s" %
+ "sys-apps/portage")
+
+ item = self._processItem(tmpItem)
+ try:
+ self.assertTrue(item.isRelevant(self.vardb, self.settings, self.profile),
+ msg="Expected %s to be relevant, but it was not!" % tmpItem)
+ finally:
+ os.unlink(item.path)
+
+ def testDisplayIfKeyword(self):
+ tmpItem = self.fakeItem[:].replace("#Display-If-Keyword:", "Display-If-Keyword: %s" %
+ self.keywords)
+
+ item = self._processItem(tmpItem)
+ try:
+ self.assertTrue(item.isRelevant(self.vardb, self.settings, self.profile),
+ msg="Expected %s to be relevant, but it was not!" % tmpItem)
+ finally:
+ os.unlink(item.path)
+
+ def _processItem(self, item):
+ fd, filename = mkstemp()
+ f = os.fdopen(fd, 'w')
+ f.write(item)
+ f.close()
+ try:
+ return NewsItem(filename, 0)
+ except TypeError:
+ self.fail("Error while processing news item %s" % filename)
diff --git a/portage_with_autodep/pym/portage/tests/process/__init__.py b/portage_with_autodep/pym/portage/tests/process/__init__.py
new file mode 100644
index 0000000..d19e353
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/process/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 1998-2008 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/portage_with_autodep/pym/portage/tests/process/__test__ b/portage_with_autodep/pym/portage/tests/process/__test__
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/process/__test__
diff --git a/portage_with_autodep/pym/portage/tests/process/test_poll.py b/portage_with_autodep/pym/portage/tests/process/test_poll.py
new file mode 100644
index 0000000..ee6ee0c
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/process/test_poll.py
@@ -0,0 +1,39 @@
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.tests import TestCase
+from _emerge.PollScheduler import PollScheduler
+from _emerge.PipeReader import PipeReader
+from _emerge.SpawnProcess import SpawnProcess
+
+class PipeReaderTestCase(TestCase):
+
+ def testPipeReader(self):
+ """
+ Use a poll loop to read data from a pipe and assert that
+ the data written to the pipe is identical to the data
+ read from the pipe.
+ """
+
+ test_string = 2 * "blah blah blah\n"
+
+ scheduler = PollScheduler().sched_iface
+ master_fd, slave_fd = os.pipe()
+ master_file = os.fdopen(master_fd, 'rb', 0)
+ slave_file = os.fdopen(slave_fd, 'wb')
+ producer = SpawnProcess(
+ args=["bash", "-c", "echo -n '%s'" % test_string],
+ env=os.environ, fd_pipes={1:slave_fd},
+ scheduler=scheduler)
+ producer.start()
+ slave_file.close()
+
+ consumer = PipeReader(
+ input_files={"producer" : master_file},
+ scheduler=scheduler)
+
+ consumer.start()
+ consumer.wait()
+ output = consumer.getvalue().decode('ascii', 'replace')
+ self.assertEqual(test_string, output)
diff --git a/portage_with_autodep/pym/portage/tests/resolver/ResolverPlayground.py b/portage_with_autodep/pym/portage/tests/resolver/ResolverPlayground.py
new file mode 100644
index 0000000..6a8e3c1
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/ResolverPlayground.py
@@ -0,0 +1,690 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from itertools import permutations
+import shutil
+import sys
+import tempfile
+import portage
+from portage import os
+from portage.const import PORTAGE_BASE_PATH
+from portage.dbapi.vartree import vartree
+from portage.dbapi.porttree import portagetree
+from portage.dbapi.bintree import binarytree
+from portage.dep import Atom, _repo_separator
+from portage.package.ebuild.config import config
+from portage.package.ebuild.digestgen import digestgen
+from portage._sets import load_default_config
+from portage._sets.base import InternalPackageSet
+from portage.versions import catsplit
+
+import _emerge
+from _emerge.actions import calc_depclean
+from _emerge.Blocker import Blocker
+from _emerge.create_depgraph_params import create_depgraph_params
+from _emerge.depgraph import backtrack_depgraph
+from _emerge.RootConfig import RootConfig
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+class ResolverPlayground(object):
+ """
+ This class helps to create the necessary files on disk and
+ the needed settings instances, etc. for the resolver to do
+ its work.
+ """
+
+ config_files = frozenset(("package.use", "package.mask", "package.keywords", \
+ "package.unmask", "package.properties", "package.license", "use.mask", "use.force"))
+
+ def __init__(self, ebuilds={}, installed={}, profile={}, repo_configs={}, \
+ user_config={}, sets={}, world=[], debug=False):
+ """
+ ebuilds: cpv -> metadata mapping simulating available ebuilds.
+ installed: cpv -> metadata mapping simulating installed packages.
+ If a metadata key is missing, it gets a default value.
+ profile: settings defined by the profile.
+ """
+ self.debug = debug
+ self.root = "/"
+ self.eprefix = tempfile.mkdtemp()
+ self.eroot = self.root + self.eprefix.lstrip(os.sep) + os.sep
+ self.portdir = os.path.join(self.eroot, "usr/portage")
+ self.vdbdir = os.path.join(self.eroot, "var/db/pkg")
+ os.makedirs(self.portdir)
+ os.makedirs(self.vdbdir)
+
+ if not debug:
+ portage.util.noiselimit = -2
+
+ self.repo_dirs = {}
+ #Make sure the main repo is always created
+ self._get_repo_dir("test_repo")
+
+ self._create_ebuilds(ebuilds)
+ self._create_installed(installed)
+ self._create_profile(ebuilds, installed, profile, repo_configs, user_config, sets)
+ self._create_world(world)
+
+ self.settings, self.trees = self._load_config()
+
+ self._create_ebuild_manifests(ebuilds)
+
+ portage.util.noiselimit = 0
+
+ def _get_repo_dir(self, repo):
+ """
+ Create the repo directory if needed.
+ """
+ if repo not in self.repo_dirs:
+ if repo == "test_repo":
+ repo_path = self.portdir
+ else:
+ repo_path = os.path.join(self.eroot, "usr", "local", repo)
+
+ self.repo_dirs[repo] = repo_path
+ profile_path = os.path.join(repo_path, "profiles")
+
+ try:
+ os.makedirs(profile_path)
+ except os.error:
+ pass
+
+ repo_name_file = os.path.join(profile_path, "repo_name")
+ f = open(repo_name_file, "w")
+ f.write("%s\n" % repo)
+ f.close()
+
+ return self.repo_dirs[repo]
+
+ def _create_ebuilds(self, ebuilds):
+ for cpv in ebuilds:
+ a = Atom("=" + cpv, allow_repo=True)
+ repo = a.repo
+ if repo is None:
+ repo = "test_repo"
+
+ metadata = ebuilds[cpv].copy()
+ eapi = metadata.pop("EAPI", 0)
+ lic = metadata.pop("LICENSE", "")
+ properties = metadata.pop("PROPERTIES", "")
+ slot = metadata.pop("SLOT", 0)
+ keywords = metadata.pop("KEYWORDS", "x86")
+ iuse = metadata.pop("IUSE", "")
+ depend = metadata.pop("DEPEND", "")
+ rdepend = metadata.pop("RDEPEND", None)
+ pdepend = metadata.pop("PDEPEND", None)
+ required_use = metadata.pop("REQUIRED_USE", None)
+
+ if metadata:
+ raise ValueError("metadata of ebuild '%s' contains unknown keys: %s" % (cpv, metadata.keys()))
+
+ repo_dir = self._get_repo_dir(repo)
+ ebuild_dir = os.path.join(repo_dir, a.cp)
+ ebuild_path = os.path.join(ebuild_dir, a.cpv.split("/")[1] + ".ebuild")
+ try:
+ os.makedirs(ebuild_dir)
+ except os.error:
+ pass
+
+ f = open(ebuild_path, "w")
+ f.write('EAPI="' + str(eapi) + '"\n')
+ f.write('LICENSE="' + str(lic) + '"\n')
+ f.write('PROPERTIES="' + str(properties) + '"\n')
+ f.write('SLOT="' + str(slot) + '"\n')
+ f.write('KEYWORDS="' + str(keywords) + '"\n')
+ f.write('IUSE="' + str(iuse) + '"\n')
+ f.write('DEPEND="' + str(depend) + '"\n')
+ if rdepend is not None:
+ f.write('RDEPEND="' + str(rdepend) + '"\n')
+ if pdepend is not None:
+ f.write('PDEPEND="' + str(pdepend) + '"\n')
+ if required_use is not None:
+ f.write('REQUIRED_USE="' + str(required_use) + '"\n')
+ f.close()
+
+ def _create_ebuild_manifests(self, ebuilds):
+ tmpsettings = config(clone=self.settings)
+ tmpsettings['PORTAGE_QUIET'] = '1'
+ for cpv in ebuilds:
+ a = Atom("=" + cpv, allow_repo=True)
+ repo = a.repo
+ if repo is None:
+ repo = "test_repo"
+
+ repo_dir = self._get_repo_dir(repo)
+ ebuild_dir = os.path.join(repo_dir, a.cp)
+ ebuild_path = os.path.join(ebuild_dir, a.cpv.split("/")[1] + ".ebuild")
+
+ portdb = self.trees[self.root]["porttree"].dbapi
+ tmpsettings['O'] = ebuild_dir
+ if not digestgen(mysettings=tmpsettings, myportdb=portdb):
+ raise AssertionError('digest creation failed for %s' % ebuild_path)
+
+ def _create_installed(self, installed):
+ for cpv in installed:
+ a = Atom("=" + cpv, allow_repo=True)
+ repo = a.repo
+ if repo is None:
+ repo = "test_repo"
+
+ vdb_pkg_dir = os.path.join(self.vdbdir, a.cpv)
+ try:
+ os.makedirs(vdb_pkg_dir)
+ except os.error:
+ pass
+
+ metadata = installed[cpv].copy()
+ eapi = metadata.pop("EAPI", 0)
+ lic = metadata.pop("LICENSE", "")
+ properties = metadata.pop("PROPERTIES", "")
+ slot = metadata.pop("SLOT", 0)
+ keywords = metadata.pop("KEYWORDS", "~x86")
+ iuse = metadata.pop("IUSE", "")
+ use = metadata.pop("USE", "")
+ depend = metadata.pop("DEPEND", "")
+ rdepend = metadata.pop("RDEPEND", None)
+ pdepend = metadata.pop("PDEPEND", None)
+ required_use = metadata.pop("REQUIRED_USE", None)
+
+ if metadata:
+ raise ValueError("metadata of installed '%s' contains unknown keys: %s" % (cpv, metadata.keys()))
+
+ def write_key(key, value):
+ f = open(os.path.join(vdb_pkg_dir, key), "w")
+ f.write(str(value) + "\n")
+ f.close()
+
+ write_key("EAPI", eapi)
+ write_key("LICENSE", lic)
+ write_key("PROPERTIES", properties)
+ write_key("SLOT", slot)
+ write_key("LICENSE", lic)
+ write_key("PROPERTIES", properties)
+ write_key("repository", repo)
+ write_key("KEYWORDS", keywords)
+ write_key("IUSE", iuse)
+ write_key("USE", use)
+ write_key("DEPEND", depend)
+ if rdepend is not None:
+ write_key("RDEPEND", rdepend)
+ if pdepend is not None:
+ write_key("PDEPEND", pdepend)
+ if required_use is not None:
+ write_key("REQUIRED_USE", required_use)
+
+ def _create_profile(self, ebuilds, installed, profile, repo_configs, user_config, sets):
+
+ for repo in self.repo_dirs:
+ repo_dir = self._get_repo_dir(repo)
+ profile_dir = os.path.join(self._get_repo_dir(repo), "profiles")
+
+ #Create $REPO/profiles/categories
+ categories = set()
+ for cpv in ebuilds:
+ ebuilds_repo = Atom("="+cpv, allow_repo=True).repo
+ if ebuilds_repo is None:
+ ebuilds_repo = "test_repo"
+ if ebuilds_repo == repo:
+ categories.add(catsplit(cpv)[0])
+
+ categories_file = os.path.join(profile_dir, "categories")
+ f = open(categories_file, "w")
+ for cat in categories:
+ f.write(cat + "\n")
+ f.close()
+
+ #Create $REPO/profiles/license_groups
+ license_file = os.path.join(profile_dir, "license_groups")
+ f = open(license_file, "w")
+ f.write("EULA TEST\n")
+ f.close()
+
+ repo_config = repo_configs.get(repo)
+ if repo_config:
+ for config_file, lines in repo_config.items():
+ if config_file not in self.config_files:
+ raise ValueError("Unknown config file: '%s'" % config_file)
+
+ file_name = os.path.join(profile_dir, config_file)
+ f = open(file_name, "w")
+ for line in lines:
+ f.write("%s\n" % line)
+ f.close()
+
+ #Create $profile_dir/eclass (we fail to digest the ebuilds if it's not there)
+ os.makedirs(os.path.join(repo_dir, "eclass"))
+
+ if repo == "test_repo":
+ #Create a minimal profile in /usr/portage
+ sub_profile_dir = os.path.join(profile_dir, "default", "linux", "x86", "test_profile")
+ os.makedirs(sub_profile_dir)
+
+ eapi_file = os.path.join(sub_profile_dir, "eapi")
+ f = open(eapi_file, "w")
+ f.write("0\n")
+ f.close()
+
+ make_defaults_file = os.path.join(sub_profile_dir, "make.defaults")
+ f = open(make_defaults_file, "w")
+ f.write("ARCH=\"x86\"\n")
+ f.write("ACCEPT_KEYWORDS=\"x86\"\n")
+ f.close()
+
+ use_force_file = os.path.join(sub_profile_dir, "use.force")
+ f = open(use_force_file, "w")
+ f.write("x86\n")
+ f.close()
+
+ if profile:
+ for config_file, lines in profile.items():
+ if config_file not in self.config_files:
+ raise ValueError("Unknown config file: '%s'" % config_file)
+
+ file_name = os.path.join(sub_profile_dir, config_file)
+ f = open(file_name, "w")
+ for line in lines:
+ f.write("%s\n" % line)
+ f.close()
+
+ #Create profile symlink
+ os.makedirs(os.path.join(self.eroot, "etc"))
+ os.symlink(sub_profile_dir, os.path.join(self.eroot, "etc", "make.profile"))
+
+ user_config_dir = os.path.join(self.eroot, "etc", "portage")
+
+ try:
+ os.makedirs(user_config_dir)
+ except os.error:
+ pass
+
+ repos_conf_file = os.path.join(user_config_dir, "repos.conf")
+ f = open(repos_conf_file, "w")
+ priority = 0
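+ # Each repository gets a section of the form:
+ # [test_repo]
+ # LOCATION=<repo path>
+ # PRIORITY=-1000
+ # with the main repo pinned at -1000 and overlays counting up
+ # from 0.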
+ for repo in sorted(self.repo_dirs.keys()):
+ f.write("[%s]\n" % repo)
+ f.write("LOCATION=%s\n" % self.repo_dirs[repo])
+ if repo == "test_repo":
+ f.write("PRIORITY=%s\n" % -1000)
+ else:
+ f.write("PRIORITY=%s\n" % priority)
+ priority += 1
+ f.close()
+
+ for config_file, lines in user_config.items():
+ if config_file not in self.config_files:
+ raise ValueError("Unknown config file: '%s'" % config_file)
+
+ file_name = os.path.join(user_config_dir, config_file)
+ f = open(file_name, "w")
+ for line in lines:
+ f.write("%s\n" % line)
+ f.close()
+
+ #Create /usr/share/portage/config/sets/portage.conf
+ default_sets_conf_dir = os.path.join(self.eroot, "usr/share/portage/config/sets")
+
+ try:
+ os.makedirs(default_sets_conf_dir)
+ except os.error:
+ pass
+
+ provided_sets_portage_conf = \
+ os.path.join(PORTAGE_BASE_PATH, "cnf/sets/portage.conf")
+ os.symlink(provided_sets_portage_conf, os.path.join(default_sets_conf_dir, "portage.conf"))
+
+ set_config_dir = os.path.join(user_config_dir, "sets")
+
+ try:
+ os.makedirs(set_config_dir)
+ except os.error:
+ pass
+
+ for sets_file, lines in sets.items():
+ file_name = os.path.join(set_config_dir, sets_file)
+ f = open(file_name, "w")
+ for line in lines:
+ f.write("%s\n" % line)
+ f.close()
+
+ def _create_world(self, world):
+ #Create /var/lib/portage/world
+ var_lib_portage = os.path.join(self.eroot, "var", "lib", "portage")
+ os.makedirs(var_lib_portage)
+
+ world_file = os.path.join(var_lib_portage, "world")
+
+ f = open(world_file, "w")
+ for atom in world:
+ f.write("%s\n" % atom)
+ f.close()
+
+ def _load_config(self):
+ portdir_overlay = []
+ for repo_name in sorted(self.repo_dirs):
+ path = self.repo_dirs[repo_name]
+ if path != self.portdir:
+ portdir_overlay.append(path)
+
+ env = {
+ "ACCEPT_KEYWORDS": "x86",
+ "PORTDIR": self.portdir,
+ "PORTDIR_OVERLAY": " ".join(portdir_overlay),
+ 'PORTAGE_TMPDIR' : os.path.join(self.eroot, 'var/tmp'),
+ }
+
+ # Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
+ # need to be inherited by ebuild subprocesses.
+ if 'PORTAGE_USERNAME' in os.environ:
+ env['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
+ if 'PORTAGE_GRPNAME' in os.environ:
+ env['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']
+
+ settings = config(_eprefix=self.eprefix, env=env)
+ settings.lock()
+
+ trees = {
+ self.root: {
+ "vartree": vartree(settings=settings),
+ "porttree": portagetree(self.root, settings=settings),
+ "bintree": binarytree(self.root,
+ os.path.join(self.eroot, "usr/portage/packages"),
+ settings=settings)
+ }
+ }
+
+ for root, root_trees in trees.items():
+ settings = root_trees["vartree"].settings
+ settings._init_dirs()
+ setconfig = load_default_config(settings, root_trees)
+ root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
+
+ return settings, trees
+
+ def run(self, atoms, options={}, action=None):
+ options = options.copy()
+ options["--pretend"] = True
+ if self.debug:
+ options["--debug"] = True
+
+ global_noiselimit = portage.util.noiselimit
+ global_emergelog_disable = _emerge.emergelog._disable
+ try:
+
+ if not self.debug:
+ portage.util.noiselimit = -2
+ _emerge.emergelog._disable = True
+
+ if options.get("--depclean"):
+ rval, cleanlist, ordered, req_pkg_count = \
+ calc_depclean(self.settings, self.trees, None,
+ options, "depclean", InternalPackageSet(initial_atoms=atoms, allow_wildcard=True), None)
+ result = ResolverPlaygroundDepcleanResult( \
+ atoms, rval, cleanlist, ordered, req_pkg_count)
+ else:
+ params = create_depgraph_params(options, action)
+ success, depgraph, favorites = backtrack_depgraph(
+ self.settings, self.trees, options, params, action, atoms, None)
+ depgraph._show_merge_list()
+ depgraph.display_problems()
+ result = ResolverPlaygroundResult(atoms, success, depgraph, favorites)
+ finally:
+ portage.util.noiselimit = global_noiselimit
+ _emerge.emergelog._disable = global_emergelog_disable
+
+ return result
+
+ def run_TestCase(self, test_case):
+ if not isinstance(test_case, ResolverPlaygroundTestCase):
+ raise TypeError("ResolverPlayground needs a ResolverPlaygroundTestCase")
+ for atoms in test_case.requests:
+ result = self.run(atoms, test_case.options, test_case.action)
+ if not test_case.compare_with_result(result):
+ return
+
+ def cleanup(self):
+ portdb = self.trees[self.root]["porttree"].dbapi
+ portdb.close_caches()
+ portage.dbapi.porttree.portdbapi.portdbapi_instances.remove(portdb)
+ if self.debug:
+ print("\nEROOT=%s" % self.eroot)
+ else:
+ shutil.rmtree(self.eroot)
+
+class ResolverPlaygroundTestCase(object):
+
+ def __init__(self, request, **kwargs):
+ self.all_permutations = kwargs.pop("all_permutations", False)
+ self.ignore_mergelist_order = kwargs.pop("ignore_mergelist_order", False)
+ self.ambiguous_merge_order = kwargs.pop("ambiguous_merge_order", False)
+ self.check_repo_names = kwargs.pop("check_repo_names", False)
+ self.merge_order_assertions = kwargs.pop("merge_order_assertions", False)
+
+ if self.all_permutations:
+ self.requests = list(permutations(request))
+ else:
+ self.requests = [request]
+
+ self.options = kwargs.pop("options", {})
+ self.action = kwargs.pop("action", None)
+ self.test_success = True
+ self.fail_msg = None
+ self._checks = kwargs.copy()
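+ # Remaining keyword arguments name attributes of the result to
+ # compare against, e.g. (hypothetical values):
+ # ResolverPlaygroundTestCase(["dev-libs/A"], success=True,
+ # 	mergelist=["dev-libs/B-1", "dev-libs/A-1"])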
+
+ def compare_with_result(self, result):
+ checks = dict.fromkeys(result.checks)
+ for key, value in self._checks.items():
+ if not key in checks:
+ raise KeyError("Not an available check: '%s'" % key)
+ checks[key] = value
+
+ fail_msgs = []
+ for key, value in checks.items():
+ got = getattr(result, key)
+ expected = value
+
+ if key in result.optional_checks and expected is None:
+ continue
+
+ if key == "mergelist":
+ if not self.check_repo_names:
+ #Strip repo names if we don't check them
+ if got:
+ new_got = []
+ for cpv in got:
+ if cpv[:1] == "!":
+ new_got.append(cpv)
+ continue
+ a = Atom("="+cpv, allow_repo=True)
+ new_got.append(a.cpv)
+ got = new_got
+ if expected:
+ new_expected = []
+ for obj in expected:
+ if isinstance(obj, basestring):
+ if obj[:1] == "!":
+ new_expected.append(obj)
+ continue
+ a = Atom("="+obj, allow_repo=True)
+ new_expected.append(a.cpv)
+ continue
+ new_expected.append(set())
+ for cpv in obj:
+ if cpv[:1] != "!":
+ cpv = Atom("="+cpv, allow_repo=True).cpv
+ new_expected[-1].add(cpv)
+ expected = new_expected
+ if self.ignore_mergelist_order and got is not None:
+ got = set(got)
+ expected = set(expected)
+
+ if self.ambiguous_merge_order and got:
+ expected_stack = list(reversed(expected))
+ got_stack = list(reversed(got))
+ new_expected = []
+ match = True
+ while got_stack and expected_stack:
+ got_token = got_stack.pop()
+ expected_obj = expected_stack.pop()
+ if isinstance(expected_obj, basestring):
+ new_expected.append(expected_obj)
+ if got_token == expected_obj:
+ continue
+ # result doesn't match, so stop early
+ match = False
+ break
+ expected_obj = set(expected_obj)
+ try:
+ expected_obj.remove(got_token)
+ except KeyError:
+ # result doesn't match, so stop early
+ match = False
+ break
+ new_expected.append(got_token)
+ while got_stack and expected_obj:
+ got_token = got_stack.pop()
+ try:
+ expected_obj.remove(got_token)
+ except KeyError:
+ match = False
+ break
+ new_expected.append(got_token)
+ if not match:
+ # result doesn't match, so stop early
+ break
+ if expected_obj:
+ # result does not match, so stop early
+ match = False
+ new_expected.append(tuple(expected_obj))
+ break
+ if expected_stack:
+ # result does not match, add leftovers to new_expected
+ match = False
+ expected_stack.reverse()
+ new_expected.extend(expected_stack)
+ expected = new_expected
+
+ if match and self.merge_order_assertions:
+ for node1, node2 in self.merge_order_assertions:
+ if not (got.index(node1) < got.index(node2)):
+ fail_msgs.append("atoms: (" + \
+ ", ".join(result.atoms) + "), key: " + \
+ ("merge_order_assertions, expected: %s" % \
+ str((node1, node2))) + \
+ ", got: " + str(got))
+
+ elif key in ("unstable_keywords", "needed_p_mask_changes") and expected is not None:
+ expected = set(expected)
+
+ if got != expected:
+ fail_msgs.append("atoms: (" + ", ".join(result.atoms) + "), key: " + \
+ key + ", expected: " + str(expected) + ", got: " + str(got))
+ if fail_msgs:
+ self.test_success = False
+ self.fail_msg = "\n".join(fail_msgs)
+ return False
+ return True
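+
+# A minimal sketch of how a test case is typically declared (hypothetical
+# package names; any keyword argument not consumed above must name an
+# entry in the result's "checks" tuple, or compare_with_result() raises
+# KeyError):
+#
+#	ResolverPlaygroundTestCase(
+#		["dev-libs/A"],
+#		options = {"--autounmask": True},
+#		success = True,
+#		mergelist = ["dev-libs/A-1"])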
+
+class ResolverPlaygroundResult(object):
+
+ checks = (
+ "success", "mergelist", "use_changes", "license_changes", "unstable_keywords", "slot_collision_solutions",
+ "circular_dependency_solutions", "needed_p_mask_changes",
+ )
+ optional_checks = (
+ )
+
+ def __init__(self, atoms, success, mydepgraph, favorites):
+ self.atoms = atoms
+ self.success = success
+ self.depgraph = mydepgraph
+ self.favorites = favorites
+ self.mergelist = None
+ self.use_changes = None
+ self.license_changes = None
+ self.unstable_keywords = None
+ self.needed_p_mask_changes = None
+ self.slot_collision_solutions = None
+ self.circular_dependency_solutions = None
+
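+		# The serialized task cache mixes packages with blockers: blockers
+		# are recorded by their atom (carrying the "!" or "!!" prefix),
+		# packages by cpv, with "::repo" appended for anything that does
+		# not come from test_repo.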
+ if self.depgraph._dynamic_config._serialized_tasks_cache is not None:
+ self.mergelist = []
+ for x in self.depgraph._dynamic_config._serialized_tasks_cache:
+ if isinstance(x, Blocker):
+ self.mergelist.append(x.atom)
+ else:
+ repo_str = ""
+ if x.metadata["repository"] != "test_repo":
+ repo_str = _repo_separator + x.metadata["repository"]
+ self.mergelist.append(x.cpv + repo_str)
+
+ if self.depgraph._dynamic_config._needed_use_config_changes:
+ self.use_changes = {}
+ for pkg, needed_use_config_changes in \
+ self.depgraph._dynamic_config._needed_use_config_changes.items():
+ new_use, changes = needed_use_config_changes
+ self.use_changes[pkg.cpv] = changes
+
+ if self.depgraph._dynamic_config._needed_unstable_keywords:
+ self.unstable_keywords = set()
+ for pkg in self.depgraph._dynamic_config._needed_unstable_keywords:
+ self.unstable_keywords.add(pkg.cpv)
+
+ if self.depgraph._dynamic_config._needed_p_mask_changes:
+ self.needed_p_mask_changes = set()
+ for pkg in self.depgraph._dynamic_config._needed_p_mask_changes:
+ self.needed_p_mask_changes.add(pkg.cpv)
+
+ if self.depgraph._dynamic_config._needed_license_changes:
+ self.license_changes = {}
+ for pkg, missing_licenses in self.depgraph._dynamic_config._needed_license_changes.items():
+ self.license_changes[pkg.cpv] = missing_licenses
+
+ if self.depgraph._dynamic_config._slot_conflict_handler is not None:
+ self.slot_collision_solutions = []
+ handler = self.depgraph._dynamic_config._slot_conflict_handler
+
+ for change in handler.changes:
+ new_change = {}
+ for pkg in change:
+ new_change[pkg.cpv] = change[pkg]
+ self.slot_collision_solutions.append(new_change)
+
+ if self.depgraph._dynamic_config._circular_dependency_handler is not None:
+ handler = self.depgraph._dynamic_config._circular_dependency_handler
+ sol = handler.solutions
+			self.circular_dependency_solutions = dict(zip([x.cpv for x in sol.keys()], sol.values()))
+
+class ResolverPlaygroundDepcleanResult(object):
+
+ checks = (
+ "success", "cleanlist", "ordered", "req_pkg_count",
+ )
+ optional_checks = (
+ "ordered", "req_pkg_count",
+ )
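+	# Optional checks are only compared when the test case supplies an
+	# explicit expectation; compare_with_result() skips any optional
+	# check whose expected value is None.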
+
+ def __init__(self, atoms, rval, cleanlist, ordered, req_pkg_count):
+ self.atoms = atoms
+ self.success = rval == 0
+ self.cleanlist = cleanlist
+ self.ordered = ordered
+ self.req_pkg_count = req_pkg_count
diff --git a/portage_with_autodep/pym/portage/tests/resolver/__init__.py b/portage_with_autodep/pym/portage/tests/resolver/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/portage_with_autodep/pym/portage/tests/resolver/__test__ b/portage_with_autodep/pym/portage/tests/resolver/__test__
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/__test__
diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_autounmask.py b/portage_with_autodep/pym/portage/tests/resolver/test_autounmask.py
new file mode 100644
index 0000000..54c435f
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/test_autounmask.py
@@ -0,0 +1,326 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class AutounmaskTestCase(TestCase):
+
+ def testAutounmask(self):
+
+ ebuilds = {
+ #ebuilds to test use changes
+ "dev-libs/A-1": { "SLOT": 1, "DEPEND": "dev-libs/B[foo]", "EAPI": 2},
+ "dev-libs/A-2": { "SLOT": 2, "DEPEND": "dev-libs/B[bar]", "EAPI": 2},
+ "dev-libs/B-1": { "DEPEND": "foo? ( dev-libs/C ) bar? ( dev-libs/D )", "IUSE": "foo bar"},
+ "dev-libs/C-1": {},
+ "dev-libs/D-1": {},
+
+ #ebuilds to test if we allow changing of masked or forced flags
+ "dev-libs/E-1": { "SLOT": 1, "DEPEND": "dev-libs/F[masked-flag]", "EAPI": 2},
+ "dev-libs/E-2": { "SLOT": 2, "DEPEND": "dev-libs/G[-forced-flag]", "EAPI": 2},
+ "dev-libs/F-1": { "IUSE": "masked-flag"},
+ "dev-libs/G-1": { "IUSE": "forced-flag"},
+
+ #ebuilds to test keyword changes
+ "app-misc/Z-1": { "KEYWORDS": "~x86", "DEPEND": "app-misc/Y" },
+ "app-misc/Y-1": { "KEYWORDS": "~x86" },
+ "app-misc/W-1": {},
+ "app-misc/W-2": { "KEYWORDS": "~x86" },
+ "app-misc/V-1": { "KEYWORDS": "~x86", "DEPEND": ">=app-misc/W-2"},
+
+ #ebuilds to test mask and keyword changes
+ "app-text/A-1": {},
+ "app-text/B-1": { "KEYWORDS": "~x86" },
+ "app-text/C-1": { "KEYWORDS": "" },
+ "app-text/D-1": { "KEYWORDS": "~x86" },
+ "app-text/D-2": { "KEYWORDS": "" },
+
+ #ebuilds for mixed test for || dep handling
+ "sci-libs/K-1": { "DEPEND": " || ( sci-libs/L[bar] || ( sci-libs/M sci-libs/P ) )", "EAPI": 2},
+ "sci-libs/K-2": { "DEPEND": " || ( sci-libs/L[bar] || ( sci-libs/P sci-libs/M ) )", "EAPI": 2},
+ "sci-libs/K-3": { "DEPEND": " || ( sci-libs/M || ( sci-libs/L[bar] sci-libs/P ) )", "EAPI": 2},
+ "sci-libs/K-4": { "DEPEND": " || ( sci-libs/M || ( sci-libs/P sci-libs/L[bar] ) )", "EAPI": 2},
+ "sci-libs/K-5": { "DEPEND": " || ( sci-libs/P || ( sci-libs/L[bar] sci-libs/M ) )", "EAPI": 2},
+ "sci-libs/K-6": { "DEPEND": " || ( sci-libs/P || ( sci-libs/M sci-libs/L[bar] ) )", "EAPI": 2},
+ "sci-libs/K-7": { "DEPEND": " || ( sci-libs/M sci-libs/L[bar] )", "EAPI": 2},
+ "sci-libs/K-8": { "DEPEND": " || ( sci-libs/L[bar] sci-libs/M )", "EAPI": 2},
+
+ "sci-libs/L-1": { "IUSE": "bar" },
+ "sci-libs/M-1": { "KEYWORDS": "~x86" },
+ "sci-libs/P-1": { },
+
+ #ebuilds to test these nice "required by cat/pkg[foo]" messages
+ "dev-util/Q-1": { "DEPEND": "foo? ( dev-util/R[bar] )", "IUSE": "+foo", "EAPI": 2 },
+ "dev-util/Q-2": { "RDEPEND": "!foo? ( dev-util/R[bar] )", "IUSE": "foo", "EAPI": 2 },
+ "dev-util/R-1": { "IUSE": "bar" },
+
+ #ebuilds to test interaction with REQUIRED_USE
+ "app-portage/A-1": { "DEPEND": "app-portage/B[foo]", "EAPI": 2 },
+ "app-portage/A-2": { "DEPEND": "app-portage/B[foo=]", "IUSE": "+foo", "REQUIRED_USE": "foo", "EAPI": "4" },
+
+ "app-portage/B-1": { "IUSE": "foo +bar", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" },
+ "app-portage/C-1": { "IUSE": "+foo +bar", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" },
+ }
+
+ test_cases = (
+ #Test USE changes.
+ #The simple case.
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A:1"],
+ options = {"--autounmask": "n"},
+ success = False),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A:1"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1"],
+ use_changes = { "dev-libs/B-1": {"foo": True} } ),
+
+ #Make sure we restart if needed.
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A:1", "dev-libs/B"],
+ options = {"--autounmask": True},
+ all_permutations = True,
+ success = False,
+ mergelist = ["dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1"],
+ use_changes = { "dev-libs/B-1": {"foo": True} } ),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A:1", "dev-libs/A:2", "dev-libs/B"],
+ options = {"--autounmask": True},
+ all_permutations = True,
+ success = False,
+ mergelist = ["dev-libs/D-1", "dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1", "dev-libs/A-2"],
+ ignore_mergelist_order = True,
+ use_changes = { "dev-libs/B-1": {"foo": True, "bar": True} } ),
+
+ #Test keywording.
+ #The simple case.
+
+ ResolverPlaygroundTestCase(
+ ["app-misc/Z"],
+ options = {"--autounmask": "n"},
+ success = False),
+ ResolverPlaygroundTestCase(
+ ["app-misc/Z"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["app-misc/Y-1", "app-misc/Z-1"],
+ unstable_keywords = ["app-misc/Y-1", "app-misc/Z-1"]),
+
+ #Make sure that the backtracking for slot conflicts handles our mess.
+
+ ResolverPlaygroundTestCase(
+ ["=app-misc/V-1", "app-misc/W"],
+ options = {"--autounmask": True},
+ all_permutations = True,
+ success = False,
+ mergelist = ["app-misc/W-2", "app-misc/V-1"],
+ unstable_keywords = ["app-misc/W-2", "app-misc/V-1"]),
+
+ #Mixed testing
+ #Make sure we don't change use for something in a || dep if there is another choice
+ #that needs no change.
+
+ ResolverPlaygroundTestCase(
+ ["=sci-libs/K-1"],
+ options = {"--autounmask": True},
+ success = True,
+ mergelist = ["sci-libs/P-1", "sci-libs/K-1"]),
+ ResolverPlaygroundTestCase(
+ ["=sci-libs/K-2"],
+ options = {"--autounmask": True},
+ success = True,
+ mergelist = ["sci-libs/P-1", "sci-libs/K-2"]),
+ ResolverPlaygroundTestCase(
+ ["=sci-libs/K-3"],
+ options = {"--autounmask": True},
+ success = True,
+ mergelist = ["sci-libs/P-1", "sci-libs/K-3"]),
+ ResolverPlaygroundTestCase(
+ ["=sci-libs/K-4"],
+ options = {"--autounmask": True},
+ success = True,
+ mergelist = ["sci-libs/P-1", "sci-libs/K-4"]),
+ ResolverPlaygroundTestCase(
+ ["=sci-libs/K-5"],
+ options = {"--autounmask": True},
+ success = True,
+ mergelist = ["sci-libs/P-1", "sci-libs/K-5"]),
+ ResolverPlaygroundTestCase(
+ ["=sci-libs/K-6"],
+ options = {"--autounmask": True},
+ success = True,
+ mergelist = ["sci-libs/P-1", "sci-libs/K-6"]),
+
+ #Make sure we prefer use changes over keyword changes.
+ ResolverPlaygroundTestCase(
+ ["=sci-libs/K-7"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["sci-libs/L-1", "sci-libs/K-7"],
+ use_changes = { "sci-libs/L-1": { "bar": True } }),
+ ResolverPlaygroundTestCase(
+ ["=sci-libs/K-8"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["sci-libs/L-1", "sci-libs/K-8"],
+ use_changes = { "sci-libs/L-1": { "bar": True } }),
+
+ #Test these nice "required by cat/pkg[foo]" messages.
+ ResolverPlaygroundTestCase(
+ ["=dev-util/Q-1"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["dev-util/R-1", "dev-util/Q-1"],
+ use_changes = { "dev-util/R-1": { "bar": True } }),
+ ResolverPlaygroundTestCase(
+ ["=dev-util/Q-2"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["dev-util/R-1", "dev-util/Q-2"],
+ use_changes = { "dev-util/R-1": { "bar": True } }),
+
+ #Test interaction with REQUIRED_USE.
+ ResolverPlaygroundTestCase(
+ ["=app-portage/A-1"],
+ options = { "--autounmask": True },
+ use_changes = None,
+ success = False),
+ ResolverPlaygroundTestCase(
+ ["=app-portage/A-2"],
+ options = { "--autounmask": True },
+ use_changes = None,
+ success = False),
+ ResolverPlaygroundTestCase(
+ ["=app-portage/C-1"],
+ options = { "--autounmask": True },
+ use_changes = None,
+ success = False),
+
+ #Make sure we don't change masked/forced flags.
+ ResolverPlaygroundTestCase(
+ ["dev-libs/E:1"],
+ options = {"--autounmask": True},
+ use_changes = None,
+ success = False),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/E:2"],
+ options = {"--autounmask": True},
+ use_changes = None,
+ success = False),
+
+ #Test mask and keyword changes.
+ ResolverPlaygroundTestCase(
+ ["app-text/A"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["app-text/A-1"],
+ needed_p_mask_changes = ["app-text/A-1"]),
+ ResolverPlaygroundTestCase(
+ ["app-text/B"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["app-text/B-1"],
+ unstable_keywords = ["app-text/B-1"],
+ needed_p_mask_changes = ["app-text/B-1"]),
+ ResolverPlaygroundTestCase(
+ ["app-text/C"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["app-text/C-1"],
+ unstable_keywords = ["app-text/C-1"],
+ needed_p_mask_changes = ["app-text/C-1"]),
+ #Make sure unstable keyword is preferred over missing keyword
+ ResolverPlaygroundTestCase(
+ ["app-text/D"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["app-text/D-1"],
+ unstable_keywords = ["app-text/D-1"]),
+ #Test missing keyword
+ ResolverPlaygroundTestCase(
+ ["=app-text/D-2"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["app-text/D-2"],
+ unstable_keywords = ["app-text/D-2"])
+ )
+
+ profile = {
+ "use.mask":
+ (
+ "masked-flag",
+ ),
+ "use.force":
+ (
+ "forced-flag",
+ ),
+ "package.mask":
+ (
+ "app-text/A",
+ "app-text/B",
+ "app-text/C",
+ ),
+ }
+
+ playground = ResolverPlayground(ebuilds=ebuilds, profile=profile)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testAutounmaskForLicenses(self):
+
+ ebuilds = {
+ "dev-libs/A-1": { "LICENSE": "TEST" },
+ "dev-libs/B-1": { "LICENSE": "TEST", "IUSE": "foo", "KEYWORDS": "~x86"},
+ "dev-libs/C-1": { "DEPEND": "dev-libs/B[foo]", "EAPI": 2 },
+
+ "dev-libs/D-1": { "DEPEND": "dev-libs/E dev-libs/F", "LICENSE": "TEST" },
+ "dev-libs/E-1": { "LICENSE": "TEST" },
+ "dev-libs/E-2": { "LICENSE": "TEST" },
+ "dev-libs/F-1": { "DEPEND": "=dev-libs/E-1", "LICENSE": "TEST" },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-1"],
+				options = {"--autounmask": "n"},
+ success = False),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-1"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["dev-libs/A-1"],
+ license_changes = { "dev-libs/A-1": set(["TEST"]) }),
+
+ #Test license+keyword+use change at once.
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/C-1"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["dev-libs/B-1", "dev-libs/C-1"],
+ license_changes = { "dev-libs/B-1": set(["TEST"]) },
+ unstable_keywords = ["dev-libs/B-1"],
+ use_changes = { "dev-libs/B-1": { "foo": True } }),
+
+ #Test license with backtracking.
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/D-1"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["dev-libs/E-1", "dev-libs/F-1", "dev-libs/D-1"],
+ license_changes = { "dev-libs/D-1": set(["TEST"]), "dev-libs/E-1": set(["TEST"]), "dev-libs/E-2": set(["TEST"]), "dev-libs/F-1": set(["TEST"]) }),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_backtracking.py b/portage_with_autodep/pym/portage/tests/resolver/test_backtracking.py
new file mode 100644
index 0000000..fc49306
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/test_backtracking.py
@@ -0,0 +1,169 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class BacktrackingTestCase(TestCase):
+
+ def testBacktracking(self):
+ ebuilds = {
+ "dev-libs/A-1": {},
+ "dev-libs/A-2": {},
+ "dev-libs/B-1": { "DEPEND": "dev-libs/A" },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-1", "dev-libs/B"],
+ all_permutations = True,
+ mergelist = ["dev-libs/A-1", "dev-libs/B-1"],
+ success = True),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testHittingTheBacktrackLimit(self):
+ ebuilds = {
+ "dev-libs/A-1": {},
+ "dev-libs/A-2": {},
+ "dev-libs/B-1": {},
+ "dev-libs/B-2": {},
+ "dev-libs/C-1": { "DEPEND": "dev-libs/A dev-libs/B" },
+ "dev-libs/D-1": { "DEPEND": "=dev-libs/A-1 =dev-libs/B-1" },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C", "dev-libs/D"],
+ all_permutations = True,
+ mergelist = ["dev-libs/A-1", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
+ ignore_mergelist_order = True,
+ success = True),
+ #This one hits the backtrack limit. Be aware that this depends on the argument order.
+ ResolverPlaygroundTestCase(
+ ["dev-libs/D", "dev-libs/C"],
+ options = { "--backtrack": 1 },
+ mergelist = ["dev-libs/A-1", "dev-libs/B-1", "dev-libs/A-2", "dev-libs/B-2", "dev-libs/C-1", "dev-libs/D-1"],
+ ignore_mergelist_order = True,
+ slot_collision_solutions = [],
+ success = False),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testBacktrackingGoodVersionFirst(self):
+ """
+		When backtracking due to slot conflicts, we used to mask the version
+		that had been pulled in first. This is not always a good idea; mask
+		the highest version instead.
+ """
+
+ ebuilds = {
+ "dev-libs/A-1": { "DEPEND": "=dev-libs/C-1 dev-libs/B" },
+ "dev-libs/B-1": { "DEPEND": "=dev-libs/C-1" },
+ "dev-libs/B-2": { "DEPEND": "=dev-libs/C-2" },
+ "dev-libs/C-1": { },
+ "dev-libs/C-2": { },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ mergelist = ["dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1", ],
+ success = True),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testBacktrackWithoutUpdates(self):
+ """
+		If --update is not given, we might have to mask the old installed version later.
+ """
+
+ ebuilds = {
+ "dev-libs/A-1": { "DEPEND": "dev-libs/Z" },
+ "dev-libs/B-1": { "DEPEND": ">=dev-libs/Z-2" },
+ "dev-libs/Z-1": { },
+ "dev-libs/Z-2": { },
+ }
+
+ installed = {
+ "dev-libs/Z-1": { "USE": "" },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B", "dev-libs/A"],
+ all_permutations = True,
+ mergelist = ["dev-libs/Z-2", "dev-libs/B-1", "dev-libs/A-1", ],
+ ignore_mergelist_order = True,
+ success = True),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testBacktrackMissedUpdates(self):
+ """
+ An update is missed due to a dependency on an older version.
+ """
+
+ ebuilds = {
+ "dev-libs/A-1": { },
+ "dev-libs/A-2": { },
+ "dev-libs/B-1": { "RDEPEND": "<=dev-libs/A-1" },
+ }
+
+ installed = {
+ "dev-libs/A-1": { "USE": "" },
+ "dev-libs/B-1": { "USE": "", "RDEPEND": "<=dev-libs/A-1" },
+ }
+
+		options = {"--update": True, "--deep": True, "--selective": True}
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A", "dev-libs/B"],
+ options = options,
+ all_permutations = True,
+ mergelist = [],
+ success = True),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_circular_dependencies.py b/portage_with_autodep/pym/portage/tests/resolver/test_circular_dependencies.py
new file mode 100644
index 0000000..f8331ac
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/test_circular_dependencies.py
@@ -0,0 +1,84 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class CircularDependencyTestCase(TestCase):
+
+ #TODO:
+ # use config change by autounmask
+ # conflict on parent's parent
+ # difference in RDEPEND and DEPEND
+	#	are there any priorities other than buildtime and runtime?
+ # play with use.{mask,force}
+ # play with REQUIRED_USE
+
+
+ def testCircularDependency(self):
+
+ ebuilds = {
+ "dev-libs/Z-1": { "DEPEND": "foo? ( !bar? ( dev-libs/Y ) )", "IUSE": "+foo bar", "EAPI": 1 },
+ "dev-libs/Z-2": { "DEPEND": "foo? ( dev-libs/Y ) !bar? ( dev-libs/Y )", "IUSE": "+foo bar", "EAPI": 1 },
+ "dev-libs/Z-3": { "DEPEND": "foo? ( !bar? ( dev-libs/Y ) ) foo? ( dev-libs/Y ) !bar? ( dev-libs/Y )", "IUSE": "+foo bar", "EAPI": 1 },
+ "dev-libs/Y-1": { "DEPEND": "dev-libs/Z" },
+ "dev-libs/W-1": { "DEPEND": "dev-libs/Z[foo] dev-libs/Y", "EAPI": 2 },
+ "dev-libs/W-2": { "DEPEND": "dev-libs/Z[foo=] dev-libs/Y", "IUSE": "+foo", "EAPI": 2 },
+ "dev-libs/W-3": { "DEPEND": "dev-libs/Z[bar] dev-libs/Y", "EAPI": 2 },
+
+ "app-misc/A-1": { "DEPEND": "foo? ( =app-misc/B-1 )", "IUSE": "+foo bar", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" },
+ "app-misc/A-2": { "DEPEND": "foo? ( =app-misc/B-2 ) bar? ( =app-misc/B-2 )", "IUSE": "+foo bar", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" },
+ "app-misc/B-1": { "DEPEND": "=app-misc/A-1" },
+ "app-misc/B-2": { "DEPEND": "=app-misc/A-2" },
+ }
+
+ test_cases = (
+ #Simple tests
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/Z-1"],
+ circular_dependency_solutions = { "dev-libs/Y-1": frozenset([frozenset([("foo", False)]), frozenset([("bar", True)])])},
+ success = False),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/Z-2"],
+ circular_dependency_solutions = { "dev-libs/Y-1": frozenset([frozenset([("foo", False), ("bar", True)])])},
+ success = False),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/Z-3"],
+ circular_dependency_solutions = { "dev-libs/Y-1": frozenset([frozenset([("foo", False), ("bar", True)])])},
+ success = False),
+
+ #Conflict on parent
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/W-1"],
+ circular_dependency_solutions = {},
+ success = False),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/W-2"],
+ circular_dependency_solutions = { "dev-libs/Y-1": frozenset([frozenset([("foo", False), ("bar", True)])])},
+ success = False),
+
+ #Conflict with autounmask
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/W-3"],
+ circular_dependency_solutions = { "dev-libs/Y-1": frozenset([frozenset([("foo", False)])])},
+ use_changes = { "dev-libs/Z-3": {"bar": True}},
+ success = False),
+
+ #Conflict with REQUIRED_USE
+ ResolverPlaygroundTestCase(
+ ["=app-misc/B-1"],
+ circular_dependency_solutions = { "app-misc/B-1": frozenset([frozenset([("foo", False), ("bar", True)])])},
+ success = False),
+ ResolverPlaygroundTestCase(
+ ["=app-misc/B-2"],
+ circular_dependency_solutions = {},
+ success = False),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_depclean.py b/portage_with_autodep/pym/portage/tests/resolver/test_depclean.py
new file mode 100644
index 0000000..ba70144
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/test_depclean.py
@@ -0,0 +1,285 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class SimpleDepcleanTestCase(TestCase):
+
+ def testSimpleDepclean(self):
+ ebuilds = {
+ "dev-libs/A-1": {},
+ "dev-libs/B-1": {},
+ }
+ installed = {
+ "dev-libs/A-1": {},
+ "dev-libs/B-1": {},
+ }
+
+ world = (
+ "dev-libs/A",
+ )
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ [],
+ options = {"--depclean": True},
+ success = True,
+ cleanlist = ["dev-libs/B-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+class DepcleanWithDepsTestCase(TestCase):
+
+ def testDepcleanWithDeps(self):
+ ebuilds = {
+ "dev-libs/A-1": { "RDEPEND": "dev-libs/C" },
+ "dev-libs/B-1": { "RDEPEND": "dev-libs/D" },
+ "dev-libs/C-1": {},
+ "dev-libs/D-1": { "RDEPEND": "dev-libs/E" },
+ "dev-libs/E-1": { "RDEPEND": "dev-libs/F" },
+ "dev-libs/F-1": {},
+ }
+ installed = {
+ "dev-libs/A-1": { "RDEPEND": "dev-libs/C" },
+ "dev-libs/B-1": { "RDEPEND": "dev-libs/D" },
+ "dev-libs/C-1": {},
+ "dev-libs/D-1": { "RDEPEND": "dev-libs/E" },
+ "dev-libs/E-1": { "RDEPEND": "dev-libs/F" },
+ "dev-libs/F-1": {},
+ }
+
+ world = (
+ "dev-libs/A",
+ )
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ [],
+ options = {"--depclean": True},
+ success = True,
+ cleanlist = ["dev-libs/B-1", "dev-libs/D-1",
+ "dev-libs/E-1", "dev-libs/F-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+class DepcleanWithInstalledMaskedTestCase(TestCase):
+
+ def testDepcleanWithInstalledMasked(self):
+ """
+ Test case for bug 332719.
+		emerge --depclean ignores that B is masked by license and removes C.
+		The next 'emerge -uDN world' doesn't pull in B and installs C again.
+ """
+ ebuilds = {
+ "dev-libs/A-1": { "RDEPEND": "|| ( dev-libs/B dev-libs/C )" },
+ "dev-libs/B-1": { "LICENSE": "TEST", "KEYWORDS": "x86" },
+ "dev-libs/C-1": { "KEYWORDS": "x86" },
+ }
+ installed = {
+ "dev-libs/A-1": { "RDEPEND": "|| ( dev-libs/B dev-libs/C )" },
+ "dev-libs/B-1": { "LICENSE": "TEST", "KEYWORDS": "x86" },
+ "dev-libs/C-1": { "KEYWORDS": "x86" },
+ }
+
+ world = (
+ "dev-libs/A",
+ )
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ [],
+ options = {"--depclean": True},
+ success = True,
+ #cleanlist = ["dev-libs/C-1"]),
+ cleanlist = ["dev-libs/B-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+class DepcleanInstalledKeywordMaskedSlotTestCase(TestCase):
+
+ def testDepcleanInstalledKeywordMaskedSlot(self):
+ """
+ Verify that depclean removes newer slot
+		Verify that depclean removes a newer slot that is
+		masked by KEYWORDS (see bug #350285).
+ ebuilds = {
+ "dev-libs/A-1": { "RDEPEND": "|| ( =dev-libs/B-2.7* =dev-libs/B-2.6* )" },
+ "dev-libs/B-2.6": { "SLOT":"2.6", "KEYWORDS": "x86" },
+ "dev-libs/B-2.7": { "SLOT":"2.7", "KEYWORDS": "~x86" },
+ }
+ installed = {
+ "dev-libs/A-1": { "EAPI" : "3", "RDEPEND": "|| ( dev-libs/B:2.7 dev-libs/B:2.6 )" },
+ "dev-libs/B-2.6": { "SLOT":"2.6", "KEYWORDS": "x86" },
+ "dev-libs/B-2.7": { "SLOT":"2.7", "KEYWORDS": "~x86" },
+ }
+
+ world = (
+ "dev-libs/A",
+ )
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ [],
+ options = {"--depclean": True},
+ success = True,
+ cleanlist = ["dev-libs/B-2.7"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+class DepcleanWithExcludeTestCase(TestCase):
+
+ def testDepcleanWithExclude(self):
+
+ installed = {
+ "dev-libs/A-1": {},
+ "dev-libs/B-1": { "RDEPEND": "dev-libs/A" },
+ }
+
+ test_cases = (
+ #Without --exclude.
+ ResolverPlaygroundTestCase(
+ [],
+ options = {"--depclean": True},
+ success = True,
+ cleanlist = ["dev-libs/B-1", "dev-libs/A-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--depclean": True},
+ success = True,
+ cleanlist = []),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ options = {"--depclean": True},
+ success = True,
+ cleanlist = ["dev-libs/B-1"]),
+
+ #With --exclude
+ ResolverPlaygroundTestCase(
+ [],
+ options = {"--depclean": True, "--exclude": ["dev-libs/A"]},
+ success = True,
+ cleanlist = ["dev-libs/B-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ options = {"--depclean": True, "--exclude": ["dev-libs/B"]},
+ success = True,
+ cleanlist = []),
+ )
+
+ playground = ResolverPlayground(installed=installed)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+class DepcleanWithExcludeAndSlotsTestCase(TestCase):
+
+ def testDepcleanWithExcludeAndSlots(self):
+
+ installed = {
+ "dev-libs/Z-1": { "SLOT": 1},
+ "dev-libs/Z-2": { "SLOT": 2},
+ "dev-libs/Y-1": { "RDEPEND": "=dev-libs/Z-1", "SLOT": 1 },
+ "dev-libs/Y-2": { "RDEPEND": "=dev-libs/Z-2", "SLOT": 2 },
+ }
+
+ world = [ "dev-libs/Y" ]
+
+ test_cases = (
+ #Without --exclude.
+ ResolverPlaygroundTestCase(
+ [],
+ options = {"--depclean": True},
+ success = True,
+ cleanlist = ["dev-libs/Y-1", "dev-libs/Z-1"]),
+ ResolverPlaygroundTestCase(
+ [],
+ options = {"--depclean": True, "--exclude": ["dev-libs/Z"]},
+ success = True,
+ cleanlist = ["dev-libs/Y-1"]),
+ ResolverPlaygroundTestCase(
+ [],
+ options = {"--depclean": True, "--exclude": ["dev-libs/Y"]},
+ success = True,
+ cleanlist = []),
+ )
+
+ playground = ResolverPlayground(installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+class DepcleanAndWildcardsTestCase(TestCase):
+
+ def testDepcleanAndWildcards(self):
+
+ installed = {
+ "dev-libs/A-1": { "RDEPEND": "dev-libs/B" },
+ "dev-libs/B-1": {},
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["*/*"],
+ options = {"--depclean": True},
+ success = True,
+ cleanlist = ["dev-libs/A-1", "dev-libs/B-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/*"],
+ options = {"--depclean": True},
+ success = True,
+ cleanlist = ["dev-libs/A-1", "dev-libs/B-1"]),
+ ResolverPlaygroundTestCase(
+ ["*/A"],
+ options = {"--depclean": True},
+ success = True,
+ cleanlist = ["dev-libs/A-1"]),
+ ResolverPlaygroundTestCase(
+ ["*/B"],
+ options = {"--depclean": True},
+ success = True,
+ cleanlist = []),
+ )
+
+ playground = ResolverPlayground(installed=installed)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_depth.py b/portage_with_autodep/pym/portage/tests/resolver/test_depth.py
new file mode 100644
index 0000000..cb1e2dd
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/test_depth.py
@@ -0,0 +1,252 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class ResolverDepthTestCase(TestCase):
+
+ def testResolverDepth(self):
+
+ ebuilds = {
+ "dev-libs/A-1": {"RDEPEND" : "dev-libs/B"},
+ "dev-libs/A-2": {"RDEPEND" : "dev-libs/B"},
+ "dev-libs/B-1": {"RDEPEND" : "dev-libs/C"},
+ "dev-libs/B-2": {"RDEPEND" : "dev-libs/C"},
+ "dev-libs/C-1": {},
+ "dev-libs/C-2": {},
+
+ "virtual/libusb-0" : {"EAPI" :"2", "SLOT" : "0", "RDEPEND" : "|| ( >=dev-libs/libusb-0.1.12-r1:0 dev-libs/libusb-compat >=sys-freebsd/freebsd-lib-8.0[usb] )"},
+ "virtual/libusb-1" : {"EAPI" :"2", "SLOT" : "1", "RDEPEND" : ">=dev-libs/libusb-1.0.4:1"},
+ "dev-libs/libusb-0.1.13" : {},
+ "dev-libs/libusb-1.0.5" : {"SLOT":"1"},
+ "dev-libs/libusb-compat-1" : {},
+ "sys-freebsd/freebsd-lib-8": {"IUSE" : "+usb"},
+
+ "sys-fs/udev-164" : {"EAPI" : "1", "RDEPEND" : "virtual/libusb:0"},
+
+ "virtual/jre-1.5.0" : {"SLOT" : "1.5", "RDEPEND" : "|| ( =dev-java/sun-jre-bin-1.5.0* =virtual/jdk-1.5.0* )"},
+ "virtual/jre-1.5.0-r1" : {"SLOT" : "1.5", "RDEPEND" : "|| ( =dev-java/sun-jre-bin-1.5.0* =virtual/jdk-1.5.0* )"},
+ "virtual/jre-1.6.0" : {"SLOT" : "1.6", "RDEPEND" : "|| ( =dev-java/sun-jre-bin-1.6.0* =virtual/jdk-1.6.0* )"},
+ "virtual/jre-1.6.0-r1" : {"SLOT" : "1.6", "RDEPEND" : "|| ( =dev-java/sun-jre-bin-1.6.0* =virtual/jdk-1.6.0* )"},
+ "virtual/jdk-1.5.0" : {"SLOT" : "1.5", "RDEPEND" : "|| ( =dev-java/sun-jdk-1.5.0* dev-java/gcj-jdk )"},
+ "virtual/jdk-1.5.0-r1" : {"SLOT" : "1.5", "RDEPEND" : "|| ( =dev-java/sun-jdk-1.5.0* dev-java/gcj-jdk )"},
+ "virtual/jdk-1.6.0" : {"SLOT" : "1.6", "RDEPEND" : "|| ( =dev-java/icedtea-6* =dev-java/sun-jdk-1.6.0* )"},
+ "virtual/jdk-1.6.0-r1" : {"SLOT" : "1.6", "RDEPEND" : "|| ( =dev-java/icedtea-6* =dev-java/sun-jdk-1.6.0* )"},
+ "dev-java/gcj-jdk-4.5" : {},
+ "dev-java/gcj-jdk-4.5-r1" : {},
+ "dev-java/icedtea-6.1" : {},
+ "dev-java/icedtea-6.1-r1" : {},
+ "dev-java/sun-jdk-1.5" : {"SLOT" : "1.5"},
+ "dev-java/sun-jdk-1.6" : {"SLOT" : "1.6"},
+ "dev-java/sun-jre-bin-1.5" : {"SLOT" : "1.5"},
+ "dev-java/sun-jre-bin-1.6" : {"SLOT" : "1.6"},
+
+ "dev-java/ant-core-1.8" : {"DEPEND" : ">=virtual/jdk-1.4"},
+ "dev-db/hsqldb-1.8" : {"RDEPEND" : ">=virtual/jre-1.6"},
+ }
+
+ installed = {
+ "dev-libs/A-1": {"RDEPEND" : "dev-libs/B"},
+ "dev-libs/B-1": {"RDEPEND" : "dev-libs/C"},
+ "dev-libs/C-1": {},
+
+ "virtual/jre-1.5.0" : {"SLOT" : "1.5", "RDEPEND" : "|| ( =virtual/jdk-1.5.0* =dev-java/sun-jre-bin-1.5.0* )"},
+ "virtual/jre-1.6.0" : {"SLOT" : "1.6", "RDEPEND" : "|| ( =virtual/jdk-1.6.0* =dev-java/sun-jre-bin-1.6.0* )"},
+ "virtual/jdk-1.5.0" : {"SLOT" : "1.5", "RDEPEND" : "|| ( =dev-java/sun-jdk-1.5.0* dev-java/gcj-jdk )"},
+ "virtual/jdk-1.6.0" : {"SLOT" : "1.6", "RDEPEND" : "|| ( =dev-java/icedtea-6* =dev-java/sun-jdk-1.6.0* )"},
+ "dev-java/gcj-jdk-4.5" : {},
+ "dev-java/icedtea-6.1" : {},
+
+ "virtual/libusb-0" : {"EAPI" :"2", "SLOT" : "0", "RDEPEND" : "|| ( >=dev-libs/libusb-0.1.12-r1:0 dev-libs/libusb-compat >=sys-freebsd/freebsd-lib-8.0[usb] )"},
+ }
+
+ world = ["dev-libs/A"]
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--update": True, "--deep": 0},
+ success = True,
+ mergelist = ["dev-libs/A-2"]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--update": True, "--deep": 1},
+ success = True,
+ mergelist = ["dev-libs/B-2", "dev-libs/A-2"]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--update": True, "--deep": 2},
+ success = True,
+ mergelist = ["dev-libs/C-2", "dev-libs/B-2", "dev-libs/A-2"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ["dev-libs/C-2", "dev-libs/B-2", "dev-libs/A-2"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--emptytree": True},
+ success = True,
+ mergelist = ["dev-libs/C-2", "dev-libs/B-2", "dev-libs/A-2"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--selective": True, "--deep": True},
+ success = True,
+ mergelist = []),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--deep": 2},
+ success = True,
+ mergelist = ["dev-libs/A-2"]),
+
+ ResolverPlaygroundTestCase(
+ ["virtual/jre"],
+ options = {},
+ success = True,
+ mergelist = ['virtual/jre-1.6.0-r1']),
+
+ ResolverPlaygroundTestCase(
+ ["virtual/jre"],
+ options = {"--deep" : True},
+ success = True,
+ mergelist = ['virtual/jre-1.6.0-r1']),
+
+ # Test bug #141118, where we avoid pulling in
+ # redundant deps, satisfying nested virtuals
+ # as efficiently as possible.
+ ResolverPlaygroundTestCase(
+ ["virtual/jre"],
+ options = {"--selective" : True, "--deep" : True},
+ success = True,
+ mergelist = []),
+
+ # Test bug #150361, where depgraph._greedy_slots()
+ # is triggered by --update with AtomArg.
+ ResolverPlaygroundTestCase(
+ ["virtual/jre"],
+ options = {"--update" : True},
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = [('virtual/jre-1.6.0-r1', 'virtual/jre-1.5.0-r1')]),
+
+ # Recursively traversed virtual dependencies, and their
+ # direct dependencies, are considered to have the same
+ # depth as direct dependencies.
+ ResolverPlaygroundTestCase(
+ ["virtual/jre"],
+ options = {"--update" : True, "--deep" : 1},
+ success = True,
+ ambiguous_merge_order = True,
+ merge_order_assertions=(('dev-java/icedtea-6.1-r1', 'virtual/jdk-1.6.0-r1'), ('virtual/jdk-1.6.0-r1', 'virtual/jre-1.6.0-r1'),
+ ('dev-java/gcj-jdk-4.5-r1', 'virtual/jdk-1.5.0-r1'), ('virtual/jdk-1.5.0-r1', 'virtual/jre-1.5.0-r1')),
+ mergelist = [('dev-java/icedtea-6.1-r1', 'dev-java/gcj-jdk-4.5-r1', 'virtual/jdk-1.6.0-r1', 'virtual/jdk-1.5.0-r1', 'virtual/jre-1.6.0-r1', 'virtual/jre-1.5.0-r1')]),
+
+ ResolverPlaygroundTestCase(
+ ["virtual/jre:1.5"],
+ options = {"--update" : True},
+ success = True,
+ mergelist = ['virtual/jre-1.5.0-r1']),
+
+ ResolverPlaygroundTestCase(
+ ["virtual/jre:1.6"],
+ options = {"--update" : True},
+ success = True,
+ mergelist = ['virtual/jre-1.6.0-r1']),
+
+ # Test that we don't pull in any unnecessary updates
+ # when --update is not specified, even though we
+ # specified --deep.
+ ResolverPlaygroundTestCase(
+ ["dev-java/ant-core"],
+ options = {"--deep" : True},
+ success = True,
+ mergelist = ["dev-java/ant-core-1.8"]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-java/ant-core"],
+ options = {"--update" : True},
+ success = True,
+ mergelist = ["dev-java/ant-core-1.8"]),
+
+ # Recursively traversed virtual dependencies, and their
+ # direct dependencies, are considered to have the same
+ # depth as direct dependencies.
+ ResolverPlaygroundTestCase(
+ ["dev-java/ant-core"],
+ options = {"--update" : True, "--deep" : 1},
+ success = True,
+ mergelist = ['dev-java/icedtea-6.1-r1', 'virtual/jdk-1.6.0-r1', 'dev-java/ant-core-1.8']),
+
+ ResolverPlaygroundTestCase(
+ ["dev-db/hsqldb"],
+ options = {"--deep" : True},
+ success = True,
+ mergelist = ["dev-db/hsqldb-1.8"]),
+
+ # Don't traverse deps of an installed package with --deep=0,
+ # even if it's a virtual.
+ ResolverPlaygroundTestCase(
+ ["virtual/libusb:0"],
+ options = {"--selective" : True, "--deep" : 0},
+ success = True,
+ mergelist = []),
+
+ # Satisfy unsatisfied dep of installed package with --deep=1.
+ ResolverPlaygroundTestCase(
+ ["virtual/libusb:0"],
+ options = {"--selective" : True, "--deep" : 1},
+ success = True,
+ mergelist = ['dev-libs/libusb-0.1.13']),
+
+ # Pull in direct dep of virtual, even with --deep=0.
+ ResolverPlaygroundTestCase(
+ ["sys-fs/udev"],
+ options = {"--deep" : 0},
+ success = True,
+ mergelist = ['dev-libs/libusb-0.1.13', 'sys-fs/udev-164']),
+
+ # Test --nodeps with direct virtual deps.
+ ResolverPlaygroundTestCase(
+ ["sys-fs/udev"],
+ options = {"--nodeps" : True},
+ success = True,
+ mergelist = ["sys-fs/udev-164"]),
+
+ # Test that --nodeps overrides --deep.
+ ResolverPlaygroundTestCase(
+ ["sys-fs/udev"],
+ options = {"--nodeps" : True, "--deep" : True},
+ success = True,
+ mergelist = ["sys-fs/udev-164"]),
+
+ # Test that --nodeps overrides --emptytree.
+ ResolverPlaygroundTestCase(
+ ["sys-fs/udev"],
+ options = {"--nodeps" : True, "--emptytree" : True},
+ success = True,
+ mergelist = ["sys-fs/udev-164"]),
+
+ # Test --emptytree with virtuals.
+ ResolverPlaygroundTestCase(
+ ["sys-fs/udev"],
+ options = {"--emptytree" : True},
+ success = True,
+ mergelist = ['dev-libs/libusb-0.1.13', 'virtual/libusb-0', 'sys-fs/udev-164']),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed,
+ world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_eapi.py b/portage_with_autodep/pym/portage/tests/resolver/test_eapi.py
new file mode 100644
index 0000000..525b585
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/test_eapi.py
@@ -0,0 +1,115 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class EAPITestCase(TestCase):
+
+ def testEAPI(self):
+
+ ebuilds = {
+ #EAPI-1: IUSE-defaults
+ "dev-libs/A-1.0": { "EAPI": 0, "IUSE": "+foo" },
+ "dev-libs/A-1.1": { "EAPI": 1, "IUSE": "+foo" },
+ "dev-libs/A-1.2": { "EAPI": 2, "IUSE": "+foo" },
+ "dev-libs/A-1.3": { "EAPI": 3, "IUSE": "+foo" },
+ "dev-libs/A-1.4": { "EAPI": "4", "IUSE": "+foo" },
+
+ #EAPI-1: slot deps
+ "dev-libs/A-2.0": { "EAPI": 0, "DEPEND": "dev-libs/B:0" },
+ "dev-libs/A-2.1": { "EAPI": 1, "DEPEND": "dev-libs/B:0" },
+ "dev-libs/A-2.2": { "EAPI": 2, "DEPEND": "dev-libs/B:0" },
+ "dev-libs/A-2.3": { "EAPI": 3, "DEPEND": "dev-libs/B:0" },
+ "dev-libs/A-2.4": { "EAPI": "4", "DEPEND": "dev-libs/B:0" },
+
+ #EAPI-2: use deps
+ "dev-libs/A-3.0": { "EAPI": 0, "DEPEND": "dev-libs/B[foo]" },
+ "dev-libs/A-3.1": { "EAPI": 1, "DEPEND": "dev-libs/B[foo]" },
+ "dev-libs/A-3.2": { "EAPI": 2, "DEPEND": "dev-libs/B[foo]" },
+ "dev-libs/A-3.3": { "EAPI": 3, "DEPEND": "dev-libs/B[foo]" },
+ "dev-libs/A-3.4": { "EAPI": "4", "DEPEND": "dev-libs/B[foo]" },
+
+ #EAPI-2: strong blocks
+ "dev-libs/A-4.0": { "EAPI": 0, "DEPEND": "!!dev-libs/B" },
+ "dev-libs/A-4.1": { "EAPI": 1, "DEPEND": "!!dev-libs/B" },
+ "dev-libs/A-4.2": { "EAPI": 2, "DEPEND": "!!dev-libs/B" },
+ "dev-libs/A-4.3": { "EAPI": 3, "DEPEND": "!!dev-libs/B" },
+ "dev-libs/A-4.4": { "EAPI": "4", "DEPEND": "!!dev-libs/B" },
+
+ #EAPI-4: slot operator deps
+ #~ "dev-libs/A-5.0": { "EAPI": 0, "DEPEND": "dev-libs/B:*" },
+ #~ "dev-libs/A-5.1": { "EAPI": 1, "DEPEND": "dev-libs/B:*" },
+ #~ "dev-libs/A-5.2": { "EAPI": 2, "DEPEND": "dev-libs/B:*" },
+ #~ "dev-libs/A-5.3": { "EAPI": 3, "DEPEND": "dev-libs/B:*" },
+ #~ "dev-libs/A-5.4": { "EAPI": "4", "DEPEND": "dev-libs/B:*" },
+
+ #EAPI-4: use dep defaults
+ "dev-libs/A-6.0": { "EAPI": 0, "DEPEND": "dev-libs/B[bar(+)]" },
+ "dev-libs/A-6.1": { "EAPI": 1, "DEPEND": "dev-libs/B[bar(+)]" },
+ "dev-libs/A-6.2": { "EAPI": 2, "DEPEND": "dev-libs/B[bar(+)]" },
+ "dev-libs/A-6.3": { "EAPI": 3, "DEPEND": "dev-libs/B[bar(+)]" },
+ "dev-libs/A-6.4": { "EAPI": "4", "DEPEND": "dev-libs/B[bar(+)]" },
+
+ #EAPI-4: REQUIRED_USE
+ "dev-libs/A-7.0": { "EAPI": 0, "IUSE": "foo bar", "REQUIRED_USE": "|| ( foo bar )" },
+ "dev-libs/A-7.1": { "EAPI": 1, "IUSE": "foo +bar", "REQUIRED_USE": "|| ( foo bar )" },
+ "dev-libs/A-7.2": { "EAPI": 2, "IUSE": "foo +bar", "REQUIRED_USE": "|| ( foo bar )" },
+ "dev-libs/A-7.3": { "EAPI": 3, "IUSE": "foo +bar", "REQUIRED_USE": "|| ( foo bar )" },
+ "dev-libs/A-7.4": { "EAPI": "4", "IUSE": "foo +bar", "REQUIRED_USE": "|| ( foo bar )" },
+
+ "dev-libs/B-1": {"EAPI": 1, "IUSE": "+foo"},
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(["=dev-libs/A-1.0"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-1.1"], success = True, mergelist = ["dev-libs/A-1.1"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-1.2"], success = True, mergelist = ["dev-libs/A-1.2"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-1.3"], success = True, mergelist = ["dev-libs/A-1.3"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-1.4"], success = True, mergelist = ["dev-libs/A-1.4"]),
+
+ ResolverPlaygroundTestCase(["=dev-libs/A-2.0"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-2.1"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-2.1"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-2.2"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-2.2"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-2.3"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-2.3"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-2.4"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-2.4"]),
+
+ ResolverPlaygroundTestCase(["=dev-libs/A-3.0"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-3.1"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-3.2"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-3.2"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-3.3"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-3.3"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-3.4"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-3.4"]),
+
+ ResolverPlaygroundTestCase(["=dev-libs/A-4.0"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-4.1"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-4.2"], success = True, mergelist = ["dev-libs/A-4.2"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-4.3"], success = True, mergelist = ["dev-libs/A-4.3"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-4.4"], success = True, mergelist = ["dev-libs/A-4.4"]),
+
+ ResolverPlaygroundTestCase(["=dev-libs/A-5.0"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-5.1"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-5.2"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-5.3"], success = False),
+ # not implemented: EAPI-4: slot operator deps
+ #~ ResolverPlaygroundTestCase(["=dev-libs/A-5.4"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-5.4"]),
+
+ ResolverPlaygroundTestCase(["=dev-libs/A-6.0"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-6.1"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-6.2"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-6.3"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-6.4"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-6.4"]),
+
+ ResolverPlaygroundTestCase(["=dev-libs/A-7.0"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-7.1"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-7.2"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-7.3"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-7.4"], success = True, mergelist = ["dev-libs/A-7.4"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_merge_order.py b/portage_with_autodep/pym/portage/tests/resolver/test_merge_order.py
new file mode 100644
index 0000000..0a52c81
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/test_merge_order.py
@@ -0,0 +1,453 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class MergeOrderTestCase(TestCase):
+
+ def testMergeOrder(self):
+ ebuilds = {
+ "app-misc/blocker-buildtime-a-1" : {},
+ "app-misc/blocker-buildtime-unbuilt-a-1" : {
+ "DEPEND" : "!app-misc/installed-blocker-a",
+ },
+ "app-misc/blocker-buildtime-unbuilt-hard-a-1" : {
+ "EAPI" : "2",
+ "DEPEND" : "!!app-misc/installed-blocker-a",
+ },
+ "app-misc/blocker-update-order-a-1" : {},
+ "app-misc/blocker-update-order-hard-a-1" : {},
+ "app-misc/blocker-update-order-hard-unsolvable-a-1" : {},
+ "app-misc/blocker-runtime-a-1" : {},
+ "app-misc/blocker-runtime-b-1" : {},
+ "app-misc/blocker-runtime-hard-a-1" : {},
+ "app-misc/circ-buildtime-a-0": {},
+ "app-misc/circ-buildtime-a-1": {
+ "RDEPEND": "app-misc/circ-buildtime-b",
+ },
+ "app-misc/circ-buildtime-b-1": {
+ "RDEPEND": "app-misc/circ-buildtime-c",
+ },
+ "app-misc/circ-buildtime-c-1": {
+ "DEPEND": "app-misc/circ-buildtime-a",
+ },
+ "app-misc/circ-buildtime-unsolvable-a-1": {
+ "RDEPEND": "app-misc/circ-buildtime-unsolvable-b",
+ },
+ "app-misc/circ-buildtime-unsolvable-b-1": {
+ "RDEPEND": "app-misc/circ-buildtime-unsolvable-c",
+ },
+ "app-misc/circ-buildtime-unsolvable-c-1": {
+ "DEPEND": "app-misc/circ-buildtime-unsolvable-a",
+ },
+ "app-misc/circ-post-runtime-a-1": {
+ "PDEPEND": "app-misc/circ-post-runtime-b",
+ },
+ "app-misc/circ-post-runtime-b-1": {
+ "RDEPEND": "app-misc/circ-post-runtime-c",
+ },
+ "app-misc/circ-post-runtime-c-1": {
+ "RDEPEND": "app-misc/circ-post-runtime-a",
+ },
+ "app-misc/circ-runtime-a-1": {
+ "RDEPEND": "app-misc/circ-runtime-b",
+ },
+ "app-misc/circ-runtime-b-1": {
+ "RDEPEND": "app-misc/circ-runtime-c",
+ },
+ "app-misc/circ-runtime-c-1": {
+ "RDEPEND": "app-misc/circ-runtime-a",
+ },
+ "app-misc/circ-satisfied-a-0": {
+ "RDEPEND": "app-misc/circ-satisfied-b",
+ },
+ "app-misc/circ-satisfied-a-1": {
+ "RDEPEND": "app-misc/circ-satisfied-b",
+ },
+ "app-misc/circ-satisfied-b-0": {
+ "RDEPEND": "app-misc/circ-satisfied-c",
+ },
+ "app-misc/circ-satisfied-b-1": {
+ "RDEPEND": "app-misc/circ-satisfied-c",
+ },
+ "app-misc/circ-satisfied-c-0": {
+ "DEPEND": "app-misc/circ-satisfied-a",
+ "RDEPEND": "app-misc/circ-satisfied-a",
+ },
+ "app-misc/circ-satisfied-c-1": {
+ "DEPEND": "app-misc/circ-satisfied-a",
+ "RDEPEND": "app-misc/circ-satisfied-a",
+ },
+ "app-misc/circ-smallest-a-1": {
+ "RDEPEND": "app-misc/circ-smallest-b",
+ },
+ "app-misc/circ-smallest-b-1": {
+ "RDEPEND": "app-misc/circ-smallest-a",
+ },
+ "app-misc/circ-smallest-c-1": {
+ "RDEPEND": "app-misc/circ-smallest-d",
+ },
+ "app-misc/circ-smallest-d-1": {
+ "RDEPEND": "app-misc/circ-smallest-e",
+ },
+ "app-misc/circ-smallest-e-1": {
+ "RDEPEND": "app-misc/circ-smallest-c",
+ },
+ "app-misc/circ-smallest-f-1": {
+ "RDEPEND": "app-misc/circ-smallest-g app-misc/circ-smallest-a app-misc/circ-smallest-c",
+ },
+ "app-misc/circ-smallest-g-1": {
+ "RDEPEND": "app-misc/circ-smallest-f",
+ },
+ "app-misc/installed-blocker-a-1" : {
+ "EAPI" : "2",
+ "DEPEND" : "!app-misc/blocker-buildtime-a",
+ "RDEPEND" : "!app-misc/blocker-runtime-a !app-misc/blocker-runtime-b !!app-misc/blocker-runtime-hard-a",
+ },
+ "app-misc/installed-old-version-blocks-a-1" : {
+ "RDEPEND" : "!app-misc/blocker-update-order-a",
+ },
+ "app-misc/installed-old-version-blocks-a-2" : {},
+ "app-misc/installed-old-version-blocks-hard-a-1" : {
+ "EAPI" : "2",
+ "RDEPEND" : "!!app-misc/blocker-update-order-hard-a",
+ },
+ "app-misc/installed-old-version-blocks-hard-a-2" : {},
+ "app-misc/installed-old-version-blocks-hard-unsolvable-a-1" : {
+ "EAPI" : "2",
+ "RDEPEND" : "!!app-misc/blocker-update-order-hard-unsolvable-a",
+ },
+ "app-misc/installed-old-version-blocks-hard-unsolvable-a-2" : {
+ "DEPEND" : "app-misc/blocker-update-order-hard-unsolvable-a",
+ "RDEPEND" : "",
+ },
+ "app-misc/some-app-a-1": {
+ "RDEPEND": "app-misc/circ-runtime-a app-misc/circ-runtime-b",
+ },
+ "app-misc/some-app-b-1": {
+ "RDEPEND": "app-misc/circ-post-runtime-a app-misc/circ-post-runtime-b",
+ },
+ "app-misc/some-app-c-1": {
+ "RDEPEND": "app-misc/circ-buildtime-a app-misc/circ-buildtime-b",
+ },
+ "app-admin/eselect-python-20100321" : {},
+ "sys-apps/portage-2.1.9.42" : {
+ "DEPEND" : "dev-lang/python",
+ "RDEPEND" : "dev-lang/python",
+ },
+ "sys-apps/portage-2.1.9.49" : {
+ "DEPEND" : "dev-lang/python >=app-admin/eselect-python-20091230",
+ "RDEPEND" : "dev-lang/python",
+ },
+ "dev-lang/python-3.1" : {},
+ "dev-lang/python-3.2" : {},
+ "virtual/libc-0" : {
+ "RDEPEND" : "sys-libs/glibc",
+ },
+ "sys-devel/gcc-4.5.2" : {},
+ "sys-devel/binutils-2.18" : {},
+ "sys-devel/binutils-2.20.1" : {},
+ "sys-libs/glibc-2.11" : {
+ "DEPEND" : "virtual/os-headers sys-devel/gcc sys-devel/binutils",
+ "RDEPEND": "",
+ },
+ "sys-libs/glibc-2.13" : {
+ "DEPEND" : "virtual/os-headers sys-devel/gcc sys-devel/binutils",
+ "RDEPEND": "",
+ },
+ "virtual/os-headers-0" : {
+ "RDEPEND" : "sys-kernel/linux-headers",
+ },
+ "sys-kernel/linux-headers-2.6.38": {
+ "DEPEND" : "app-arch/xz-utils",
+ "RDEPEND": "",
+ },
+ "sys-kernel/linux-headers-2.6.39": {
+ "DEPEND" : "app-arch/xz-utils",
+ "RDEPEND": "",
+ },
+ "app-arch/xz-utils-5.0.1" : {},
+ "app-arch/xz-utils-5.0.2" : {},
+ "dev-util/pkgconfig-0.25-r2" : {},
+ "kde-base/kdelibs-3.5.7" : {
+ "PDEPEND" : "kde-misc/kdnssd-avahi",
+ },
+ "kde-misc/kdnssd-avahi-0.1.2" : {
+ "DEPEND" : "kde-base/kdelibs app-arch/xz-utils dev-util/pkgconfig",
+ "RDEPEND" : "kde-base/kdelibs",
+ },
+ "kde-base/kdnssd-3.5.7" : {
+ "DEPEND" : "kde-base/kdelibs",
+ "RDEPEND" : "kde-base/kdelibs",
+ },
+ "kde-base/libkdegames-3.5.7" : {
+ "DEPEND" : "kde-base/kdelibs",
+ "RDEPEND" : "kde-base/kdelibs",
+ },
+ "kde-base/kmines-3.5.7" : {
+ "DEPEND" : "kde-base/libkdegames",
+ "RDEPEND" : "kde-base/libkdegames",
+ },
+ "media-video/libav-0.7_pre20110327" : {
+ "EAPI" : "2",
+ "IUSE" : "X +encode",
+ "RDEPEND" : "!media-video/ffmpeg",
+ },
+ "media-video/ffmpeg-0.7_rc1" : {
+ "EAPI" : "2",
+ "IUSE" : "X +encode",
+ },
+ "virtual/ffmpeg-0.6.90" : {
+ "EAPI" : "2",
+ "IUSE" : "X +encode",
+ "RDEPEND" : "|| ( >=media-video/ffmpeg-0.6.90_rc0-r2[X=,encode=] >=media-video/libav-0.6.90_rc[X=,encode=] )",
+ },
+ }
+
+ installed = {
+ "app-misc/circ-buildtime-a-0": {},
+ "app-misc/circ-satisfied-a-0": {
+ "RDEPEND": "app-misc/circ-satisfied-b",
+ },
+ "app-misc/circ-satisfied-b-0": {
+ "RDEPEND": "app-misc/circ-satisfied-c",
+ },
+ "app-misc/circ-satisfied-c-0": {
+ "DEPEND": "app-misc/circ-satisfied-a",
+ "RDEPEND": "app-misc/circ-satisfied-a",
+ },
+ "app-misc/installed-blocker-a-1" : {
+ "EAPI" : "2",
+ "DEPEND" : "!app-misc/blocker-buildtime-a",
+ "RDEPEND" : "!app-misc/blocker-runtime-a !app-misc/blocker-runtime-b !!app-misc/blocker-runtime-hard-a",
+ },
+ "app-misc/installed-old-version-blocks-a-1" : {
+ "RDEPEND" : "!app-misc/blocker-update-order-a",
+ },
+ "app-misc/installed-old-version-blocks-hard-a-1" : {
+ "EAPI" : "2",
+ "RDEPEND" : "!!app-misc/blocker-update-order-hard-a",
+ },
+ "app-misc/installed-old-version-blocks-hard-unsolvable-a-1" : {
+ "EAPI" : "2",
+ "RDEPEND" : "!!app-misc/blocker-update-order-hard-unsolvable-a",
+ },
+ "sys-apps/portage-2.1.9.42" : {
+ "DEPEND" : "dev-lang/python",
+ "RDEPEND" : "dev-lang/python",
+ },
+ "dev-lang/python-3.1" : {},
+ "virtual/libc-0" : {
+ "RDEPEND" : "sys-libs/glibc",
+ },
+ "sys-devel/binutils-2.18" : {},
+ "sys-libs/glibc-2.11" : {
+ "DEPEND" : "virtual/os-headers sys-devel/gcc sys-devel/binutils",
+ "RDEPEND": "",
+ },
+ "virtual/os-headers-0" : {
+ "RDEPEND" : "sys-kernel/linux-headers",
+ },
+ "sys-kernel/linux-headers-2.6.38": {
+ "DEPEND" : "app-arch/xz-utils",
+ "RDEPEND": "",
+ },
+ "app-arch/xz-utils-5.0.1" : {},
+ "media-video/ffmpeg-0.7_rc1" : {
+ "EAPI" : "2",
+ "IUSE" : "X +encode",
+ "USE" : "encode",
+ },
+ "virtual/ffmpeg-0.6.90" : {
+ "EAPI" : "2",
+ "IUSE" : "X +encode",
+ "USE" : "encode",
+ "RDEPEND" : "|| ( >=media-video/ffmpeg-0.6.90_rc0-r2[X=,encode=] >=media-video/libav-0.6.90_rc[X=,encode=] )",
+ },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["app-misc/some-app-a"],
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = [("app-misc/circ-runtime-a-1", "app-misc/circ-runtime-b-1", "app-misc/circ-runtime-c-1"), "app-misc/some-app-a-1"]),
+ ResolverPlaygroundTestCase(
+ ["app-misc/some-app-a"],
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = [("app-misc/circ-runtime-c-1", "app-misc/circ-runtime-b-1", "app-misc/circ-runtime-a-1"), "app-misc/some-app-a-1"]),
+ # Test unsolvable circular dep that is RDEPEND in one
+ # direction and DEPEND in the other.
+ ResolverPlaygroundTestCase(
+ ["app-misc/circ-buildtime-unsolvable-a"],
+ success = False,
+ circular_dependency_solutions = {}),
+ # Test optimal merge order for a circular dep that is
+ # RDEPEND in one direction and DEPEND in the other.
+ # This requires an installed instance of the DEPEND
+ # package in order to be solvable.
+ ResolverPlaygroundTestCase(
+ ["app-misc/some-app-c", "app-misc/circ-buildtime-a"],
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = [("app-misc/circ-buildtime-b-1", "app-misc/circ-buildtime-c-1"), "app-misc/circ-buildtime-a-1", "app-misc/some-app-c-1"]),
+ # Test optimal merge order for a circular dep that is
+ # RDEPEND in one direction and PDEPEND in the other.
+ ResolverPlaygroundTestCase(
+ ["app-misc/some-app-b"],
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = ["app-misc/circ-post-runtime-a-1", ("app-misc/circ-post-runtime-b-1", "app-misc/circ-post-runtime-c-1"), "app-misc/some-app-b-1"]),
+ # Test optimal merge order for a circular dep that is
+ # RDEPEND in one direction and DEPEND in the other,
+ # with all dependencies initially satisfied. Optimally,
+ # the DEPEND/buildtime dep should be updated before the
+ # package that depends on it, even though it's feasible
+ # to update it later since it is already satisfied.
+ ResolverPlaygroundTestCase(
+ ["app-misc/circ-satisfied-a", "app-misc/circ-satisfied-b", "app-misc/circ-satisfied-c"],
+ success = True,
+ all_permutations = True,
+ ambiguous_merge_order = True,
+ merge_order_assertions = (("app-misc/circ-satisfied-a-1", "app-misc/circ-satisfied-c-1"),),
+ mergelist = [("app-misc/circ-satisfied-a-1", "app-misc/circ-satisfied-b-1", "app-misc/circ-satisfied-c-1")]),
+ # In the case of multiple runtime cycles, where some cycles
+ # may depend on smaller independent cycles, it's optimal
+ # to merge smaller independent cycles before other cycles
+ # that depend on them.
+ ResolverPlaygroundTestCase(
+ ["app-misc/circ-smallest-a", "app-misc/circ-smallest-c", "app-misc/circ-smallest-f"],
+ success = True,
+ ambiguous_merge_order = True,
+ all_permutations = True,
+ mergelist = [('app-misc/circ-smallest-a-1', 'app-misc/circ-smallest-b-1'),
+ ('app-misc/circ-smallest-c-1', 'app-misc/circ-smallest-d-1', 'app-misc/circ-smallest-e-1'),
+ ('app-misc/circ-smallest-f-1', 'app-misc/circ-smallest-g-1')]),
+ # installed package has buildtime-only blocker
+ # that should be ignored
+ ResolverPlaygroundTestCase(
+ ["app-misc/blocker-buildtime-a"],
+ success = True,
+ mergelist = ["app-misc/blocker-buildtime-a-1"]),
+ # We're installing a package that an old version of
+ # an installed package blocks. However, an update is
+ # available to the old package. The old package should
+ # be updated first, in order to solve the blocker without
+ # any need for blocking packages to temporarily overlap.
+ ResolverPlaygroundTestCase(
+ ["app-misc/blocker-update-order-a", "app-misc/installed-old-version-blocks-a"],
+ success = True,
+ all_permutations = True,
+ mergelist = ["app-misc/installed-old-version-blocks-a-2", "app-misc/blocker-update-order-a-1"]),
+ # This is the same as above but with a hard blocker. The hard
+ # blocker is solved automatically since the update makes it
+ # irrelevant.
+ ResolverPlaygroundTestCase(
+ ["app-misc/blocker-update-order-hard-a", "app-misc/installed-old-version-blocks-hard-a"],
+ success = True,
+ all_permutations = True,
+ mergelist = ["app-misc/installed-old-version-blocks-hard-a-2", "app-misc/blocker-update-order-hard-a-1"]),
+ # This is similar to the above case except that it's unsolvable
+ # due to merge order, unless bug 250286 is implemented so that
+ # the installed blocker will be unmerged before installation
+			# of the package it blocks (rather than afterward, as a soft
+			# blocker would be). The "unmerge before" behavior requested
+ # in bug 250286 must be optional since essential programs or
+ # libraries may be temporarily unavailable during a
+ # non-overlapping update like this.
+ ResolverPlaygroundTestCase(
+ ["app-misc/blocker-update-order-hard-unsolvable-a", "app-misc/installed-old-version-blocks-hard-unsolvable-a"],
+ success = False,
+ all_permutations = True,
+ ambiguous_merge_order = True,
+ merge_order_assertions = (('app-misc/blocker-update-order-hard-unsolvable-a-1', 'app-misc/installed-old-version-blocks-hard-unsolvable-a-2'),),
+ mergelist = [('app-misc/blocker-update-order-hard-unsolvable-a-1', 'app-misc/installed-old-version-blocks-hard-unsolvable-a-2', '!!app-misc/blocker-update-order-hard-unsolvable-a')]),
+ # The installed package has runtime blockers that
+ # should cause it to be uninstalled. The uninstall
+ # task is executed only after blocking packages have
+ # been merged.
+ # TODO: distinguish between install/uninstall tasks in mergelist
+ ResolverPlaygroundTestCase(
+ ["app-misc/blocker-runtime-a", "app-misc/blocker-runtime-b"],
+ success = True,
+ all_permutations = True,
+ ambiguous_merge_order = True,
+ mergelist = [("app-misc/blocker-runtime-a-1", "app-misc/blocker-runtime-b-1"), "app-misc/installed-blocker-a-1", ("!app-misc/blocker-runtime-a", "!app-misc/blocker-runtime-b")]),
+ # We have a soft buildtime blocker against an installed
+ # package that should cause it to be uninstalled. Note that with
+ # soft blockers, the blocking packages are allowed to temporarily
+ # overlap. This allows any essential programs/libraries provided
+ # by both packages to be available at all times.
+ # TODO: distinguish between install/uninstall tasks in mergelist
+ ResolverPlaygroundTestCase(
+ ["app-misc/blocker-buildtime-unbuilt-a"],
+ success = True,
+ mergelist = ["app-misc/blocker-buildtime-unbuilt-a-1", "app-misc/installed-blocker-a-1", "!app-misc/installed-blocker-a"]),
+ # We have a hard buildtime blocker against an installed
+ # package that will not resolve automatically (unless
+ # the option requested in bug 250286 is implemented).
+ ResolverPlaygroundTestCase(
+ ["app-misc/blocker-buildtime-unbuilt-hard-a"],
+ success = False,
+ mergelist = ['app-misc/blocker-buildtime-unbuilt-hard-a-1', '!!app-misc/installed-blocker-a']),
+ # An installed package has a hard runtime blocker that
+ # will not resolve automatically (unless the option
+ # requested in bug 250286 is implemented).
+ ResolverPlaygroundTestCase(
+ ["app-misc/blocker-runtime-hard-a"],
+ success = False,
+ mergelist = ['app-misc/blocker-runtime-hard-a-1', '!!app-misc/blocker-runtime-hard-a']),
+ # Test swapping of providers for a new-style virtual package,
+ # which relies on delayed evaluation of disjunctive (virtual
+ # and ||) deps as required to solve bug #264434. Note that
+ # this behavior is not supported for old-style PROVIDE virtuals,
+ # as reported in bug #339164.
+ ResolverPlaygroundTestCase(
+ ["media-video/libav"],
+ success=True,
+ mergelist = ['media-video/libav-0.7_pre20110327', 'media-video/ffmpeg-0.7_rc1', '!media-video/ffmpeg']),
+ # Test that PORTAGE_PACKAGE_ATOM is merged asap. Optimally,
+ # satisfied deps are always merged after the asap nodes that
+ # depend on them.
+ ResolverPlaygroundTestCase(
+ ["dev-lang/python", portage.const.PORTAGE_PACKAGE_ATOM],
+ success = True,
+ all_permutations = True,
+ mergelist = ['app-admin/eselect-python-20100321', 'sys-apps/portage-2.1.9.49', 'dev-lang/python-3.2']),
+ # Test that OS_HEADERS_PACKAGE_ATOM and LIBC_PACKAGE_ATOM
+ # are merged asap, in order to account for implicit
+ # dependencies. See bug #303567. Optimally, satisfied deps
+ # are always merged after the asap nodes that depend on them.
+ ResolverPlaygroundTestCase(
+ ["app-arch/xz-utils", "sys-kernel/linux-headers", "sys-devel/binutils", "sys-libs/glibc"],
+ options = {"--complete-graph" : True},
+ success = True,
+ all_permutations = True,
+ ambiguous_merge_order = True,
+ mergelist = ['sys-kernel/linux-headers-2.6.39', 'sys-devel/gcc-4.5.2', 'sys-libs/glibc-2.13', ('app-arch/xz-utils-5.0.2', 'sys-devel/binutils-2.20.1')]),
+ # Test asap install of PDEPEND for bug #180045.
+ ResolverPlaygroundTestCase(
+ ["kde-base/kmines", "kde-base/kdnssd", "kde-base/kdelibs", "app-arch/xz-utils"],
+ success = True,
+ all_permutations = True,
+ ambiguous_merge_order = True,
+ merge_order_assertions = (
+ ('dev-util/pkgconfig-0.25-r2', 'kde-misc/kdnssd-avahi-0.1.2'),
+ ('kde-misc/kdnssd-avahi-0.1.2', 'kde-base/libkdegames-3.5.7'),
+ ('kde-misc/kdnssd-avahi-0.1.2', 'kde-base/kdnssd-3.5.7'),
+ ('kde-base/libkdegames-3.5.7', 'kde-base/kmines-3.5.7'),
+ ),
+ mergelist = [('kde-base/kdelibs-3.5.7', 'dev-util/pkgconfig-0.25-r2', 'kde-misc/kdnssd-avahi-0.1.2', 'app-arch/xz-utils-5.0.2', 'kde-base/libkdegames-3.5.7', 'kde-base/kdnssd-3.5.7', 'kde-base/kmines-3.5.7')]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_missing_iuse_and_evaluated_atoms.py b/portage_with_autodep/pym/portage/tests/resolver/test_missing_iuse_and_evaluated_atoms.py
new file mode 100644
index 0000000..a860e7b
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/test_missing_iuse_and_evaluated_atoms.py
@@ -0,0 +1,31 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class MissingIUSEandEvaluatedAtomsTestCase(TestCase):
+
+ def testMissingIUSEandEvaluatedAtoms(self):
+ ebuilds = {
+ "dev-libs/A-1": { "DEPEND": "dev-libs/B[foo?]", "IUSE": "foo bar", "EAPI": 2 },
+ "dev-libs/A-2": { "DEPEND": "dev-libs/B[foo?,bar]", "IUSE": "foo bar", "EAPI": 2 },
+ "dev-libs/B-1": { "IUSE": "bar" },
+ }
+
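+		# dev-libs/B does not have "foo" in IUSE, so the evaluated
+		# [foo?] use deps can never match (EAPI 2 has no use-dep
+		# defaults).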
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-1"],
+ success = False),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-2"],
+ success = False),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_multirepo.py b/portage_with_autodep/pym/portage/tests/resolver/test_multirepo.py
new file mode 100644
index 0000000..34c6d45
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/test_multirepo.py
@@ -0,0 +1,318 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class MultirepoTestCase(TestCase):
+
+ def testMultirepo(self):
+ ebuilds = {
+ #Simple repo selection
+ "dev-libs/A-1": { },
+ "dev-libs/A-1::repo1": { },
+ "dev-libs/A-2::repo1": { },
+ "dev-libs/A-1::repo2": { },
+
+ #Packages in exactly one repo
+ "dev-libs/B-1": { },
+ "dev-libs/C-1::repo1": { },
+
+			#Package in repository 1 and 2, but 2 must be used
+ "dev-libs/D-1::repo1": { },
+ "dev-libs/D-1::repo2": { },
+
+ "dev-libs/E-1": { },
+ "dev-libs/E-1::repo1": { },
+ "dev-libs/E-1::repo2": { "SLOT": "1" },
+
+ "dev-libs/F-1::repo1": { "SLOT": "1" },
+ "dev-libs/F-1::repo2": { "SLOT": "1" },
+
+ "dev-libs/G-1::repo1": { "EAPI" : "4", "IUSE":"+x +y", "REQUIRED_USE" : "" },
+ "dev-libs/G-1::repo2": { "EAPI" : "4", "IUSE":"+x +y", "REQUIRED_USE" : "^^ ( x y )" },
+
+ "dev-libs/H-1": { "KEYWORDS": "x86", "EAPI" : "3",
+ "RDEPEND" : "|| ( dev-libs/I:2 dev-libs/I:1 )" },
+
+ "dev-libs/I-1::repo2": { "SLOT" : "1"},
+ "dev-libs/I-2::repo2": { "SLOT" : "2"},
+ }
+
+ installed = {
+ "dev-libs/H-1": { "RDEPEND" : "|| ( dev-libs/I:2 dev-libs/I:1 )"},
+ "dev-libs/I-2::repo1": {"SLOT" : "2"},
+ }
+
+ sets = {
+ "multirepotest":
+ ( "dev-libs/A::test_repo", )
+ }
+
+ test_cases = (
+ #Simple repo selection
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/A-2::repo1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A::test_repo"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/A-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A::repo2"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/A-1::repo2"]),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-1::repo1"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/A-1::repo1"]),
+ ResolverPlaygroundTestCase(
+ ["@multirepotest"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/A-1"]),
+
+ #Packages in exactly one repo
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/B-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/C-1::repo1"]),
+
+ #Package in repository 1 and 2, but 2 must be used
+ ResolverPlaygroundTestCase(
+ ["dev-libs/D"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/D-1::repo2"]),
+
+ #Atoms with slots
+ ResolverPlaygroundTestCase(
+ ["dev-libs/E"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/E-1::repo2"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/E:1::repo2"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/E-1::repo2"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/E:1"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/E-1::repo2"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/F:1"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/F-1::repo2"]),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/F-1:1"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/F-1::repo2"]),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/F-1:1::repo1"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/F-1::repo1"]),
+
+			# A dependency on installed dev-libs/I-2, whose ebuild is
+			# not available from the same repo, should not unnecessarily
+			# reinstall the same version from a different repo.
+ ResolverPlaygroundTestCase(
+ ["dev-libs/H"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = []),
+
+ # Check interaction between repo priority and unsatisfied
+ # REQUIRED_USE, for bug #350254.
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/G-1"],
+ check_repo_names = True,
+ success = False),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, sets=sets)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testMultirepoUserConfig(self):
+ ebuilds = {
+ #package.use test
+ "dev-libs/A-1": { "IUSE": "foo" },
+ "dev-libs/A-2::repo1": { "IUSE": "foo" },
+ "dev-libs/A-3::repo2": { },
+ "dev-libs/B-1": { "DEPEND": "dev-libs/A", "EAPI": 2 },
+ "dev-libs/B-2": { "DEPEND": "dev-libs/A[foo]", "EAPI": 2 },
+ "dev-libs/B-3": { "DEPEND": "dev-libs/A[-foo]", "EAPI": 2 },
+
+ #package.keywords test
+ "dev-libs/C-1": { "KEYWORDS": "~x86" },
+ "dev-libs/C-1::repo1": { "KEYWORDS": "~x86" },
+
+ #package.license
+ "dev-libs/D-1": { "LICENSE": "TEST" },
+ "dev-libs/D-1::repo1": { "LICENSE": "TEST" },
+
+ #package.mask
+ "dev-libs/E-1": { },
+ "dev-libs/E-1::repo1": { },
+ "dev-libs/H-1": { },
+ "dev-libs/H-1::repo1": { },
+ "dev-libs/I-1::repo2": { "SLOT" : "1"},
+ "dev-libs/I-2::repo2": { "SLOT" : "2"},
+ "dev-libs/J-1": { "KEYWORDS": "x86", "EAPI" : "3",
+ "RDEPEND" : "|| ( dev-libs/I:2 dev-libs/I:1 )" },
+
+ #package.properties
+ "dev-libs/F-1": { "PROPERTIES": "bar"},
+ "dev-libs/F-1::repo1": { "PROPERTIES": "bar"},
+
+ #package.unmask
+ "dev-libs/G-1": { },
+ "dev-libs/G-1::repo1": { },
+
+ #package.mask with wildcards
+ "dev-libs/Z-1::repo3": { },
+ }
+
+ installed = {
+ "dev-libs/J-1": { "RDEPEND" : "|| ( dev-libs/I:2 dev-libs/I:1 )"},
+ "dev-libs/I-2::repo1": {"SLOT" : "2"},
+ }
+
+ user_config = {
+ "package.use":
+ (
+ "dev-libs/A::repo1 foo",
+ ),
+ "package.keywords":
+ (
+ "=dev-libs/C-1::test_repo",
+ ),
+ "package.license":
+ (
+ "=dev-libs/D-1::test_repo TEST",
+ ),
+ "package.mask":
+ (
+ "dev-libs/E::repo1",
+ "dev-libs/H",
+ "dev-libs/I::repo1",
+ #needed for package.unmask test
+ "dev-libs/G",
+ #wildcard test
+ "*/*::repo3",
+ ),
+ "package.properties":
+ (
+ "dev-libs/F::repo1 -bar",
+ ),
+ "package.unmask":
+ (
+ "dev-libs/G::test_repo",
+ ),
+ }
+
+ test_cases = (
+ #package.use test
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/B-1"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/A-3::repo2", "dev-libs/B-1"]),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/B-2"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/A-2::repo1", "dev-libs/B-2"]),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/B-3"],
+ options = { "--autounmask": 'n' },
+ success = False,
+ check_repo_names = True),
+
+ #package.keywords test
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/C-1"]),
+
+ #package.license test
+ ResolverPlaygroundTestCase(
+ ["dev-libs/D"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/D-1"]),
+
+ #package.mask test
+ ResolverPlaygroundTestCase(
+ ["dev-libs/E"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/E-1"]),
+
+			# A dependency on installed dev-libs/I-2, whose ebuild is
+			# masked in the same repo, should not unnecessarily pull
+			# in a different slot. It should just pull in the same slot
+			# from a different repo (bug #351828).
+ ResolverPlaygroundTestCase(
+ ["dev-libs/J"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ["dev-libs/I-2"]),
+
+ #package.properties test
+ ResolverPlaygroundTestCase(
+ ["dev-libs/F"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/F-1"]),
+
+			#package.unmask test
+ ResolverPlaygroundTestCase(
+ ["dev-libs/G"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/G-1"]),
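+
+			#package.mask test: dev-libs/H is masked in every repo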
+ ResolverPlaygroundTestCase(
+ ["dev-libs/H"],
+ options = { "--autounmask": 'n' },
+ success = False),
+
+ #package.mask with wildcards
+ ResolverPlaygroundTestCase(
+ ["dev-libs/Z"],
+ options = { "--autounmask": 'n' },
+ success = False),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, user_config=user_config)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_multislot.py b/portage_with_autodep/pym/portage/tests/resolver/test_multislot.py
new file mode 100644
index 0000000..8615419
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/test_multislot.py
@@ -0,0 +1,40 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class MultiSlotTestCase(TestCase):
+
+ def testMultiSlotSelective(self):
+ """
+ Test that a package isn't reinstalled due to SLOT dependency
+ interaction with USE=multislot (bug #220341).
+ """
+
+ ebuilds = {
+ "sys-devel/gcc-4.4.4": { "SLOT": "4.4" },
+ }
+
+ installed = {
+ "sys-devel/gcc-4.4.4": { "SLOT": "i686-pc-linux-gnu-4.4.4" },
+ }
+
+ options = {'--update' : True, '--deep' : True, '--selective' : True}
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["sys-devel/gcc:4.4"],
+ options = options,
+ mergelist = [],
+ success = True),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_old_dep_chain_display.py b/portage_with_autodep/pym/portage/tests/resolver/test_old_dep_chain_display.py
new file mode 100644
index 0000000..8aedf59
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/test_old_dep_chain_display.py
@@ -0,0 +1,35 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class OldDepChainDisplayTestCase(TestCase):
+
+ def testOldDepChainDisplay(self):
+ ebuilds = {
+ "dev-libs/A-1": { "DEPEND": "foo? ( dev-libs/B[-bar] )", "IUSE": "+foo", "EAPI": "2" },
+ "dev-libs/A-2": { "DEPEND": "foo? ( dev-libs/C )", "IUSE": "+foo", "EAPI": "1" },
+ "dev-libs/B-1": { "IUSE": "bar", "DEPEND": "!bar? ( dev-libs/D[-baz] )", "EAPI": "2" },
+ "dev-libs/C-1": { "KEYWORDS": "~x86" },
+ "dev-libs/D-1": { "IUSE": "+baz", "EAPI": "1" },
+ }
+
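+		# Both resolutions fail; what matters here is that displaying
+		# the dependency chain for these old-EAPI ebuilds does not
+		# raise an exception.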
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-1"],
+ options = { "--autounmask": 'n' },
+ success = False),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-2"],
+ options = { "--autounmask": 'n' },
+ success = False),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_output.py b/portage_with_autodep/pym/portage/tests/resolver/test_output.py
new file mode 100644
index 0000000..34efe9c
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/test_output.py
@@ -0,0 +1,88 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class MergelistOutputTestCase(TestCase):
+
+ def testMergelistOutput(self):
+ """
+		This test doesn't check whether the output is correct, but makes
+		sure that the output code doesn't raise an exception anywhere.
+ """
+ ebuilds = {
+ "dev-libs/A-1": { "DEPEND": "dev-libs/B dev-libs/C", "IUSE": "+foo", "EAPI": 1 },
+ "dev-libs/B-1": { "DEPEND": "dev-libs/D", "IUSE": "foo +bar", "EAPI": 1 },
+ "dev-libs/C-1": { "DEPEND": "dev-libs/E", "IUSE": "foo bar" },
+ "dev-libs/D-1": { "IUSE": "" },
+ "dev-libs/E-1": {},
+
+ #reinstall for flags
+ "dev-libs/Z-1": { "IUSE": "+foo", "EAPI": 1 },
+ "dev-libs/Y-1": { "IUSE": "foo", "EAPI": 1 },
+ "dev-libs/X-1": {},
+ "dev-libs/W-1": { "IUSE": "+foo", "EAPI": 1 },
+ }
+
+ installed = {
+ "dev-libs/Z-1": { "USE": "", "IUSE": "foo" },
+ "dev-libs/Y-1": { "USE": "foo", "IUSE": "+foo", "EAPI": 1 },
+ "dev-libs/X-1": { "USE": "foo", "IUSE": "+foo", "EAPI": 1 },
+ "dev-libs/W-1": { },
+ }
+
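+		# Each option combination below is expanded into emerge-style
+		# --<option> flags and applied to every test case.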
+		option_combos = (
+			(),
+			("verbose",),
+			("tree",),
+			("tree", "unordered-display",),
+			("verbose", "tree",),
+			("verbose", "tree", "unordered-display",),
+		)
+
+ test_cases = []
+		for options in option_combos:
+ testcase_opts = {}
+ for opt in options:
+ testcase_opts["--" + opt] = True
+
+ test_cases.append(ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = testcase_opts,
+ success = True,
+ ignore_mergelist_order=True,
+ mergelist = ["dev-libs/D-1", "dev-libs/E-1", "dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1"]))
+
+ test_cases.append(ResolverPlaygroundTestCase(
+ ["dev-libs/Z"],
+ options = testcase_opts,
+ success = True,
+ mergelist = ["dev-libs/Z-1"]))
+
+ test_cases.append(ResolverPlaygroundTestCase(
+ ["dev-libs/Y"],
+ options = testcase_opts,
+ success = True,
+ mergelist = ["dev-libs/Y-1"]))
+
+ test_cases.append(ResolverPlaygroundTestCase(
+ ["dev-libs/X"],
+ options = testcase_opts,
+ success = True,
+ mergelist = ["dev-libs/X-1"]))
+
+ test_cases.append(ResolverPlaygroundTestCase(
+ ["dev-libs/W"],
+ options = testcase_opts,
+ success = True,
+ mergelist = ["dev-libs/W-1"]))
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_rebuild.py b/portage_with_autodep/pym/portage/tests/resolver/test_rebuild.py
new file mode 100644
index 0000000..b9c4d6d
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/test_rebuild.py
@@ -0,0 +1,138 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class RebuildTestCase(TestCase):
+
+ def testRebuild(self):
+ """
+ Rebuild packages when dependencies that are used at both build-time and
+ run-time are upgraded.
+ """
+
+ ebuilds = {
+ "sys-libs/x-1": { },
+ "sys-libs/x-1-r1": { },
+ "sys-libs/x-2": { },
+ "sys-apps/a-1": { "DEPEND" : "sys-libs/x", "RDEPEND" : "sys-libs/x"},
+ "sys-apps/a-2": { "DEPEND" : "sys-libs/x", "RDEPEND" : "sys-libs/x"},
+ "sys-apps/b-1": { "DEPEND" : "sys-libs/x", "RDEPEND" : "sys-libs/x"},
+ "sys-apps/b-2": { "DEPEND" : "sys-libs/x", "RDEPEND" : "sys-libs/x"},
+ "sys-apps/c-1": { "DEPEND" : "sys-libs/x", "RDEPEND" : ""},
+ "sys-apps/c-2": { "DEPEND" : "sys-libs/x", "RDEPEND" : ""},
+ "sys-apps/d-1": { "RDEPEND" : "sys-libs/x"},
+ "sys-apps/d-2": { "RDEPEND" : "sys-libs/x"},
+ "sys-apps/e-2": { "DEPEND" : "sys-libs/x", "RDEPEND" : "sys-libs/x"},
+ "sys-apps/f-2": { "DEPEND" : "sys-apps/a", "RDEPEND" : "sys-apps/a"},
+ "sys-apps/g-2": { "DEPEND" : "sys-apps/b sys-libs/x",
+ "RDEPEND" : "sys-apps/b"},
+ }
+
+ installed = {
+ "sys-libs/x-1": { },
+ "sys-apps/a-1": { "DEPEND" : "sys-libs/x", "RDEPEND" : "sys-libs/x"},
+ "sys-apps/b-1": { "DEPEND" : "sys-libs/x", "RDEPEND" : "sys-libs/x"},
+ "sys-apps/c-1": { "DEPEND" : "sys-libs/x", "RDEPEND" : ""},
+ "sys-apps/d-1": { "RDEPEND" : "sys-libs/x"},
+ "sys-apps/e-1": { "DEPEND" : "sys-libs/x", "RDEPEND" : "sys-libs/x"},
+ "sys-apps/f-1": { "DEPEND" : "sys-apps/a", "RDEPEND" : "sys-apps/a"},
+ "sys-apps/g-1": { "DEPEND" : "sys-apps/b sys-libs/x",
+ "RDEPEND" : "sys-apps/b"},
+ }
+
+ world = ["sys-apps/a", "sys-apps/b", "sys-apps/c", "sys-apps/d",
+ "sys-apps/e", "sys-apps/f", "sys-apps/g"]
+
+ test_cases = (
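+			# Only packages that use sys-libs/x at both build time and
+			# run time are rebuilt; --rebuild-exclude also keeps
+			# sys-apps/g out of the list, since it is only rebuilt
+			# when sys-apps/b is.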
+ ResolverPlaygroundTestCase(
+ ["sys-libs/x"],
+ options = {"--rebuild-if-unbuilt" : True,
+ "--rebuild-exclude" : ["sys-apps/b"]},
+ mergelist = ['sys-libs/x-2', 'sys-apps/a-2', 'sys-apps/e-2'],
+ ignore_mergelist_order = True,
+ success = True),
+
+ ResolverPlaygroundTestCase(
+ ["sys-libs/x"],
+ options = {"--rebuild-if-unbuilt" : True},
+ mergelist = ['sys-libs/x-2', 'sys-apps/a-2', 'sys-apps/b-2',
+ 'sys-apps/e-2', 'sys-apps/g-2'],
+ ignore_mergelist_order = True,
+ success = True),
+
+ ResolverPlaygroundTestCase(
+ ["sys-libs/x"],
+ options = {"--rebuild-if-unbuilt" : True,
+ "--rebuild-ignore" : ["sys-libs/x"]},
+ mergelist = ['sys-libs/x-2'],
+ ignore_mergelist_order = True,
+ success = True),
+
+ ResolverPlaygroundTestCase(
+ ["sys-libs/x"],
+ options = {"--rebuild-if-unbuilt" : True,
+ "--rebuild-ignore" : ["sys-apps/b"]},
+ mergelist = ['sys-libs/x-2', 'sys-apps/a-2', 'sys-apps/b-2',
+ 'sys-apps/e-2'],
+ ignore_mergelist_order = True,
+ success = True),
+
+ ResolverPlaygroundTestCase(
+ ["=sys-libs/x-1-r1"],
+ options = {"--rebuild-if-unbuilt" : True},
+ mergelist = ['sys-libs/x-1-r1', 'sys-apps/a-2',
+ 'sys-apps/b-2', 'sys-apps/e-2', 'sys-apps/g-2'],
+ ignore_mergelist_order = True,
+ success = True),
+
+ ResolverPlaygroundTestCase(
+ ["=sys-libs/x-1-r1"],
+ options = {"--rebuild-if-new-rev" : True},
+ mergelist = ['sys-libs/x-1-r1', 'sys-apps/a-2',
+ 'sys-apps/b-2', 'sys-apps/e-2', 'sys-apps/g-2'],
+ ignore_mergelist_order = True,
+ success = True),
+
+ ResolverPlaygroundTestCase(
+ ["=sys-libs/x-1-r1"],
+ options = {"--rebuild-if-new-ver" : True},
+ mergelist = ['sys-libs/x-1-r1'],
+ ignore_mergelist_order = True,
+ success = True),
+
+ ResolverPlaygroundTestCase(
+ ["sys-libs/x"],
+ options = {"--rebuild-if-new-ver" : True},
+ mergelist = ['sys-libs/x-2', 'sys-apps/a-2',
+ 'sys-apps/b-2', 'sys-apps/e-2', 'sys-apps/g-2'],
+ ignore_mergelist_order = True,
+ success = True),
+
+ ResolverPlaygroundTestCase(
+ ["=sys-libs/x-1"],
+ options = {"--rebuild-if-new-rev" : True},
+ mergelist = ['sys-libs/x-1'],
+ ignore_mergelist_order = True,
+ success = True),
+
+ ResolverPlaygroundTestCase(
+ ["=sys-libs/x-1"],
+ options = {"--rebuild-if-unbuilt" : True},
+ mergelist = ['sys-libs/x-1', 'sys-apps/a-2',
+ 'sys-apps/b-2', 'sys-apps/e-2', 'sys-apps/g-2'],
+ ignore_mergelist_order = True,
+ success = True),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_required_use.py b/portage_with_autodep/pym/portage/tests/resolver/test_required_use.py
new file mode 100644
index 0000000..c8810fa
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/test_required_use.py
@@ -0,0 +1,114 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class RequiredUSETestCase(TestCase):
+
+ def testRequiredUSE(self):
+ """
+		Only simple REQUIRED_USE values here. The parser itself is tested in dep/testCheckRequiredUse.
+ """
+
+ ebuilds = {
+ "dev-libs/A-1" : {"EAPI": "4", "IUSE": "foo bar", "REQUIRED_USE": "|| ( foo bar )"},
+ "dev-libs/A-2" : {"EAPI": "4", "IUSE": "foo +bar", "REQUIRED_USE": "|| ( foo bar )"},
+ "dev-libs/A-3" : {"EAPI": "4", "IUSE": "+foo bar", "REQUIRED_USE": "|| ( foo bar )"},
+ "dev-libs/A-4" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "|| ( foo bar )"},
+ "dev-libs/A-5" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "|| ( )"},
+
+ "dev-libs/B-1" : {"EAPI": "4", "IUSE": "foo bar", "REQUIRED_USE": "^^ ( foo bar )"},
+ "dev-libs/B-2" : {"EAPI": "4", "IUSE": "foo +bar", "REQUIRED_USE": "^^ ( foo bar )"},
+ "dev-libs/B-3" : {"EAPI": "4", "IUSE": "+foo bar", "REQUIRED_USE": "^^ ( foo bar )"},
+ "dev-libs/B-4" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "^^ ( foo bar )"},
+ "dev-libs/B-5" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "^^ ( )"},
+
+ "dev-libs/C-1" : {"EAPI": "4", "IUSE": "+foo bar", "REQUIRED_USE": "foo? ( !bar )"},
+ "dev-libs/C-2" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "foo? ( !bar )"},
+ "dev-libs/C-3" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "foo? ( bar )"},
+ "dev-libs/C-4" : {"EAPI": "4", "IUSE": "+foo bar", "REQUIRED_USE": "foo? ( bar )"},
+ "dev-libs/C-5" : {"EAPI": "4", "IUSE": "foo bar", "REQUIRED_USE": "foo? ( bar )"},
+ "dev-libs/C-6" : {"EAPI": "4", "IUSE": "foo +bar", "REQUIRED_USE": "foo? ( bar )"},
+ "dev-libs/C-7" : {"EAPI": "4", "IUSE": "foo +bar", "REQUIRED_USE": "!foo? ( bar )"},
+ "dev-libs/C-8" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "!foo? ( bar )"},
+ "dev-libs/C-9" : {"EAPI": "4", "IUSE": "+foo bar", "REQUIRED_USE": "!foo? ( bar )"},
+ "dev-libs/C-10": {"EAPI": "4", "IUSE": "foo bar", "REQUIRED_USE": "!foo? ( bar )"},
+ "dev-libs/C-11": {"EAPI": "4", "IUSE": "foo bar", "REQUIRED_USE": "!foo? ( !bar )"},
+ "dev-libs/C-12": {"EAPI": "4", "IUSE": "foo +bar", "REQUIRED_USE": "!foo? ( !bar )"},
+ "dev-libs/C-13": {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "!foo? ( !bar )"},
+ "dev-libs/C-14": {"EAPI": "4", "IUSE": "+foo bar", "REQUIRED_USE": "!foo? ( !bar )"},
+
+ "dev-libs/D-1" : {"EAPI": "4", "IUSE": "+w +x +y z", "REQUIRED_USE": "w? ( x || ( y z ) )"},
+ "dev-libs/D-2" : {"EAPI": "4", "IUSE": "+w +x +y +z", "REQUIRED_USE": "w? ( x || ( y z ) )"},
+ "dev-libs/D-3" : {"EAPI": "4", "IUSE": "+w +x y z", "REQUIRED_USE": "w? ( x || ( y z ) )"},
+ "dev-libs/D-4" : {"EAPI": "4", "IUSE": "+w x +y +z", "REQUIRED_USE": "w? ( x || ( y z ) )"},
+ "dev-libs/D-5" : {"EAPI": "4", "IUSE": "w x y z", "REQUIRED_USE": "w? ( x || ( y z ) )"},
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(["=dev-libs/A-1"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-2"], success = True, mergelist=["dev-libs/A-2"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-3"], success = True, mergelist=["dev-libs/A-3"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-4"], success = True, mergelist=["dev-libs/A-4"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-5"], success = True, mergelist=["dev-libs/A-5"]),
+
+ ResolverPlaygroundTestCase(["=dev-libs/B-1"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/B-2"], success = True, mergelist=["dev-libs/B-2"]),
+ ResolverPlaygroundTestCase(["=dev-libs/B-3"], success = True, mergelist=["dev-libs/B-3"]),
+ ResolverPlaygroundTestCase(["=dev-libs/B-4"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/B-5"], success = True, mergelist=["dev-libs/B-5"]),
+
+ ResolverPlaygroundTestCase(["=dev-libs/C-1"], success = True, mergelist=["dev-libs/C-1"]),
+ ResolverPlaygroundTestCase(["=dev-libs/C-2"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/C-3"], success = True, mergelist=["dev-libs/C-3"]),
+ ResolverPlaygroundTestCase(["=dev-libs/C-4"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/C-5"], success = True, mergelist=["dev-libs/C-5"]),
+ ResolverPlaygroundTestCase(["=dev-libs/C-6"], success = True, mergelist=["dev-libs/C-6"]),
+ ResolverPlaygroundTestCase(["=dev-libs/C-7"], success = True, mergelist=["dev-libs/C-7"]),
+ ResolverPlaygroundTestCase(["=dev-libs/C-8"], success = True, mergelist=["dev-libs/C-8"]),
+ ResolverPlaygroundTestCase(["=dev-libs/C-9"], success = True, mergelist=["dev-libs/C-9"]),
+ ResolverPlaygroundTestCase(["=dev-libs/C-10"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/C-11"], success = True, mergelist=["dev-libs/C-11"]),
+ ResolverPlaygroundTestCase(["=dev-libs/C-12"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/C-13"], success = True, mergelist=["dev-libs/C-13"]),
+ ResolverPlaygroundTestCase(["=dev-libs/C-14"], success = True, mergelist=["dev-libs/C-14"]),
+
+ ResolverPlaygroundTestCase(["=dev-libs/D-1"], success = True, mergelist=["dev-libs/D-1"]),
+ ResolverPlaygroundTestCase(["=dev-libs/D-2"], success = True, mergelist=["dev-libs/D-2"]),
+ ResolverPlaygroundTestCase(["=dev-libs/D-3"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/D-4"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/D-5"], success = True, mergelist=["dev-libs/D-5"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testRequiredUseOrDeps(self):
+
+ ebuilds = {
+ "dev-libs/A-1": { "IUSE": "+x +y", "REQUIRED_USE": "^^ ( x y )", "EAPI": "4" },
+ "dev-libs/B-1": { "IUSE": "+x +y", "REQUIRED_USE": "", "EAPI": "4" },
+ "app-misc/p-1": { "RDEPEND": "|| ( =dev-libs/A-1 =dev-libs/B-1 )" },
+ }
+
+ test_cases = (
+			# This should fail and show a REQUIRED_USE error for
+			# dev-libs/A-1, since this choice is preferred.
+ ResolverPlaygroundTestCase(
+ ["=app-misc/p-1"],
+ success = False),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_simple.py b/portage_with_autodep/pym/portage/tests/resolver/test_simple.py
new file mode 100644
index 0000000..0bcfc4b
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/test_simple.py
@@ -0,0 +1,57 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class SimpleResolverTestCase(TestCase):
+
+ def testSimple(self):
+ ebuilds = {
+ "dev-libs/A-1": { "KEYWORDS": "x86" },
+ "dev-libs/A-2": { "KEYWORDS": "~x86" },
+ "dev-libs/B-1.2": {},
+
+ "app-misc/Z-1": { "DEPEND": "|| ( app-misc/Y ( app-misc/X app-misc/W ) )", "RDEPEND": "" },
+ "app-misc/Y-1": { "KEYWORDS": "~x86" },
+ "app-misc/X-1": {},
+ "app-misc/W-1": {},
+ }
+ installed = {
+ "dev-libs/A-1": {},
+ "dev-libs/B-1.1": {},
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(["dev-libs/A"], success = True, mergelist = ["dev-libs/A-1"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-2"], options = { "--autounmask": 'n' }, success = False),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--noreplace": True},
+ success = True,
+ mergelist = []),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ options = {"--noreplace": True},
+ success = True,
+ mergelist = []),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ options = {"--update": True},
+ success = True,
+ mergelist = ["dev-libs/B-1.2"]),
+
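+			# app-misc/Y is not keyworded stable, so the || dep falls
+			# back to the ( app-misc/X app-misc/W ) group.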
+ ResolverPlaygroundTestCase(
+ ["app-misc/Z"],
+ success = True,
+ mergelist = ["app-misc/W-1", "app-misc/X-1", "app-misc/Z-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_slot_collisions.py b/portage_with_autodep/pym/portage/tests/resolver/test_slot_collisions.py
new file mode 100644
index 0000000..4867cea
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/test_slot_collisions.py
@@ -0,0 +1,143 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class SlotCollisionTestCase(TestCase):
+
+ def testSlotCollision(self):
+
+ ebuilds = {
+ "dev-libs/A-1": { "PDEPEND": "foo? ( dev-libs/B )", "IUSE": "foo" },
+ "dev-libs/B-1": { "IUSE": "foo" },
+ "dev-libs/C-1": { "DEPEND": "dev-libs/A[foo]", "EAPI": 2 },
+ "dev-libs/D-1": { "DEPEND": "dev-libs/A[foo=] dev-libs/B[foo=]", "IUSE": "foo", "EAPI": 2 },
+ "dev-libs/E-1": { },
+ "dev-libs/E-2": { "IUSE": "foo" },
+
+ "app-misc/Z-1": { },
+ "app-misc/Z-2": { },
+ "app-misc/Y-1": { "DEPEND": "=app-misc/Z-1" },
+ "app-misc/Y-2": { "DEPEND": ">app-misc/Z-1" },
+ "app-misc/X-1": { "DEPEND": "=app-misc/Z-2" },
+ "app-misc/X-2": { "DEPEND": "<app-misc/Z-2" },
+
+ "sci-libs/K-1": { "IUSE": "+foo", "EAPI": 1 },
+ "sci-libs/L-1": { "DEPEND": "sci-libs/K[-foo]", "EAPI": 2 },
+ "sci-libs/M-1": { "DEPEND": "sci-libs/K[foo=]", "IUSE": "+foo", "EAPI": 2 },
+
+ "sci-libs/Q-1": { "SLOT": "1", "IUSE": "+bar foo", "EAPI": 1 },
+ "sci-libs/Q-2": { "SLOT": "2", "IUSE": "+bar +foo", "EAPI": 2, "PDEPEND": "sci-libs/Q:1[bar?,foo?]" },
+ "sci-libs/P-1": { "DEPEND": "sci-libs/Q:1[foo=]", "IUSE": "foo", "EAPI": 2 },
+
+ "sys-libs/A-1": { "RDEPEND": "foo? ( sys-libs/J[foo=] )", "IUSE": "+foo", "EAPI": "4" },
+ "sys-libs/B-1": { "RDEPEND": "bar? ( sys-libs/J[bar=] )", "IUSE": "+bar", "EAPI": "4" },
+ "sys-libs/C-1": { "RDEPEND": "sys-libs/J[bar]", "EAPI": "4" },
+ "sys-libs/D-1": { "RDEPEND": "sys-libs/J[bar?]", "IUSE": "bar", "EAPI": "4" },
+ "sys-libs/E-1": { "RDEPEND": "sys-libs/J[foo(+)?]", "IUSE": "+foo", "EAPI": "4" },
+ "sys-libs/F-1": { "RDEPEND": "sys-libs/J[foo(+)]", "EAPI": "4" },
+ "sys-libs/J-1": { "IUSE": "+foo", "EAPI": "4" },
+ "sys-libs/J-2": { "IUSE": "+bar", "EAPI": "4" },
+
+ "app-misc/A-1": { "IUSE": "foo +bar", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" },
+ "app-misc/B-1": { "DEPEND": "=app-misc/A-1[foo=]", "IUSE": "foo", "EAPI": 2 },
+ "app-misc/C-1": { "DEPEND": "=app-misc/A-1[foo]", "EAPI": 2 },
+ "app-misc/E-1": { "RDEPEND": "dev-libs/E[foo?]", "IUSE": "foo", "EAPI": "2" },
+ "app-misc/F-1": { "RDEPEND": "=dev-libs/E-1", "IUSE": "foo", "EAPI": "2" },
+ }
+ installed = {
+ "dev-libs/A-1": { "PDEPEND": "foo? ( dev-libs/B )", "IUSE": "foo", "USE": "foo" },
+ "dev-libs/B-1": { "IUSE": "foo", "USE": "foo" },
+ "dev-libs/C-1": { "DEPEND": "dev-libs/A[foo]", "EAPI": 2 },
+ "dev-libs/D-1": { "DEPEND": "dev-libs/A[foo=] dev-libs/B[foo=]", "IUSE": "foo", "USE": "foo", "EAPI": 2 },
+
+ "sci-libs/K-1": { "IUSE": "foo", "USE": "" },
+ "sci-libs/L-1": { "DEPEND": "sci-libs/K[-foo]" },
+
+ "sci-libs/Q-1": { "SLOT": "1", "IUSE": "+bar +foo", "USE": "bar foo", "EAPI": 1 },
+ "sci-libs/Q-2": { "SLOT": "2", "IUSE": "+bar +foo", "USE": "bar foo", "EAPI": 2, "PDEPEND": "sci-libs/Q:1[bar?,foo?]" },
+
+ "app-misc/A-1": { "IUSE": "+foo bar", "USE": "foo", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" },
+ }
+
+ test_cases = (
+			#A qt-*[qt3support]-like mess.
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A", "dev-libs/B", "dev-libs/C", "dev-libs/D"],
+ options = { "--autounmask": 'n' },
+ success = False,
+ mergelist = ["dev-libs/A-1", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
+ ignore_mergelist_order = True,
+ slot_collision_solutions = [ {"dev-libs/A-1": {"foo": True}, "dev-libs/D-1": {"foo": True}} ]),
+
+ ResolverPlaygroundTestCase(
+ ["sys-libs/A", "sys-libs/B", "sys-libs/C", "sys-libs/D", "sys-libs/E", "sys-libs/F"],
+ options = { "--autounmask": 'n' },
+ success = False,
+ ignore_mergelist_order = True,
+ slot_collision_solutions = [],
+ mergelist = ['sys-libs/J-2', 'sys-libs/J-1', 'sys-libs/A-1', 'sys-libs/B-1', 'sys-libs/C-1', 'sys-libs/D-1', 'sys-libs/E-1', 'sys-libs/F-1'],
+ ),
+
+			#Version-based conflicts, nothing we can do.
+ ResolverPlaygroundTestCase(
+ ["=app-misc/X-1", "=app-misc/Y-1"],
+ success = False,
+ mergelist = ["app-misc/Z-1", "app-misc/Z-2", "app-misc/X-1", "app-misc/Y-1"],
+ ignore_mergelist_order = True,
+ slot_collision_solutions = []
+ ),
+ ResolverPlaygroundTestCase(
+ ["=app-misc/X-2", "=app-misc/Y-2"],
+ success = False,
+ mergelist = ["app-misc/Z-1", "app-misc/Z-2", "app-misc/X-2", "app-misc/Y-2"],
+ ignore_mergelist_order = True,
+ slot_collision_solutions = []
+ ),
+
+ ResolverPlaygroundTestCase(
+ ["=app-misc/E-1", "=app-misc/F-1"],
+ success = False,
+ mergelist = ["dev-libs/E-1", "dev-libs/E-2", "app-misc/E-1", "app-misc/F-1"],
+ ignore_mergelist_order = True,
+ slot_collision_solutions = []
+ ),
+
+ #Simple cases.
+ ResolverPlaygroundTestCase(
+ ["sci-libs/L", "sci-libs/M"],
+ success = False,
+ mergelist = ["sci-libs/L-1", "sci-libs/M-1", "sci-libs/K-1"],
+ ignore_mergelist_order = True,
+ slot_collision_solutions = [{"sci-libs/K-1": {"foo": False}, "sci-libs/M-1": {"foo": False}}]
+ ),
+
+ #Avoid duplicates.
+ ResolverPlaygroundTestCase(
+ ["sci-libs/P", "sci-libs/Q:2"],
+ success = False,
+ options = { "--update": True, "--complete-graph": True, "--autounmask": 'n' },
+ mergelist = ["sci-libs/P-1", "sci-libs/Q-1"],
+ ignore_mergelist_order = True,
+ all_permutations=True,
+ slot_collision_solutions = [{"sci-libs/Q-1": {"foo": True}, "sci-libs/P-1": {"foo": True}}]
+ ),
+
+ #Conflict with REQUIRED_USE
+ ResolverPlaygroundTestCase(
+ ["=app-misc/C-1", "=app-misc/B-1"],
+ all_permutations = True,
+ slot_collision_solutions = [],
+ mergelist = ["app-misc/A-1", "app-misc/C-1", "app-misc/B-1"],
+ ignore_mergelist_order = True,
+ success = False),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_use_dep_defaults.py b/portage_with_autodep/pym/portage/tests/resolver/test_use_dep_defaults.py
new file mode 100644
index 0000000..7d17106
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/test_use_dep_defaults.py
@@ -0,0 +1,40 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class UseDepDefaultsTestCase(TestCase):
+
+	def testUseDepDefaults(self):
+
+ ebuilds = {
+ "dev-libs/A-1": { "DEPEND": "dev-libs/B[foo]", "RDEPEND": "dev-libs/B[foo]", "EAPI": "2" },
+ "dev-libs/A-2": { "DEPEND": "dev-libs/B[foo(+)]", "RDEPEND": "dev-libs/B[foo(+)]", "EAPI": "4" },
+ "dev-libs/A-3": { "DEPEND": "dev-libs/B[foo(-)]", "RDEPEND": "dev-libs/B[foo(-)]", "EAPI": "4" },
+ "dev-libs/B-1": { "IUSE": "+foo", "EAPI": "1" },
+ "dev-libs/B-2": {},
+ }
+
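+		# The (+) default treats a flag missing from IUSE as enabled,
+		# so B-2 can satisfy foo(+); the (-) default treats it as
+		# disabled, forcing a fallback to B-1 which has foo in IUSE.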
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-1"],
+ success = True,
+ mergelist = ["dev-libs/B-1", "dev-libs/A-1"]),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-2"],
+ success = True,
+ mergelist = ["dev-libs/B-2", "dev-libs/A-2"]),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-3"],
+ success = True,
+ mergelist = ["dev-libs/B-1", "dev-libs/A-3"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/portage_with_autodep/pym/portage/tests/runTests b/portage_with_autodep/pym/portage/tests/runTests
new file mode 100755
index 0000000..6b3311d
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/runTests
@@ -0,0 +1,46 @@
+#!/usr/bin/python
+# runTests.py -- Portage Unit Test Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os, sys
+import os.path as osp
+import grp
+import pwd
+import signal
+
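+# Drop into pdb when SIGUSR1 is received, which makes it possible to
+# inspect a hung test run (kill -USR1 <pid>).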
+def debug_signal(signum, frame):
+ import pdb
+ pdb.set_trace()
+signal.signal(signal.SIGUSR1, debug_signal)
+
+# Pretend that the current user's uid/gid are the 'portage' uid/gid,
+# so things go smoothly regardless of the current user and global
+# user/group configuration.
+os.environ["PORTAGE_USERNAME"] = pwd.getpwuid(os.getuid()).pw_name
+os.environ["PORTAGE_GRPNAME"] = grp.getgrgid(os.getgid()).gr_name
+
+# Insert our parent dir so we can do shiny import "tests"
+# This line courtesy of Marienz and Pkgcore ;)
+sys.path.insert(0, osp.dirname(osp.dirname(osp.dirname(osp.abspath(__file__)))))
+
+import portage
+
+# Ensure that we don't instantiate portage.settings, so that the tests
+# work the same regardless of global configuration file state/existence.
+portage._disable_legacy_globals()
+
+import portage.tests as tests
+from portage.const import PORTAGE_BIN_PATH
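+
+# Make sure the helper scripts from this checkout are found first in
+# PATH when the tests spawn them.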
+path = os.environ.get("PATH", "").split(":")
+path = [x for x in path if x]
+if not path or not os.path.samefile(path[0], PORTAGE_BIN_PATH):
+ path.insert(0, PORTAGE_BIN_PATH)
+ os.environ["PATH"] = ":".join(path)
+del path
+
+
+if __name__ == "__main__":
+ result = tests.main()
+ if not result.wasSuccessful():
+ sys.exit(1)
diff --git a/portage_with_autodep/pym/portage/tests/sets/__init__.py b/portage_with_autodep/pym/portage/tests/sets/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/sets/__init__.py
diff --git a/portage_with_autodep/pym/portage/tests/sets/base/__init__.py b/portage_with_autodep/pym/portage/tests/sets/base/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/sets/base/__init__.py
diff --git a/portage_with_autodep/pym/portage/tests/sets/base/__test__ b/portage_with_autodep/pym/portage/tests/sets/base/__test__
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/sets/base/__test__
diff --git a/portage_with_autodep/pym/portage/tests/sets/base/testInternalPackageSet.py b/portage_with_autodep/pym/portage/tests/sets/base/testInternalPackageSet.py
new file mode 100644
index 0000000..e0a3478
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/sets/base/testInternalPackageSet.py
@@ -0,0 +1,61 @@
+# testInternalPackageSet.py -- Portage Unit Testing Functionality
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.dep import Atom
+from portage.exception import InvalidAtom
+from portage.tests import TestCase
+from portage._sets.base import InternalPackageSet
+
+class InternalPackageSetTestCase(TestCase):
+ """Simple Test Case for InternalPackageSet"""
+
+ def testInternalPackageSet(self):
+ i1_atoms = set(("dev-libs/A", ">=dev-libs/A-1", "dev-libs/B"))
+ i2_atoms = set(("dev-libs/A", "dev-libs/*", "dev-libs/C"))
+
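+		# Wildcard atoms such as "dev-libs/*" are only accepted when
+		# the set is constructed with allow_wildcard=True.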
+ i1 = InternalPackageSet(initial_atoms=i1_atoms)
+ i2 = InternalPackageSet(initial_atoms=i2_atoms, allow_wildcard=True)
+ self.assertRaises(InvalidAtom, InternalPackageSet, initial_atoms=i2_atoms)
+
+ self.assertEqual(i1.getAtoms(), i1_atoms)
+ self.assertEqual(i2.getAtoms(), i2_atoms)
+
+ new_atom = Atom("*/*", allow_wildcard=True)
+ self.assertRaises(InvalidAtom, i1.add, new_atom)
+ i2.add(new_atom)
+
+ i2_atoms.add(new_atom)
+
+ self.assertEqual(i1.getAtoms(), i1_atoms)
+ self.assertEqual(i2.getAtoms(), i2_atoms)
+
+ removed_atom = Atom("dev-libs/A")
+
+ i1.remove(removed_atom)
+ i2.remove(removed_atom)
+
+ i1_atoms.remove(removed_atom)
+ i2_atoms.remove(removed_atom)
+
+ self.assertEqual(i1.getAtoms(), i1_atoms)
+ self.assertEqual(i2.getAtoms(), i2_atoms)
+
+ update_atoms = [Atom("dev-libs/C"), Atom("dev-*/C", allow_wildcard=True)]
+
+ self.assertRaises(InvalidAtom, i1.update, update_atoms)
+ i2.update(update_atoms)
+
+ i2_atoms.update(update_atoms)
+
+ self.assertEqual(i1.getAtoms(), i1_atoms)
+ self.assertEqual(i2.getAtoms(), i2_atoms)
+
+ replace_atoms = [Atom("dev-libs/D"), Atom("*-libs/C", allow_wildcard=True)]
+
+ self.assertRaises(InvalidAtom, i1.replace, replace_atoms)
+ i2.replace(replace_atoms)
+
+ i2_atoms = set(replace_atoms)
+
+ self.assertEqual(i2.getAtoms(), i2_atoms)
diff --git a/portage_with_autodep/pym/portage/tests/sets/files/__init__.py b/portage_with_autodep/pym/portage/tests/sets/files/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/sets/files/__init__.py
diff --git a/portage_with_autodep/pym/portage/tests/sets/files/__test__ b/portage_with_autodep/pym/portage/tests/sets/files/__test__
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/sets/files/__test__
diff --git a/portage_with_autodep/pym/portage/tests/sets/files/testConfigFileSet.py b/portage_with_autodep/pym/portage/tests/sets/files/testConfigFileSet.py
new file mode 100644
index 0000000..3ec26a0
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/sets/files/testConfigFileSet.py
@@ -0,0 +1,32 @@
+# testConfigFileSet.py -- Portage Unit Testing Functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import tempfile
+
+from portage import os
+from portage.tests import TestCase, test_cps
+from portage._sets.files import ConfigFileSet
+
+class ConfigFileSetTestCase(TestCase):
+ """Simple Test Case for ConfigFileSet"""
+
+ def setUp(self):
+ fd, self.testfile = tempfile.mkstemp(suffix=".testdata", prefix=self.__class__.__name__, text=True)
+ f = os.fdopen(fd, 'w')
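+		# Alternate between bare atoms and atoms followed by extra
+		# tokens, which ConfigFileSet must ignore when parsing.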
+		for i, atom in enumerate(test_cps):
+			if i % 2 == 0:
+				f.write(atom + ' abc def\n')
+			else:
+				f.write(atom + '\n')
+ f.close()
+
+ def tearDown(self):
+ os.unlink(self.testfile)
+
+ def testConfigStaticFileSet(self):
+ s = ConfigFileSet(self.testfile)
+ s.load()
+ self.assertEqual(set(test_cps), s.getAtoms())
+
diff --git a/portage_with_autodep/pym/portage/tests/sets/files/testStaticFileSet.py b/portage_with_autodep/pym/portage/tests/sets/files/testStaticFileSet.py
new file mode 100644
index 0000000..d515a67
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/sets/files/testStaticFileSet.py
@@ -0,0 +1,27 @@
+# testStaticFileSet.py -- Portage Unit Testing Functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import tempfile
+
+from portage import os
+from portage.tests import TestCase, test_cps
+from portage._sets.files import StaticFileSet
+
+class StaticFileSetTestCase(TestCase):
+ """Simple Test Case for StaticFileSet"""
+
+ def setUp(self):
+ fd, self.testfile = tempfile.mkstemp(suffix=".testdata", prefix=self.__class__.__name__, text=True)
+ f = os.fdopen(fd, 'w')
+ f.write("\n".join(test_cps))
+ f.close()
+
+ def tearDown(self):
+ os.unlink(self.testfile)
+
+ def testSampleStaticFileSet(self):
+ s = StaticFileSet(self.testfile)
+ s.load()
+ self.assertEqual(set(test_cps), s.getAtoms())
+
diff --git a/portage_with_autodep/pym/portage/tests/sets/shell/__init__.py b/portage_with_autodep/pym/portage/tests/sets/shell/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/sets/shell/__init__.py
diff --git a/portage_with_autodep/pym/portage/tests/sets/shell/__test__ b/portage_with_autodep/pym/portage/tests/sets/shell/__test__
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/sets/shell/__test__
diff --git a/portage_with_autodep/pym/portage/tests/sets/shell/testShell.py b/portage_with_autodep/pym/portage/tests/sets/shell/testShell.py
new file mode 100644
index 0000000..2cdd833
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/sets/shell/testShell.py
@@ -0,0 +1,28 @@
+# testShell.py -- Portage Unit Testing Functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.process import find_binary
+from portage.tests import TestCase, test_cps
+from portage._sets.shell import CommandOutputSet
+
+class CommandOutputSetTestCase(TestCase):
+ """Simple Test Case for CommandOutputSet"""
+
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ def testCommand(self):
+
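+		# Build a bash one-liner that echoes each atom on its own line
+		# and verify that CommandOutputSet reads them all back.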
+		expected = set(test_cps)
+		command = find_binary("bash")
+		command += " -c '"
+		for a in expected:
+			command += " echo -e \"%s\" ; " % a
+		command += "'"
+		s = CommandOutputSet(command)
+		atoms = s.getAtoms()
+		self.assertEqual(atoms, expected)
diff --git a/portage_with_autodep/pym/portage/tests/unicode/__init__.py b/portage_with_autodep/pym/portage/tests/unicode/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/unicode/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/portage_with_autodep/pym/portage/tests/unicode/__test__ b/portage_with_autodep/pym/portage/tests/unicode/__test__
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/unicode/__test__
diff --git a/portage_with_autodep/pym/portage/tests/unicode/test_string_format.py b/portage_with_autodep/pym/portage/tests/unicode/test_string_format.py
new file mode 100644
index 0000000..fb6e8e0
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/unicode/test_string_format.py
@@ -0,0 +1,108 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+
+from portage import _encodings, _unicode_decode
+from portage.exception import PortageException
+from portage.tests import TestCase
+from _emerge.DependencyArg import DependencyArg
+from _emerge.UseFlagDisplay import UseFlagDisplay
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+STR_IS_UNICODE = sys.hexversion >= 0x3000000
+
+class StringFormatTestCase(TestCase):
+ """
+	Test that string formatting works correctly in the current interpreter,
+ which may be either python2 or python3.
+ """
+
+ # In order to get some unicode test strings in a way that works in
+ # both python2 and python3, write them here as byte strings and
+ # decode them before use. This assumes _encodings['content'] is
+ # utf_8.
+
+ unicode_strings = (
+ b'\xE2\x80\x98',
+ b'\xE2\x80\x99',
+ )
+
+ def testDependencyArg(self):
+
+ self.assertEqual(_encodings['content'], 'utf_8')
+
+ for arg_bytes in self.unicode_strings:
+ arg_unicode = _unicode_decode(arg_bytes, encoding=_encodings['content'])
+ dependency_arg = DependencyArg(arg=arg_unicode)
+
+ # Force unicode format string so that __unicode__() is
+ # called in python2.
+ formatted_str = _unicode_decode("%s") % (dependency_arg,)
+ self.assertEqual(formatted_str, arg_unicode)
+
+ if STR_IS_UNICODE:
+
+ # Test the __str__ method which returns unicode in python3
+ formatted_str = "%s" % (dependency_arg,)
+ self.assertEqual(formatted_str, arg_unicode)
+
+ else:
+
+ # Test the __str__ method which returns encoded bytes in python2
+ formatted_bytes = "%s" % (dependency_arg,)
+ self.assertEqual(formatted_bytes, arg_bytes)
+
+ def testPortageException(self):
+
+ self.assertEqual(_encodings['content'], 'utf_8')
+
+ for arg_bytes in self.unicode_strings:
+ arg_unicode = _unicode_decode(arg_bytes, encoding=_encodings['content'])
+ e = PortageException(arg_unicode)
+
+ # Force unicode format string so that __unicode__() is
+ # called in python2.
+ formatted_str = _unicode_decode("%s") % (e,)
+ self.assertEqual(formatted_str, arg_unicode)
+
+ if STR_IS_UNICODE:
+
+ # Test the __str__ method which returns unicode in python3
+ formatted_str = "%s" % (e,)
+ self.assertEqual(formatted_str, arg_unicode)
+
+ else:
+
+ # Test the __str__ method which returns encoded bytes in python2
+ formatted_bytes = "%s" % (e,)
+ self.assertEqual(formatted_bytes, arg_bytes)
+
+ def testUseFlagDisplay(self):
+
+ self.assertEqual(_encodings['content'], 'utf_8')
+
+ for enabled in (True, False):
+ for forced in (True, False):
+ for arg_bytes in self.unicode_strings:
+ arg_unicode = _unicode_decode(arg_bytes, encoding=_encodings['content'])
+ e = UseFlagDisplay(arg_unicode, enabled, forced)
+
+ # Force unicode format string so that __unicode__() is
+ # called in python2.
+ formatted_str = _unicode_decode("%s") % (e,)
+ self.assertEqual(isinstance(formatted_str, basestring), True)
+
+ if STR_IS_UNICODE:
+
+ # Test the __str__ method which returns unicode in python3
+ formatted_str = "%s" % (e,)
+ self.assertEqual(isinstance(formatted_str, str), True)
+
+ else:
+
+ # Test the __str__ method which returns encoded bytes in python2
+ formatted_bytes = "%s" % (e,)
+ self.assertEqual(isinstance(formatted_bytes, bytes), True)
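
The pattern these tests exercise can be reproduced with the stdlib alone: on
python2, "%s" % obj calls __str__ (which returns bytes) while u"%s" % obj
calls __unicode__, and on python3 only __str__ exists and returns unicode.
A self-contained sketch, with an illustrative class name:

    import sys

    class Greeting(object):
        def __init__(self, text):
            self._text = text  # a unicode string on both python2 and python3

        if sys.hexversion < 0x3000000:
            def __unicode__(self):
                return self._text

            def __str__(self):
                return self._text.encode('utf_8')
        else:
            def __str__(self):
                return self._text

    g = Greeting(u'\u2018quoted\u2019')
    print(u"%s" % (g,) == u'\u2018quoted\u2019')  # True on either interpreter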
diff --git a/portage_with_autodep/pym/portage/tests/util/__init__.py b/portage_with_autodep/pym/portage/tests/util/__init__.py
new file mode 100644
index 0000000..69ce189
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/util/__init__.py
@@ -0,0 +1,4 @@
+# tests/portage.util/__init__.py -- Portage Unit Test functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
diff --git a/portage_with_autodep/pym/portage/tests/util/__test__ b/portage_with_autodep/pym/portage/tests/util/__test__
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/util/__test__
diff --git a/portage_with_autodep/pym/portage/tests/util/test_digraph.py b/portage_with_autodep/pym/portage/tests/util/test_digraph.py
new file mode 100644
index 0000000..b65c0b1
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/util/test_digraph.py
@@ -0,0 +1,201 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.util.digraph import digraph
+#~ from portage.util import noiselimit
+import portage.util
+
+class DigraphTest(TestCase):
+
+ def testBackwardCompatibility(self):
+ g = digraph()
+ f = g.copy()
+ g.addnode("A", None)
+ self.assertEqual("A" in g, True)
+ self.assertEqual(bool(g), True)
+ self.assertEqual(g.allnodes(), ["A"])
+ self.assertEqual(g.allzeros(), ["A"])
+ self.assertEqual(g.hasnode("A"), True)
+
+ def testDigraphEmptyGraph(self):
+ g = digraph()
+ f = g.clone()
+ for x in g, f:
+ self.assertEqual(bool(x), False)
+ self.assertEqual(x.contains("A"), False)
+ self.assertEqual(x.firstzero(), None)
+ self.assertRaises(KeyError, x.remove, "A")
+ x.delnode("A")
+ self.assertEqual(list(x), [])
+ self.assertEqual(x.get("A"), None)
+ self.assertEqual(x.get("A", "default"), "default")
+ self.assertEqual(x.all_nodes(), [])
+ self.assertEqual(x.leaf_nodes(), [])
+ self.assertEqual(x.root_nodes(), [])
+ self.assertRaises(KeyError, x.child_nodes, "A")
+ self.assertRaises(KeyError, x.parent_nodes, "A")
+ self.assertEqual(x.hasallzeros(), True)
+ self.assertRaises(KeyError, list, x.bfs("A"))
+ self.assertRaises(KeyError, x.shortest_path, "A", "B")
+ self.assertRaises(KeyError, x.remove_edge, "A", "B")
+ self.assertEqual(x.get_cycles(), [])
+ x.difference_update("A")
+ portage.util.noiselimit = -2
+ x.debug_print()
+ portage.util.noiselimit = 0
+
+ def testDigraphCircle(self):
+ g = digraph()
+ g.add("A", "B", -1)
+ g.add("B", "C", 0)
+ g.add("C", "D", 1)
+ g.add("D", "A", 2)
+
+ f = g.clone()
+ for x in g, f:
+ self.assertEqual(bool(x), True)
+ self.assertEqual(x.contains("A"), True)
+ self.assertEqual(x.firstzero(), None)
+ self.assertRaises(KeyError, x.remove, "Z")
+ x.delnode("Z")
+ self.assertEqual(list(x), ["A", "B", "C", "D"])
+ self.assertEqual(x.get("A"), "A")
+ self.assertEqual(x.get("A", "default"), "A")
+ self.assertEqual(x.all_nodes(), ["A", "B", "C", "D"])
+ self.assertEqual(x.leaf_nodes(), [])
+ self.assertEqual(x.root_nodes(), [])
+ self.assertEqual(x.child_nodes("A"), ["D"])
+ self.assertEqual(x.child_nodes("A", ignore_priority=2), [])
+ self.assertEqual(x.parent_nodes("A"), ["B"])
+ self.assertEqual(x.parent_nodes("A", ignore_priority=-2), ["B"])
+ self.assertEqual(x.parent_nodes("A", ignore_priority=-1), [])
+ self.assertEqual(x.hasallzeros(), False)
+ self.assertEqual(list(x.bfs("A")), [(None, "A"), ("A", "D"), ("D", "C"), ("C", "B")])
+ self.assertEqual(x.shortest_path("A", "D"), ["A", "D"])
+ self.assertEqual(x.shortest_path("D", "A"), ["D", "C", "B", "A"])
+ self.assertEqual(x.shortest_path("A", "D", ignore_priority=2), None)
+ self.assertEqual(x.shortest_path("D", "A", ignore_priority=-2), ["D", "C", "B", "A"])
+ cycles = set(tuple(y) for y in x.get_cycles())
+ self.assertEqual(cycles, set([("D", "C", "B", "A"), ("C", "B", "A", "D"), ("B", "A", "D", "C"), \
+ ("A", "D", "C", "B")]))
+ x.remove_edge("A", "B")
+ self.assertEqual(x.get_cycles(), [])
+ x.difference_update(["D"])
+ self.assertEqual(x.all_nodes(), ["A", "B", "C"])
+ portage.util.noiselimit = -2
+ x.debug_print()
+ portage.util.noiselimit = 0
+
+ def testDigraphTree(self):
+ g = digraph()
+ g.add("B", "A", -1)
+ g.add("C", "A", 0)
+ g.add("D", "C", 1)
+ g.add("E", "C", 2)
+
+ f = g.clone()
+ for x in g, f:
+ self.assertEqual(bool(x), True)
+ self.assertEqual(x.contains("A"), True)
+ self.assertEqual(x.firstzero(), "B")
+ self.assertRaises(KeyError, x.remove, "Z")
+ x.delnode("Z")
+ self.assertEqual(set(x), set(["A", "B", "C", "D", "E"]))
+ self.assertEqual(x.get("A"), "A")
+ self.assertEqual(x.get("A", "default"), "A")
+ self.assertEqual(set(x.all_nodes()), set(["A", "B", "C", "D", "E"]))
+ self.assertEqual(set(x.leaf_nodes()), set(["B", "D", "E"]))
+ self.assertEqual(set(x.leaf_nodes(ignore_priority=0)), set(["A", "B", "D", "E"]))
+ self.assertEqual(x.root_nodes(), ["A"])
+ self.assertEqual(set(x.root_nodes(ignore_priority=0)), set(["A", "B", "C"]))
+ self.assertEqual(set(x.child_nodes("A")), set(["B", "C"]))
+ self.assertEqual(x.child_nodes("A", ignore_priority=2), [])
+ self.assertEqual(x.parent_nodes("B"), ["A"])
+ self.assertEqual(x.parent_nodes("B", ignore_priority=-2), ["A"])
+ self.assertEqual(x.parent_nodes("B", ignore_priority=-1), [])
+ self.assertEqual(x.hasallzeros(), False)
+ self.assertEqual(list(x.bfs("A")), [(None, "A"), ("A", "C"), ("A", "B"), ("C", "E"), ("C", "D")])
+ self.assertEqual(x.shortest_path("A", "D"), ["A", "C", "D"])
+ self.assertEqual(x.shortest_path("D", "A"), None)
+ self.assertEqual(x.shortest_path("A", "D", ignore_priority=2), None)
+ cycles = set(tuple(y) for y in x.get_cycles())
+ self.assertEqual(cycles, set())
+ x.remove("D")
+ self.assertEqual(set(x.all_nodes()), set(["A", "B", "C", "E"]))
+ x.remove("C")
+ self.assertEqual(set(x.all_nodes()), set(["A", "B", "E"]))
+ portage.util.noiselimit = -2
+ x.debug_print()
+ portage.util.noiselimit = 0
+ self.assertRaises(KeyError, x.remove_edge, "A", "E")
+
+ def testDigraphCompleteGraph(self):
+ g = digraph()
+ g.add("A", "B", -1)
+ g.add("B", "A", 1)
+ g.add("A", "C", 1)
+ g.add("C", "A", -1)
+ g.add("C", "B", 1)
+ g.add("B", "C", 1)
+
+ f = g.clone()
+ for x in g, f:
+ self.assertEqual(bool(x), True)
+ self.assertEqual(x.contains("A"), True)
+ self.assertEqual(x.firstzero(), None)
+ self.assertRaises(KeyError, x.remove, "Z")
+ x.delnode("Z")
+ self.assertEqual(list(x), ["A", "B", "C"])
+ self.assertEqual(x.get("A"), "A")
+ self.assertEqual(x.get("A", "default"), "A")
+ self.assertEqual(x.all_nodes(), ["A", "B", "C"])
+ self.assertEqual(x.leaf_nodes(), [])
+ self.assertEqual(x.root_nodes(), [])
+ self.assertEqual(set(x.child_nodes("A")), set(["B", "C"]))
+ self.assertEqual(x.child_nodes("A", ignore_priority=0), ["B"])
+ self.assertEqual(set(x.parent_nodes("A")), set(["B", "C"]))
+ self.assertEqual(x.parent_nodes("A", ignore_priority=0), ["C"])
+ self.assertEqual(x.parent_nodes("A", ignore_priority=1), [])
+ self.assertEqual(x.hasallzeros(), False)
+ self.assertEqual(list(x.bfs("A")), [(None, "A"), ("A", "C"), ("A", "B")])
+ self.assertEqual(x.shortest_path("A", "C"), ["A", "C"])
+ self.assertEqual(x.shortest_path("C", "A"), ["C", "A"])
+ self.assertEqual(x.shortest_path("A", "C", ignore_priority=0), ["A", "B", "C"])
+ self.assertEqual(x.shortest_path("C", "A", ignore_priority=0), ["C", "A"])
+ cycles = set(tuple(y) for y in x.get_cycles())
+ self.assertEqual(cycles, set([("C", "A"), ("A", "B"), ("A", "C")]))
+ x.remove_edge("A", "B")
+ self.assertEqual(x.get_cycles(), [["C", "A"], ["A", "C"], ["C", "B"]])
+ x.difference_update(["C"])
+ self.assertEqual(x.all_nodes(), ["A", "B"])
+ portage.util.noiselimit = -2
+ x.debug_print()
+ portage.util.noiselimit = 0
+
+ def testDigraphIgnorePriority(self):
+
+ def always_true(dummy):
+ return True
+
+ def always_false(dummy):
+ return False
+
+ g = digraph()
+ g.add("A", "B")
+
+ self.assertEqual(g.parent_nodes("A"), ["B"])
+ self.assertEqual(g.parent_nodes("A", ignore_priority=always_false), ["B"])
+ self.assertEqual(g.parent_nodes("A", ignore_priority=always_true), [])
+
+ self.assertEqual(g.child_nodes("B"), ["A"])
+ self.assertEqual(g.child_nodes("B", ignore_priority=always_false), ["A"])
+ self.assertEqual(g.child_nodes("B", ignore_priority=always_true), [])
+
+ self.assertEqual(g.leaf_nodes(), ["A"])
+ self.assertEqual(g.leaf_nodes(ignore_priority=always_false), ["A"])
+ self.assertEqual(g.leaf_nodes(ignore_priority=always_true), ["A", "B"])
+
+ self.assertEqual(g.root_nodes(), ["B"])
+ self.assertEqual(g.root_nodes(ignore_priority=always_false), ["B"])
+ self.assertEqual(g.root_nodes(ignore_priority=always_true), ["A", "B"])
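
A short usage sketch for portage.util.digraph, restricted to methods the
tests above exercise (add(node, parent), leaf_nodes(), root_nodes(),
shortest_path()); assumes portage is importable:

    from portage.util.digraph import digraph

    g = digraph()
    g.add("child", "parent")        # add(node, parent): "parent" becomes a parent of "child"
    g.add("parent", "grandparent")
    print(g.leaf_nodes())           # ['child']
    print(g.root_nodes())           # ['grandparent']
    # Paths run from parents toward their children:
    print(g.shortest_path("grandparent", "child"))
    # ['grandparent', 'parent', 'child']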
diff --git a/portage_with_autodep/pym/portage/tests/util/test_getconfig.py b/portage_with_autodep/pym/portage/tests/util/test_getconfig.py
new file mode 100644
index 0000000..22e0bfc
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/util/test_getconfig.py
@@ -0,0 +1,29 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.const import PORTAGE_BASE_PATH
+from portage.tests import TestCase
+from portage.util import getconfig
+
+class GetConfigTestCase(TestCase):
+ """
+ Test that getconfig() produces the same result as bash would when
+ sourcing the same input.
+ """
+
+ _cases = {
+ 'FETCHCOMMAND' : '/usr/bin/wget -t 3 -T 60 --passive-ftp -O "${DISTDIR}/${FILE}" "${URI}"',
+ 'FETCHCOMMAND_RSYNC' : 'rsync -avP "${URI}" "${DISTDIR}/${FILE}"',
+ 'FETCHCOMMAND_SFTP' : 'bash -c "x=\\${2#sftp://} ; host=\\${x%%/*} ; port=\\${host##*:} ; host=\\${host%:*} ; [[ \\${host} = \\${port} ]] && port=22 ; exec sftp -P \\${port} \\"\\${host}:/\\${x#*/}\\" \\"\\$1\\"" sftp "${DISTDIR}/${FILE}" "${URI}"',
+ 'FETCHCOMMAND_SSH' : 'bash -c "x=\\${2#ssh://} ; host=\\${x%%/*} ; port=\\${host##*:} ; host=\\${host%:*} ; [[ \\${host} = \\${port} ]] && port=22 ; exec rsync --rsh=\\"ssh -p\\${port}\\" -avP \\"\\${host}:/\\${x#*/}\\" \\"\\$1\\"" rsync "${DISTDIR}/${FILE}" "${URI}"',
+ 'PORTAGE_ELOG_MAILSUBJECT' : '[portage] ebuild log for ${PACKAGE} on ${HOST}'
+ }
+
+ def testGetConfig(self):
+
+ make_globals_file = os.path.join(PORTAGE_BASE_PATH,
+ 'cnf', 'make.globals')
+ d = getconfig(make_globals_file)
+ for k, v in self._cases.items():
+ self.assertEqual(d[k], v)
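
A hedged sketch of calling getconfig() on a throwaway bash-style file; the
variable names are made up for illustration, and expansion of earlier keys
into later values is on by default:

    import os, tempfile
    from portage.util import getconfig

    fd, path = tempfile.mkstemp()
    try:
        with os.fdopen(fd, 'w') as f:
            f.write('DISTDIR="/var/distfiles"\n')
            f.write('FETCH="wget -O ${DISTDIR}/out"\n')
        d = getconfig(path)
        print(d['FETCH'])  # 'wget -O /var/distfiles/out'
    finally:
        os.unlink(path)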
diff --git a/portage_with_autodep/pym/portage/tests/util/test_grabdict.py b/portage_with_autodep/pym/portage/tests/util/test_grabdict.py
new file mode 100644
index 0000000..e62a75d
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/util/test_grabdict.py
@@ -0,0 +1,11 @@
+# test_grabdict.py -- Portage Unit Testing Functionality
+# Copyright 2006-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+#from portage.util import grabdict
+
+class GrabDictTestCase(TestCase):
+
+ def testGrabDictPass(self):
+ pass
diff --git a/portage_with_autodep/pym/portage/tests/util/test_normalizedPath.py b/portage_with_autodep/pym/portage/tests/util/test_normalizedPath.py
new file mode 100644
index 0000000..f993886
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/util/test_normalizedPath.py
@@ -0,0 +1,14 @@
+# test_normalizedPath.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+
+class NormalizePathTestCase(TestCase):
+
+ def testNormalizePath(self):
+
+ from portage.util import normalize_path
+ path = "///foo/bar/baz"
+ good = "/foo/bar/baz"
+ self.assertEqual(normalize_path(path), good)
diff --git a/portage_with_autodep/pym/portage/tests/util/test_stackDictList.py b/portage_with_autodep/pym/portage/tests/util/test_stackDictList.py
new file mode 100644
index 0000000..678001c
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/util/test_stackDictList.py
@@ -0,0 +1,17 @@
+# test_stackDictList.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+
+class StackDictListTestCase(TestCase):
+
+ def testStackDictList(self):
+ from portage.util import stack_dictlist
+
+ tests = [ ({'a':'b'},{'x':'y'},False,{'a':['b'],'x':['y']}) ]
+ tests.append(( {'KEYWORDS':['alpha','x86']},{'KEYWORDS':['-*']},True,{} ))
+ tests.append(( {'KEYWORDS':['alpha','x86']},{'KEYWORDS':['-x86']},True,{'KEYWORDS':['alpha']} ))
+ for test in tests:
+ self.assertEqual(
+ stack_dictlist([test[0],test[1]],incremental=test[2]), test[3] )
diff --git a/portage_with_autodep/pym/portage/tests/util/test_stackDicts.py b/portage_with_autodep/pym/portage/tests/util/test_stackDicts.py
new file mode 100644
index 0000000..0d2cadd
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/util/test_stackDicts.py
@@ -0,0 +1,36 @@
+# test_stackDicts.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.util import stack_dicts
+
+
+class StackDictsTestCase(TestCase):
+
+ def testStackDictsPass(self):
+
+ tests = [ ( [ { "a":"b" }, { "b":"c" } ], { "a":"b", "b":"c" },
+ False, [], False ),
+ ( [ { "a":"b" }, { "a":"c" } ], { "a":"b c" },
+ True, [], False ),
+ ( [ { "a":"b" }, { "a":"c" } ], { "a":"b c" },
+ False, ["a"], False ),
+ ( [ { "a":"b" }, None ], { "a":"b" },
+ False, [], True ),
+ ( [ None ], {}, False, [], False ),
+ ( [ None, {}], {}, False, [], True ) ]
+
+
+ for test in tests:
+ result = stack_dicts( test[0], test[2], test[3], test[4] )
+ self.assertEqual( result, test[1] )
+
+ def testStackDictsFail(self):
+
+ tests = [ ( [ None, {} ], None, False, [], True ),
+ ( [ { "a":"b"}, {"a":"c" } ], { "a":"b c" },
+ False, [], False ) ]
+ for test in tests:
+ result = stack_dicts( test[0], test[2], test[3], test[4] )
+ self.assertNotEqual( result , test[1] )
diff --git a/portage_with_autodep/pym/portage/tests/util/test_stackLists.py b/portage_with_autodep/pym/portage/tests/util/test_stackLists.py
new file mode 100644
index 0000000..8d01ea5
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/util/test_stackLists.py
@@ -0,0 +1,19 @@
+# test_stackLists.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.util import stack_lists
+
+class StackListsTestCase(TestCase):
+
+ def testStackLists(self):
+
+ tests = [ ( [ ['a','b','c'], ['d','e','f'] ], ['a','c','b','e','d','f'], False ),
+ ( [ ['a','x'], ['b','x'] ], ['a','x','b'], False ),
+ ( [ ['a','b','c'], ['-*'] ], [], True ),
+ ( [ ['a'], ['-a'] ], [], True ) ]
+
+ for test in tests:
+ result = stack_lists( test[0], test[2] )
+ self.assertEqual( result , test[1] )
diff --git a/portage_with_autodep/pym/portage/tests/util/test_uniqueArray.py b/portage_with_autodep/pym/portage/tests/util/test_uniqueArray.py
new file mode 100644
index 0000000..2a1a209
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/util/test_uniqueArray.py
@@ -0,0 +1,24 @@
+# test_uniqueArray.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.tests import TestCase
+from portage.util import unique_array
+
+class UniqueArrayTestCase(TestCase):
+
+ def testUniqueArrayPass(self):
+ """
+ test portage.util.unique_array()
+ """
+
+ tests = [ ( ["a","a","a",os,os,[],[],[]], ['a',os,[]] ),
+ ( [1,1,1,2,3,4,4] , [1,2,3,4]) ]
+
+ for test in tests:
+ result = unique_array( test[0] )
+ for item in test[1]:
+ number = result.count(item)
+ self.assertFalse( number != 1, msg="%s contains %s of %s, \
+ should be only 1" % (result, number, item) )
diff --git a/portage_with_autodep/pym/portage/tests/util/test_varExpand.py b/portage_with_autodep/pym/portage/tests/util/test_varExpand.py
new file mode 100644
index 0000000..7b528d6
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/util/test_varExpand.py
@@ -0,0 +1,92 @@
+# test_varExpand.py -- Portage Unit Testing Functionality
+# Copyright 2006-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.util import varexpand
+
+class VarExpandTestCase(TestCase):
+
+ def testVarExpandPass(self):
+
+ varDict = { "a":"5", "b":"7", "c":"-5" }
+ for key in varDict:
+ result = varexpand( "$%s" % key, varDict )
+
+ self.assertFalse( result != varDict[key],
+ msg="Got %s != %s, from varexpand( %s, %s )" % \
+ ( result, varDict[key], "$%s" % key, varDict ) )
+ result = varexpand( "${%s}" % key, varDict )
+ self.assertFalse( result != varDict[key],
+ msg="Got %s != %s, from varexpand( %s, %s )" % \
+ ( result, varDict[key], "${%s}" % key, varDict ) )
+
+ def testVarExpandBackslashes(self):
+ """
+ We want to behave like bash does when expanding a variable
+ assignment in a sourced file, in which case it performs
+ backslash removal for \\ and \$ but nothing more. It also
+ removes escaped newline characters. Note that we don't
+ handle escaped quotes here, since getconfig() uses shlex
+ to handle that earlier.
+ """
+
+ varDict = {}
+ tests = [
+ ("\\", "\\"),
+ ("\\\\", "\\"),
+ ("\\\\\\", "\\\\"),
+ ("\\\\\\\\", "\\\\"),
+ ("\\$", "$"),
+ ("\\\\$", "\\$"),
+ ("\\a", "\\a"),
+ ("\\b", "\\b"),
+ ("\\n", "\\n"),
+ ("\\r", "\\r"),
+ ("\\t", "\\t"),
+ ("\\\n", ""),
+ ("\\\"", "\\\""),
+ ("\\'", "\\'"),
+ ]
+ for test in tests:
+ result = varexpand( test[0], varDict )
+ self.assertFalse( result != test[1],
+ msg="Got %s != %s from varexpand( %s, %s )" \
+ % ( result, test[1], test[0], varDict ) )
+
+ def testVarExpandDoubleQuotes(self):
+
+ varDict = { "a":"5" }
+ tests = [ ("\"${a}\"", "\"5\"") ]
+ for test in tests:
+ result = varexpand( test[0], varDict )
+ self.assertFalse( result != test[1],
+ msg="Got %s != %s from varexpand( %s, %s )" \
+ % ( result, test[1], test[0], varDict ) )
+
+ def testVarExpandSingleQuotes(self):
+
+ varDict = { "a":"5" }
+ tests = [ ("\'${a}\'", "\'${a}\'") ]
+ for test in tests:
+ result = varexpand( test[0], varDict )
+ self.assertFalse( result != test[1],
+ msg="Got %s != %s from varexpand( %s, %s )" \
+ % ( result, test[1], test[0], varDict ) )
+
+ def testVarExpandFail(self):
+
+ varDict = { "a":"5", "b":"7", "c":"15" }
+
+ testVars = [ "fail" ]
+
+ for var in testVars:
+ result = varexpand( "$%s" % var, varDict )
+ self.assertFalse( len(result),
+ msg="Got %s == %s, from varexpand( %s, %s )" \
+ % ( result, var, "$%s" % var, varDict ) )
+
+ result = varexpand( "${%s}" % var, varDict )
+ self.assertFalse( len(result),
+ msg="Got %s == %s, from varexpand( %s, %s )" \
+ % ( result, var, "${%s}" % var, varDict ) )
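
The semantics exercised above reduce to a few calls; a sketch assuming
portage is importable, with an illustrative variable dict:

    from portage.util import varexpand

    env = {"PN": "portage", "PV": "2.1"}
    print(varexpand("${PN}-${PV}", env))  # 'portage-2.1'
    print(varexpand("'${PN}'", env))      # single quotes block expansion: "'${PN}'"
    print(varexpand("\\$PN", env))        # a backslash-escaped dollar yields '$PN'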
diff --git a/portage_with_autodep/pym/portage/tests/versions/__init__.py b/portage_with_autodep/pym/portage/tests/versions/__init__.py
new file mode 100644
index 0000000..2b14180
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/versions/__init__.py
@@ -0,0 +1,3 @@
+# tests/portage.versions/__init__.py -- Portage Unit Test functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/portage_with_autodep/pym/portage/tests/versions/__test__ b/portage_with_autodep/pym/portage/tests/versions/__test__
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/versions/__test__
diff --git a/portage_with_autodep/pym/portage/tests/versions/test_cpv_sort_key.py b/portage_with_autodep/pym/portage/tests/versions/test_cpv_sort_key.py
new file mode 100644
index 0000000..a223d78
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/versions/test_cpv_sort_key.py
@@ -0,0 +1,16 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.versions import cpv_sort_key
+
+class CpvSortKeyTestCase(TestCase):
+
+ def testCpvSortKey(self):
+
+ tests = [ (("a/b-2_alpha", "a", "b", "a/b-2", "a/a-1", "a/b-1"),
+ ( "a", "a/a-1", "a/b-1", "a/b-2_alpha", "a/b-2", "b")),
+ ]
+
+ for test in tests:
+ self.assertEqual( tuple(sorted(test[0], key=cpv_sort_key())), test[1] )
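
In practice cpv_sort_key() is handed straight to sorted(), as the test
shows; a minimal sketch assuming portage is importable:

    from portage.versions import cpv_sort_key

    cpvs = ["a/b-2", "a/b-1", "a/b-2_alpha"]
    print(sorted(cpvs, key=cpv_sort_key()))
    # ['a/b-1', 'a/b-2_alpha', 'a/b-2']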
diff --git a/portage_with_autodep/pym/portage/tests/versions/test_vercmp.py b/portage_with_autodep/pym/portage/tests/versions/test_vercmp.py
new file mode 100644
index 0000000..aa7969c
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/versions/test_vercmp.py
@@ -0,0 +1,80 @@
+# test_vercmp.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.versions import vercmp
+
+class VerCmpTestCase(TestCase):
+ """ A simple testCase for portage.versions.vercmp()
+ """
+
+ def testVerCmpGreater(self):
+
+ tests = [ ( "6.0", "5.0"), ("5.0","5"),
+ ("1.0-r1", "1.0-r0"),
+ ("1.0-r1", "1.0"),
+ ("cvs.9999", "9999"),
+ ("999999999999999999999999999999", "999999999999999999999999999998"),
+ ("1.0.0", "1.0"),
+ ("1.0.0", "1.0b"),
+ ("1b", "1"),
+ ("1b_p1", "1_p1"),
+ ("1.1b", "1.1"),
+ ("12.2.5", "12.2b"),
+ ]
+ for test in tests:
+ self.assertFalse( vercmp( test[0], test[1] ) <= 0, msg="%s < %s? Wrong!" % (test[0],test[1]) )
+
+ def testVerCmpLess(self):
+ """
+ alpha < beta < pre < rc < p -> test each of these; the orderings should be transitive
+ """
+ tests = [ ( "4.0", "5.0"), ("5", "5.0"), ("1.0_pre2","1.0_p2"),
+ ("1.0_alpha2", "1.0_p2"),("1.0_alpha1", "1.0_beta1"),("1.0_beta3","1.0_rc3"),
+ ("1.001000000000000000001", "1.001000000000000000002"),
+ ("1.00100000000", "1.0010000000000000001"),
+ ("9999", "cvs.9999"),
+ ("999999999999999999999999999998", "999999999999999999999999999999"),
+ ("1.01", "1.1"),
+ ("1.0-r0", "1.0-r1"),
+ ("1.0", "1.0-r1"),
+ ("1.0", "1.0.0"),
+ ("1.0b", "1.0.0"),
+ ("1_p1", "1b_p1"),
+ ("1", "1b"),
+ ("1.1", "1.1b"),
+ ("12.2b", "12.2.5"),
+ ]
+ for test in tests:
+ self.assertFalse( vercmp( test[0], test[1]) >= 0, msg="%s > %s? Wrong!" % (test[0],test[1]))
+
+
+ def testVerCmpEqual(self):
+
+ tests = [ ("4.0", "4.0"),
+ ("1.0", "1.0"),
+ ("1.0-r0", "1.0"),
+ ("1.0", "1.0-r0"),
+ ("1.0-r0", "1.0-r0"),
+ ("1.0-r1", "1.0-r1")]
+ for test in tests:
+ self.assertFalse( vercmp( test[0], test[1]) != 0, msg="%s != %s? Wrong!" % (test[0],test[1]))
+
+ def testVerNotEqual(self):
+
+ tests = [ ("1","2"),("1.0_alpha","1.0_pre"),("1.0_beta","1.0_alpha"),
+ ("0", "0.0"),
+ ("cvs.9999", "9999"),
+ ("1.0-r0", "1.0-r1"),
+ ("1.0-r1", "1.0-r0"),
+ ("1.0", "1.0-r1"),
+ ("1.0-r1", "1.0"),
+ ("1.0", "1.0.0"),
+ ("1_p1", "1b_p1"),
+ ("1b", "1"),
+ ("1.1b", "1.1"),
+ ("12.2b", "12.2"),
+ ]
+ for test in tests:
+ self.assertFalse( vercmp( test[0], test[1]) == 0, msg="%s == %s? Wrong!" % (test[0],test[1]))
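
vercmp() follows the C comparison convention (negative, zero, positive), so
the assertions above reduce to sign checks; a sketch assuming portage is
importable:

    from portage.versions import vercmp

    print(vercmp("1.0-r1", "1.0") > 0)     # True: revision 1 is newer
    print(vercmp("1.0_alpha", "1.0") < 0)  # True: _alpha precedes the release
    print(vercmp("1.0-r0", "1.0") == 0)    # True: -r0 equals the plain version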
diff --git a/portage_with_autodep/pym/portage/tests/xpak/__init__.py b/portage_with_autodep/pym/portage/tests/xpak/__init__.py
new file mode 100644
index 0000000..9c3f524
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/xpak/__init__.py
@@ -0,0 +1,3 @@
+# tests/portage.xpak/__init__.py -- Portage Unit Test functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/portage_with_autodep/pym/portage/tests/xpak/__test__ b/portage_with_autodep/pym/portage/tests/xpak/__test__
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/xpak/__test__
diff --git a/portage_with_autodep/pym/portage/tests/xpak/test_decodeint.py b/portage_with_autodep/pym/portage/tests/xpak/test_decodeint.py
new file mode 100644
index 0000000..2da5735
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/xpak/test_decodeint.py
@@ -0,0 +1,16 @@
+# xpak/test_decodeint.py
+# Copyright 2006 Gentoo Foundation
+# Portage Unit Testing Functionality
+
+from portage.tests import TestCase
+from portage.xpak import decodeint, encodeint
+
+class testDecodeIntTestCase(TestCase):
+
+ def testDecodeInt(self):
+
+ for n in range(1000):
+ self.assertEqual(decodeint(encodeint(n)), n)
+
+ for n in (2 ** 32 - 1,):
+ self.assertEqual(decodeint(encodeint(n)), n)
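
encodeint() stores an integer as four big-endian bytes inside an xpak
archive; the struct-based round trip below mirrors that layout (a
stdlib-only sketch, not the portage implementation itself):

    import struct

    def encodeint_sketch(n):
        return struct.pack(">I", n)   # 4 bytes, big-endian, unsigned

    def decodeint_sketch(data):
        return struct.unpack(">I", data)[0]

    assert decodeint_sketch(encodeint_sketch(2 ** 32 - 1)) == 2 ** 32 - 1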
diff --git a/portage_with_autodep/pym/portage/update.py b/portage_with_autodep/pym/portage/update.py
new file mode 100644
index 0000000..52ab506
--- /dev/null
+++ b/portage_with_autodep/pym/portage/update.py
@@ -0,0 +1,320 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import io
+import re
+import stat
+import sys
+
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.dep:Atom,dep_getkey,isvalidatom,' + \
+ 'remove_slot',
+ 'portage.util:ConfigProtect,new_protect_filename,' + \
+ 'normalize_path,write_atomic,writemsg',
+ 'portage.util.listdir:_ignorecvs_dirs',
+ 'portage.versions:ververify'
+)
+
+from portage.const import USER_CONFIG_PATH
+from portage.exception import DirectoryNotFound, InvalidAtom, PortageException
+from portage.localization import _
+
+if sys.hexversion >= 0x3000000:
+ long = int
+
+ignored_dbentries = ("CONTENTS", "environment.bz2")
+
+def update_dbentry(update_cmd, mycontent):
+ if update_cmd[0] == "move":
+ old_value = str(update_cmd[1])
+ if old_value in mycontent:
+ new_value = str(update_cmd[2])
+ old_value = re.escape(old_value)
+ mycontent = re.sub(old_value+"(:|$|\\s)", new_value+"\\1", mycontent)
+ def myreplace(matchobj):
+ # Strip slot and * operator if necessary
+ # so that ververify works.
+ ver = remove_slot(matchobj.group(2))
+ ver = ver.rstrip("*")
+ if ververify(ver):
+ return "%s-%s" % (new_value, matchobj.group(2))
+ else:
+ return "".join(matchobj.groups())
+ mycontent = re.sub("(%s-)(\\S*)" % old_value, myreplace, mycontent)
+ elif update_cmd[0] == "slotmove" and update_cmd[1].operator is None:
+ pkg, origslot, newslot = update_cmd[1:]
+ old_value = "%s:%s" % (pkg, origslot)
+ if old_value in mycontent:
+ old_value = re.escape(old_value)
+ new_value = "%s:%s" % (pkg, newslot)
+ mycontent = re.sub(old_value+"($|\\s)", new_value+"\\1", mycontent)
+ return mycontent
+
+def update_dbentries(update_iter, mydata):
+ """Performs update commands and returns a
+ dict containing only the updated items."""
+ updated_items = {}
+ for k, mycontent in mydata.items():
+ k_unicode = _unicode_decode(k,
+ encoding=_encodings['repo.content'], errors='replace')
+ if k_unicode not in ignored_dbentries:
+ orig_content = mycontent
+ mycontent = _unicode_decode(mycontent,
+ encoding=_encodings['repo.content'], errors='replace')
+ is_encoded = mycontent is not orig_content
+ orig_content = mycontent
+ for update_cmd in update_iter:
+ mycontent = update_dbentry(update_cmd, mycontent)
+ if mycontent != orig_content:
+ if is_encoded:
+ mycontent = _unicode_encode(mycontent,
+ encoding=_encodings['repo.content'],
+ errors='backslashreplace')
+ updated_items[k] = mycontent
+ return updated_items
+
+def fixdbentries(update_iter, dbdir):
+ """Performs update commands which result in search and replace operations
+ for each of the files in dbdir (excluding CONTENTS and environment.bz2).
+ Returns True when actual modifications are necessary and False otherwise."""
+ mydata = {}
+ for myfile in [f for f in os.listdir(dbdir) if f not in ignored_dbentries]:
+ file_path = os.path.join(dbdir, myfile)
+ mydata[myfile] = io.open(_unicode_encode(file_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace').read()
+ updated_items = update_dbentries(update_iter, mydata)
+ for myfile, mycontent in updated_items.items():
+ file_path = os.path.join(dbdir, myfile)
+ write_atomic(file_path, mycontent, encoding=_encodings['repo.content'])
+ return len(updated_items) > 0
+
+def grab_updates(updpath, prev_mtimes=None):
+ """Returns all the updates from the given directory as a sorted list of
+ tuples, each containing (file_path, statobj, content). If prev_mtimes is
+ given then updates are only returned if one or more files have different
+ mtimes. When a change is detected for a given file, updates will be
+ returned for that file and any files that come after it in the entire
+ sequence. This ensures that all relevant updates are returned for cases
+ in which the destination package of an earlier move corresponds to
+ the source package of a move that comes somewhere later in the entire
+ sequence of files.
+ """
+ try:
+ mylist = os.listdir(updpath)
+ except OSError as oe:
+ if oe.errno == errno.ENOENT:
+ raise DirectoryNotFound(updpath)
+ raise
+ if prev_mtimes is None:
+ prev_mtimes = {}
+ # validate the file name (filter out CVS directory, etc...)
+ mylist = [myfile for myfile in mylist if len(myfile) == 7 and myfile[1:3] == "Q-"]
+ if len(mylist) == 0:
+ return []
+
+ # update names are mangled to make them sort properly
+ mylist = [myfile[3:]+"-"+myfile[:2] for myfile in mylist]
+ mylist.sort()
+ mylist = [myfile[5:]+"-"+myfile[:4] for myfile in mylist]
+
+ update_data = []
+ for myfile in mylist:
+ file_path = os.path.join(updpath, myfile)
+ mystat = os.stat(file_path)
+ if update_data or \
+ file_path not in prev_mtimes or \
+ long(prev_mtimes[file_path]) != mystat[stat.ST_MTIME]:
+ content = io.open(_unicode_encode(file_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'], errors='replace'
+ ).read()
+ update_data.append((file_path, mystat, content))
+ return update_data
+
+def parse_updates(mycontent):
+ """Valid updates are returned as a list of split update commands."""
+ myupd = []
+ errors = []
+ mylines = mycontent.splitlines()
+ for myline in mylines:
+ mysplit = myline.split()
+ if len(mysplit) == 0:
+ continue
+ if mysplit[0] not in ("move", "slotmove"):
+ errors.append(_("ERROR: Update type not recognized '%s'") % myline)
+ continue
+ if mysplit[0] == "move":
+ if len(mysplit) != 3:
+ errors.append(_("ERROR: Update command invalid '%s'") % myline)
+ continue
+ for i in (1, 2):
+ try:
+ atom = Atom(mysplit[i])
+ except InvalidAtom:
+ atom = None
+ else:
+ if atom.blocker or atom != atom.cp:
+ atom = None
+ if atom is not None:
+ mysplit[i] = atom
+ else:
+ errors.append(
+ _("ERROR: Malformed update entry '%s'") % myline)
+ break
+ if mysplit[0] == "slotmove":
+ if len(mysplit)!=4:
+ errors.append(_("ERROR: Update command invalid '%s'") % myline)
+ continue
+ pkg, origslot, newslot = mysplit[1], mysplit[2], mysplit[3]
+ try:
+ atom = Atom(pkg)
+ except InvalidAtom:
+ atom = None
+ else:
+ if atom.blocker:
+ atom = None
+ if atom is not None:
+ mysplit[1] = atom
+ else:
+ errors.append(_("ERROR: Malformed update entry '%s'") % myline)
+ continue
+
+ # The list of valid updates is filtered by continue statements above.
+ myupd.append(mysplit)
+ return myupd, errors
+
+def update_config_files(config_root, protect, protect_mask, update_iter, match_callback = None):
+ """Perform global updates on /etc/portage/package.*.
+ config_root - location of files to update
+ protect - list of paths from CONFIG_PROTECT
+ protect_mask - list of paths from CONFIG_PROTECT_MASK
+ update_iter - list of update commands as returned from parse_updates(),
+ or dict of {repo_name: list}
+ match_callback - a callback which will be called with three arguments:
+ match_callback(repo_name, old_atom, new_atom)
+ and should return boolean value determining whether to perform the update"""
+
+ repo_dict = None
+ if isinstance(update_iter, dict):
+ repo_dict = update_iter
+ if match_callback is None:
+ def match_callback(repo_name, atoma, atomb):
+ return True
+ config_root = normalize_path(config_root)
+ update_files = {}
+ file_contents = {}
+ myxfiles = [
+ "package.accept_keywords", "package.env",
+ "package.keywords", "package.license",
+ "package.mask", "package.properties",
+ "package.unmask", "package.use"
+ ]
+ myxfiles += [os.path.join("profile", x) for x in myxfiles]
+ abs_user_config = os.path.join(config_root, USER_CONFIG_PATH)
+ recursivefiles = []
+ for x in myxfiles:
+ config_file = os.path.join(abs_user_config, x)
+ if os.path.isdir(config_file):
+ for parent, dirs, files in os.walk(config_file):
+ try:
+ parent = _unicode_decode(parent,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ for y_enc in list(dirs):
+ try:
+ y = _unicode_decode(y_enc,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ dirs.remove(y_enc)
+ continue
+ if y.startswith(".") or y in _ignorecvs_dirs:
+ dirs.remove(y_enc)
+ for y in files:
+ try:
+ y = _unicode_decode(y,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ if y.startswith("."):
+ continue
+ recursivefiles.append(
+ os.path.join(parent, y)[len(abs_user_config) + 1:])
+ else:
+ recursivefiles.append(x)
+ myxfiles = recursivefiles
+ for x in myxfiles:
+ try:
+ file_contents[x] = io.open(
+ _unicode_encode(os.path.join(abs_user_config, x),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'],
+ errors='replace').readlines()
+ except IOError:
+ continue
+
+ # update /etc/portage/packages.*
+ ignore_line_re = re.compile(r'^#|^\s*$')
+ if repo_dict is None:
+ update_items = [(None, update_iter)]
+ else:
+ update_items = [x for x in repo_dict.items() if x[0] != 'DEFAULT']
+ for repo_name, update_iter in update_items:
+ for update_cmd in update_iter:
+ for x, contents in file_contents.items():
+ skip_next = False
+ for pos, line in enumerate(contents):
+ if skip_next:
+ skip_next = False
+ continue
+ if ignore_line_re.match(line):
+ continue
+ atom = line.split()[0]
+ if atom[:1] == "-":
+ # package.mask supports incrementals
+ atom = atom[1:]
+ if not isvalidatom(atom):
+ continue
+ new_atom = update_dbentry(update_cmd, atom)
+ if atom != new_atom:
+ if match_callback(repo_name, atom, new_atom):
+ # add a comment with the update command, so
+ # the user can clearly see what happened
+ contents[pos] = "# %s\n" % \
+ " ".join("%s" % (x,) for x in update_cmd)
+ contents.insert(pos + 1,
+ line.replace("%s" % (atom,),
+ "%s" % (new_atom,), 1))
+ # we've inserted an additional line, so we need to
+ # skip it when it's reached in the next iteration
+ skip_next = True
+ update_files[x] = 1
+ sys.stdout.write("p")
+ sys.stdout.flush()
+
+ protect_obj = ConfigProtect(
+ config_root, protect, protect_mask)
+ for x in update_files:
+ updating_file = os.path.join(abs_user_config, x)
+ if protect_obj.isprotected(updating_file):
+ updating_file = new_protect_filename(updating_file)
+ try:
+ write_atomic(updating_file, "".join(file_contents[x]))
+ except PortageException as e:
+ writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
+ writemsg(_("!!! An error occurred while updating a config file:") + \
+ " '%s'\n" % updating_file, noiselevel=-1)
+ continue
+
+def dep_transform(mydep, oldkey, newkey):
+ if dep_getkey(mydep) == oldkey:
+ return mydep.replace(oldkey, newkey, 1)
+ return mydep
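
A hedged sketch of the parse_updates()/update_dbentry() flow defined above;
the move line is illustrative, following the "move <old> <new>" syntax that
parse_updates() accepts:

    from portage.update import parse_updates, update_dbentry

    commands, errors = parse_updates("move x11-libs/qt x11-libs/qt-core")
    print(errors)  # [] when every line parses cleanly
    content = "x11-libs/qt x11-libs/other"
    for cmd in commands:
        content = update_dbentry(cmd, content)
    print(content)  # 'x11-libs/qt-core x11-libs/other'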
diff --git a/portage_with_autodep/pym/portage/util/ExtractKernelVersion.py b/portage_with_autodep/pym/portage/util/ExtractKernelVersion.py
new file mode 100644
index 0000000..5cb9747
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/ExtractKernelVersion.py
@@ -0,0 +1,76 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['ExtractKernelVersion']
+
+import io
+
+from portage import os, _encodings, _unicode_encode
+from portage.util import getconfig, grabfile
+
+def ExtractKernelVersion(base_dir):
+ """
+ Try to figure out what kernel version we are running
+ @param base_dir: Path to sources (usually /usr/src/linux)
+ @type base_dir: string
+ @rtype: tuple( version[string], error[string])
+ @returns:
+ 1. tuple( version[string], error[string])
+ Either version or error is populated (but never both)
+
+ """
+ lines = []
+ pathname = os.path.join(base_dir, 'Makefile')
+ try:
+ f = io.open(_unicode_encode(pathname,
+ encoding=_encodings['fs'], errors='strict'), mode='r',
+ encoding=_encodings['content'], errors='replace')
+ except OSError as details:
+ return (None, str(details))
+ except IOError as details:
+ return (None, str(details))
+
+ try:
+ for i in range(4):
+ lines.append(f.readline())
+ except OSError as details:
+ return (None, str(details))
+ except IOError as details:
+ return (None, str(details))
+
+ lines = [l.strip() for l in lines]
+
+ version = ''
+
+ #XXX: The following code relies on the ordering of vars within the Makefile
+ for line in lines:
+ # split on the '=' then remove annoying whitespace
+ items = line.split("=")
+ items = [i.strip() for i in items]
+ if items[0] == 'VERSION' or \
+ items[0] == 'PATCHLEVEL':
+ version += items[1]
+ version += "."
+ elif items[0] == 'SUBLEVEL':
+ version += items[1]
+ elif items[0] == 'EXTRAVERSION' and \
+ items[-1] != items[0]:
+ version += items[1]
+
+ # Grab a list of files named localversion* and sort them
+ localversions = os.listdir(base_dir)
+ for x in range(len(localversions)-1,-1,-1):
+ if localversions[x][:12] != "localversion":
+ del localversions[x]
+ localversions.sort()
+
+ # Append the contents of each to the version string, stripping ALL whitespace
+ for lv in localversions:
+ version += "".join( " ".join( grabfile( base_dir+ "/" + lv ) ).split() )
+
+ # Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
+ kernelconfig = getconfig(base_dir+"/.config")
+ if kernelconfig and "CONFIG_LOCALVERSION" in kernelconfig:
+ version += "".join(kernelconfig["CONFIG_LOCALVERSION"].split())
+
+ return (version,None)
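
Typical use of ExtractKernelVersion(); /usr/src/linux is the conventional
location mentioned in the docstring and may differ on a given system:

    from portage.util.ExtractKernelVersion import ExtractKernelVersion

    version, error = ExtractKernelVersion('/usr/src/linux')
    if error is not None:
        print("could not determine version: %s" % error)
    else:
        print("kernel sources are version %s" % version)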
diff --git a/portage_with_autodep/pym/portage/util/__init__.py b/portage_with_autodep/pym/portage/util/__init__.py
new file mode 100644
index 0000000..4aa63d5
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/__init__.py
@@ -0,0 +1,1602 @@
+# Copyright 2004-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['apply_permissions', 'apply_recursive_permissions',
+ 'apply_secpass_permissions', 'apply_stat_permissions', 'atomic_ofstream',
+ 'cmp_sort_key', 'ConfigProtect', 'dump_traceback', 'ensure_dirs',
+ 'find_updated_config_files', 'getconfig', 'getlibpaths', 'grabdict',
+ 'grabdict_package', 'grabfile', 'grabfile_package', 'grablines',
+ 'initialize_logger', 'LazyItemsDict', 'map_dictlist_vals',
+ 'new_protect_filename', 'normalize_path', 'pickle_read', 'stack_dictlist',
+ 'stack_dicts', 'stack_lists', 'unique_array', 'unique_everseen', 'varexpand',
+ 'write_atomic', 'writedict', 'writemsg', 'writemsg_level', 'writemsg_stdout']
+
+from copy import deepcopy
+import errno
+import io
+try:
+ from itertools import filterfalse
+except ImportError:
+ from itertools import ifilterfalse as filterfalse
+import logging
+import re
+import shlex
+import stat
+import string
+import sys
+import traceback
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'pickle',
+ 'portage.dep:Atom',
+ 'portage.util.listdir:_ignorecvs_dirs'
+)
+
+from portage import os
+from portage import subprocess_getstatusoutput
+from portage import _encodings
+from portage import _os_merge
+from portage import _unicode_encode
+from portage import _unicode_decode
+from portage.exception import InvalidAtom, PortageException, FileNotFound, \
+ OperationNotPermitted, PermissionDenied, ReadOnlyFileSystem
+from portage.localization import _
+from portage.proxy.objectproxy import ObjectProxy
+from portage.cache.mappings import UserDict
+
+noiselimit = 0
+
+def initialize_logger(level=logging.WARN):
+ """Sets up basic logging of portage activities
+ Args:
+ level: the level to emit messages at ('info', 'debug', 'warning' ...)
+ Returns:
+ None
+ """
+ logging.basicConfig(level=level, format='[%(levelname)-4s] %(message)s')
+
+def writemsg(mystr,noiselevel=0,fd=None):
+ """Prints out warning and debug messages based on the noiselimit setting"""
+ global noiselimit
+ if fd is None:
+ fd = sys.stderr
+ if noiselevel <= noiselimit:
+ # avoid potential UnicodeEncodeError
+ if isinstance(fd, io.StringIO):
+ mystr = _unicode_decode(mystr,
+ encoding=_encodings['content'], errors='replace')
+ else:
+ mystr = _unicode_encode(mystr,
+ encoding=_encodings['stdio'], errors='backslashreplace')
+ if sys.hexversion >= 0x3000000 and fd in (sys.stdout, sys.stderr):
+ fd = fd.buffer
+ fd.write(mystr)
+ fd.flush()
+
+def writemsg_stdout(mystr,noiselevel=0):
+ """Prints messages stdout based on the noiselimit setting"""
+ writemsg(mystr, noiselevel=noiselevel, fd=sys.stdout)
+
+def writemsg_level(msg, level=0, noiselevel=0):
+ """
+ Show a message for the given level as defined by the logging module
+ (default is 0). When level >= logging.WARNING then the message is
+ sent to stderr, otherwise it is sent to stdout. The noiselevel is
+ passed directly to writemsg().
+
+ @type msg: str
+ @param msg: a message string, including newline if appropriate
+ @type level: int
+ @param level: a numeric logging level (see the logging module)
+ @type noiselevel: int
+ @param noiselevel: passed directly to writemsg
+ """
+ if level >= logging.WARNING:
+ fd = sys.stderr
+ else:
+ fd = sys.stdout
+ writemsg(msg, noiselevel=noiselevel, fd=fd)
+
+def normalize_path(mypath):
+ """
+ os.path.normpath("//foo") returns "//foo" instead of "/foo"
+ We dislike this behavior so we create our own normpath func
+ to fix it.
+ """
+ if sys.hexversion >= 0x3000000 and isinstance(mypath, bytes):
+ path_sep = os.path.sep.encode()
+ else:
+ path_sep = os.path.sep
+
+ if mypath.startswith(path_sep):
+ # posixpath.normpath collapses 3 or more leading slashes to just 1.
+ return os.path.normpath(2*path_sep + mypath)
+ else:
+ return os.path.normpath(mypath)
+
+def grabfile(myfilename, compat_level=0, recursive=0, remember_source_file=False):
+ """This function grabs the lines in a file, normalizes whitespace and returns lines in a list; if a line
+ begins with a #, it is ignored, as are empty lines"""
+
+ mylines=grablines(myfilename, recursive, remember_source_file=True)
+ newlines=[]
+
+ for x, source_file in mylines:
+ #the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
+ #into single spaces.
+ myline = x.split()
+ if x and x[0] != "#":
+ mylinetemp = []
+ for item in myline:
+ if item[:1] != "#":
+ mylinetemp.append(item)
+ else:
+ break
+ myline = mylinetemp
+
+ myline = " ".join(myline)
+ if not myline:
+ continue
+ if myline[0]=="#":
+ # Check if we have a compat-level string. BC-integration data.
+ # '##COMPAT==>N<==' 'some string attached to it'
+ mylinetest = myline.split("<==",1)
+ if len(mylinetest) == 2:
+ myline_potential = mylinetest[1]
+ mylinetest = mylinetest[0].split("##COMPAT==>")
+ if len(mylinetest) == 2:
+ if compat_level >= int(mylinetest[1]):
+ # It's a compat line, and the key matches.
+ newlines.append(myline_potential)
+ continue
+ else:
+ continue
+ if remember_source_file:
+ newlines.append((myline, source_file))
+ else:
+ newlines.append(myline)
+ return newlines
+
+def map_dictlist_vals(func,myDict):
+ """Performs a function on each value of each key in a dictlist.
+ Returns a new dictlist."""
+ new_dl = {}
+ for key in myDict:
+ new_dl[key] = []
+ new_dl[key] = [func(x) for x in myDict[key]]
+ return new_dl
+
+def stack_dictlist(original_dicts, incremental=0, incrementals=[], ignore_none=0):
+ """
+ Stacks an array of dict-types into one array. Optionally merging or
+ overwriting matching key/value pairs for the dict[key]->list.
+ Returns a single dict. Higher index in lists is preferenced.
+
+ Example usage:
+ >>> from portage.util import stack_dictlist
+ >>> print stack_dictlist( [{'a':'b'},{'x':'y'}])
+ >>> {'a':'b','x':'y'}
+ >>> print stack_dictlist( [{'a':'b'},{'a':'c'}], incremental = True )
+ >>> {'a':['b','c'] }
+ >>> a = {'KEYWORDS':['x86','alpha']}
+ >>> b = {'KEYWORDS':['-x86']}
+ >>> print stack_dictlist( [a,b] )
+ >>> { 'KEYWORDS':['x86','alpha','-x86']}
+ >>> print stack_dictlist( [a,b], incremental=True)
+ >>> { 'KEYWORDS':['alpha'] }
+ >>> print stack_dictlist( [a,b], incrementals=['KEYWORDS'])
+ >>> { 'KEYWORDS':['alpha'] }
+
+ @param original_dicts a list of (dictionary objects or None)
+ @type list
+ @param incremental True or false depending on whether new keys should overwrite
+ keys which already exist.
+ @type boolean
+ @param incrementals A list of items that should be incremental (-foo removes foo from
+ the returned dict).
+ @type list
+ @param ignore_none Appears to be ignored; it was probably used long ago.
+ @type boolean
+
+ """
+ final_dict = {}
+ for mydict in original_dicts:
+ if mydict is None:
+ continue
+ for y in mydict:
+ if not y in final_dict:
+ final_dict[y] = []
+
+ for thing in mydict[y]:
+ if thing:
+ if incremental or y in incrementals:
+ if thing == "-*":
+ final_dict[y] = []
+ continue
+ elif thing[:1] == '-':
+ try:
+ final_dict[y].remove(thing[1:])
+ except ValueError:
+ pass
+ continue
+ if thing not in final_dict[y]:
+ final_dict[y].append(thing)
+ if y in final_dict and not final_dict[y]:
+ del final_dict[y]
+ return final_dict
+
+def stack_dicts(dicts, incremental=0, incrementals=[], ignore_none=0):
+ """Stacks an array of dict-types into one array. Optionally merging or
+ overwriting matching key/value pairs for the dict[key]->string.
+ Returns a single dict."""
+ final_dict = {}
+ for mydict in dicts:
+ if not mydict:
+ continue
+ for k, v in mydict.items():
+ if k in final_dict and (incremental or (k in incrementals)):
+ final_dict[k] += " " + v
+ else:
+ final_dict[k] = v
+ return final_dict
+
+def append_repo(atom_list, repo_name, remember_source_file=False):
+ """
+ Takes a list of valid atoms without repo spec and appends ::repo_name.
+ """
+ if remember_source_file:
+ return [(Atom(atom + "::" + repo_name, allow_wildcard=True, allow_repo=True), source) \
+ for atom, source in atom_list]
+ else:
+ return [Atom(atom + "::" + repo_name, allow_wildcard=True, allow_repo=True) \
+ for atom in atom_list]
+
+def stack_lists(lists, incremental=1, remember_source_file=False,
+ warn_for_unmatched_removal=False, strict_warn_for_unmatched_removal=False, ignore_repo=False):
+ """Stacks an array of list-types into one array. Optionally removing
+ distinct values using '-value' notation. Higher index is preferenced.
+
+ all elements must be hashable."""
+ matched_removals = set()
+ unmatched_removals = {}
+ new_list = {}
+ for sub_list in lists:
+ for token in sub_list:
+ token_key = token
+ if remember_source_file:
+ token, source_file = token
+ else:
+ source_file = False
+
+ if token is None:
+ continue
+
+ if incremental:
+ if token == "-*":
+ new_list.clear()
+ elif token[:1] == '-':
+ matched = False
+ if ignore_repo and not "::" in token:
+ #Let -cat/pkg remove cat/pkg::repo.
+ to_be_removed = []
+ token_slice = token[1:]
+ for atom in new_list:
+ atom_without_repo = atom
+ if atom.repo is not None:
+ # Atom.without_repo instantiates a new Atom,
+ # which is unnecessary here, so use string
+ # replacement instead.
+ atom_without_repo = \
+ atom.replace("::" + atom.repo, "", 1)
+ if atom_without_repo == token_slice:
+ to_be_removed.append(atom)
+ if to_be_removed:
+ matched = True
+ for atom in to_be_removed:
+ new_list.pop(atom)
+ else:
+ try:
+ new_list.pop(token[1:])
+ matched = True
+ except KeyError:
+ pass
+
+ if not matched:
+ if source_file and \
+ (strict_warn_for_unmatched_removal or \
+ token_key not in matched_removals):
+ unmatched_removals.setdefault(source_file, set()).add(token)
+ else:
+ matched_removals.add(token_key)
+ else:
+ new_list[token] = source_file
+ else:
+ new_list[token] = source_file
+
+ if warn_for_unmatched_removal:
+ for source_file, tokens in unmatched_removals.items():
+ if len(tokens) > 3:
+ selected = [tokens.pop(), tokens.pop(), tokens.pop()]
+ writemsg(_("--- Unmatch removal atoms in %s: %s and %s more\n") % \
+ (source_file, ", ".join(selected), len(tokens)),
+ noiselevel=-1)
+ else:
+ writemsg(_("--- Unmatch removal atom(s) in %s: %s\n") % (source_file, ", ".join(tokens)),
+ noiselevel=-1)
+
+ if remember_source_file:
+ return list(new_list.items())
+ else:
+ return list(new_list)
+
+def grabdict(myfilename, juststrings=0, empty=0, recursive=0, incremental=1):
+ """
+ This function grabs the lines in a file, normalizes whitespace and returns lines in a dictionary
+
+ @param myfilename: file to process
+ @type myfilename: string (path)
+ @param juststrings: only return strings
+ @type juststrings: Boolean (integer)
+ @param empty: Ignore certain lines
+ @type empty: Boolean (integer)
+ @param recursive: Recursively grab ( support for /etc/portage/package.keywords/* and friends )
+ @type recursive: Boolean (integer)
+ @param incremental: Append to the return list, don't overwrite
+ @type incremental: Boolean (integer)
+ @rtype: Dictionary
+ @returns:
+ 1. Returns the lines in a file in a dictionary, for example:
+ 'sys-apps/portage x86 amd64 ppc'
+ would return
+ { "sys-apps/portage" : [ 'x86', 'amd64', 'ppc' ]
+ the line syntax is key : [list of values]
+ """
+ newdict={}
+ for x in grablines(myfilename, recursive):
+ #the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
+ #into single spaces.
+ if x[0] == "#":
+ continue
+ myline=x.split()
+ mylinetemp = []
+ for item in myline:
+ if item[:1] != "#":
+ mylinetemp.append(item)
+ else:
+ break
+ myline = mylinetemp
+ if len(myline) < 2 and empty == 0:
+ continue
+ if len(myline) < 1 and empty == 1:
+ continue
+ if incremental:
+ newdict.setdefault(myline[0], []).extend(myline[1:])
+ else:
+ newdict[myline[0]] = myline[1:]
+ if juststrings:
+ for k, v in newdict.items():
+ newdict[k] = " ".join(v)
+ return newdict
+
+def read_corresponding_eapi_file(filename):
+ """
+ Read the 'eapi' file from the directory 'filename' is in.
+ Returns "0" if the file is not present or invalid.
+ """
+ default = "0"
+ eapi_file = os.path.join(os.path.dirname(filename), "eapi")
+ try:
+ f = open(eapi_file, "r")
+ lines = f.readlines()
+ if len(lines) == 1:
+ eapi = lines[0].rstrip("\n")
+ else:
+ writemsg(_("--- Invalid 'eapi' file (doesn't contain exactly one line): %s\n") % (eapi_file),
+ noiselevel=-1)
+ eapi = default
+ f.close()
+ except IOError:
+ eapi = default
+
+ return eapi
+
+def grabdict_package(myfilename, juststrings=0, recursive=0, allow_wildcard=False, allow_repo=False,
+ verify_eapi=False, eapi=None):
+ """ Does the same thing as grabdict except it validates keys
+ with isvalidatom()"""
+ pkgs=grabdict(myfilename, juststrings, empty=1, recursive=recursive)
+ if not pkgs:
+ return pkgs
+ if verify_eapi and eapi is None:
+ eapi = read_corresponding_eapi_file(myfilename)
+
+ # We need to call keys() here in order to avoid the possibility of
+ # "RuntimeError: dictionary changed size during iteration"
+ # when an invalid atom is deleted.
+ atoms = {}
+ for k, v in pkgs.items():
+ try:
+ k = Atom(k, allow_wildcard=allow_wildcard, allow_repo=allow_repo, eapi=eapi)
+ except InvalidAtom as e:
+ writemsg(_("--- Invalid atom in %s: %s\n") % (myfilename, e),
+ noiselevel=-1)
+ else:
+ atoms[k] = v
+ return atoms
+
+def grabfile_package(myfilename, compatlevel=0, recursive=0, allow_wildcard=False, allow_repo=False,
+ remember_source_file=False, verify_eapi=False, eapi=None):
+
+ pkgs=grabfile(myfilename, compatlevel, recursive=recursive, remember_source_file=True)
+ if not pkgs:
+ return pkgs
+ if verify_eapi and eapi is None:
+ eapi = read_corresponding_eapi_file(myfilename)
+ mybasename = os.path.basename(myfilename)
+ atoms = []
+ for pkg, source_file in pkgs:
+ pkg_orig = pkg
+ # for packages and package.mask files
+ if pkg[:1] == "-":
+ pkg = pkg[1:]
+ if pkg[:1] == '*' and mybasename == 'packages':
+ pkg = pkg[1:]
+ try:
+ pkg = Atom(pkg, allow_wildcard=allow_wildcard, allow_repo=allow_repo, eapi=eapi)
+ except InvalidAtom as e:
+ writemsg(_("--- Invalid atom in %s: %s\n") % (myfilename, e),
+ noiselevel=-1)
+ else:
+ if pkg_orig == str(pkg):
+ # normal atom, so return as Atom instance
+ if remember_source_file:
+ atoms.append((pkg, source_file))
+ else:
+ atoms.append(pkg)
+ else:
+ # atom has special prefix, so return as string
+ if remember_source_file:
+ atoms.append((pkg_orig, source_file))
+ else:
+ atoms.append(pkg_orig)
+ return atoms
+
+def grablines(myfilename, recursive=0, remember_source_file=False):
+ mylines=[]
+ if recursive and os.path.isdir(myfilename):
+ if os.path.basename(myfilename) in _ignorecvs_dirs:
+ return mylines
+ dirlist = os.listdir(myfilename)
+ dirlist.sort()
+ for f in dirlist:
+ if not f.startswith(".") and not f.endswith("~"):
+ mylines.extend(grablines(
+ os.path.join(myfilename, f), recursive, remember_source_file))
+ else:
+ try:
+ myfile = io.open(_unicode_encode(myfilename,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='replace')
+ if remember_source_file:
+ mylines = [(line, myfilename) for line in myfile.readlines()]
+ else:
+ mylines = myfile.readlines()
+ myfile.close()
+ except IOError as e:
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied(myfilename)
+ pass
+ return mylines
+
+def writedict(mydict,myfilename,writekey=True):
+ """Writes out a dict to a file; writekey=0 mode doesn't write out
+ the key and assumes all values are strings, not lists."""
+ lines = []
+ if not writekey:
+ for v in mydict.values():
+ lines.append(v + "\n")
+ else:
+ for k, v in mydict.items():
+ lines.append("%s %s\n" % (k, " ".join(v)))
+ write_atomic(myfilename, "".join(lines))
+
+def shlex_split(s):
+ """
+ This is equivalent to shlex.split but it temporarily encodes unicode
+ strings to bytes since shlex.split() doesn't handle unicode strings.
+ """
+ is_unicode = sys.hexversion < 0x3000000 and isinstance(s, unicode)
+ if is_unicode:
+ s = _unicode_encode(s)
+ rval = shlex.split(s)
+ if is_unicode:
+ rval = [_unicode_decode(x) for x in rval]
+ return rval
+
+class _tolerant_shlex(shlex.shlex):
+ def sourcehook(self, newfile):
+ try:
+ return shlex.shlex.sourcehook(self, newfile)
+ except EnvironmentError as e:
+ writemsg(_("!!! Parse error in '%s': source command failed: %s\n") % \
+ (self.infile, str(e)), noiselevel=-1)
+ return (newfile, io.StringIO())
+
+_invalid_var_name_re = re.compile(r'^\d|\W')
+
+def getconfig(mycfg, tolerant=0, allow_sourcing=False, expand=True):
+ if isinstance(expand, dict):
+ # Some existing variable definitions have been
+ # passed in, for use in substitutions.
+ expand_map = expand
+ expand = True
+ else:
+ expand_map = {}
+ mykeys = {}
+ try:
+ # NOTE: shlex doesn't support unicode objects with Python 2
+ # (produces spurious \0 characters).
+ if sys.hexversion < 0x3000000:
+ content = open(_unicode_encode(mycfg,
+ encoding=_encodings['fs'], errors='strict'), 'rb').read()
+ else:
+ content = open(_unicode_encode(mycfg,
+ encoding=_encodings['fs'], errors='strict'), mode='r',
+ encoding=_encodings['content'], errors='replace').read()
+ except IOError as e:
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied(mycfg)
+ if e.errno != errno.ENOENT:
+ writemsg("open('%s', 'r'): %s\n" % (mycfg, e), noiselevel=-1)
+ if e.errno not in (errno.EISDIR,):
+ raise
+ return None
+
+ # Workaround for avoiding a silent error in shlex that is
+ # triggered by a source statement at the end of the file
+ # without a trailing newline after the source statement.
+ if content and content[-1] != '\n':
+ content += '\n'
+
+ # Warn about dos-style line endings since that prevents
+ # people from being able to source them with bash.
+ if '\r' in content:
+ writemsg(("!!! " + _("Please use dos2unix to convert line endings " + \
+ "in config file: '%s'") + "\n") % mycfg, noiselevel=-1)
+
+ try:
+ if tolerant:
+ shlex_class = _tolerant_shlex
+ else:
+ shlex_class = shlex.shlex
+ # The default shlex.sourcehook() implementation
+ # only joins relative paths when the infile
+ # attribute is properly set.
+ lex = shlex_class(content, infile=mycfg, posix=True)
+ lex.wordchars = string.digits + string.ascii_letters + \
+ "~!@#$%*_\:;?,./-+{}"
+ lex.quotes="\"'"
+ if allow_sourcing:
+ lex.source="source"
+ while 1:
+ key=lex.get_token()
+ if key == "export":
+ key = lex.get_token()
+ if key is None:
+ #normal end of file
+				break
+ equ=lex.get_token()
+ if (equ==''):
+ #unexpected end of file
+ #lex.error_leader(self.filename,lex.lineno)
+ if not tolerant:
+ writemsg(_("!!! Unexpected end of config file: variable %s\n") % key,
+ noiselevel=-1)
+ raise Exception(_("ParseError: Unexpected EOF: %s: on/before line %s") % (mycfg, lex.lineno))
+ else:
+ return mykeys
+ elif (equ!='='):
+ #invalid token
+ #lex.error_leader(self.filename,lex.lineno)
+ if not tolerant:
+ raise Exception(_("ParseError: Invalid token "
+ "'%s' (not '='): %s: line %s") % \
+ (equ, mycfg, lex.lineno))
+ else:
+ return mykeys
+ val=lex.get_token()
+ if val is None:
+ #unexpected end of file
+ #lex.error_leader(self.filename,lex.lineno)
+ if not tolerant:
+ writemsg(_("!!! Unexpected end of config file: variable %s\n") % key,
+ noiselevel=-1)
+ raise portage.exception.CorruptionError(_("ParseError: Unexpected EOF: %s: line %s") % (mycfg, lex.lineno))
+ else:
+ return mykeys
+ key = _unicode_decode(key)
+ val = _unicode_decode(val)
+
+ if _invalid_var_name_re.search(key) is not None:
+ if not tolerant:
+ raise Exception(_(
+ "ParseError: Invalid variable name '%s': line %s") % \
+ (key, lex.lineno - 1))
+ writemsg(_("!!! Invalid variable name '%s': line %s in %s\n") \
+ % (key, lex.lineno - 1, mycfg), noiselevel=-1)
+ continue
+
+ if expand:
+ mykeys[key] = varexpand(val, expand_map)
+ expand_map[key] = mykeys[key]
+ else:
+ mykeys[key] = val
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ raise portage.exception.ParseError(str(e)+" in "+mycfg)
+ return mykeys
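+
+# Usage sketch (illustrative; the path shown is only an example):
+#
+#     d = getconfig("/etc/make.conf", tolerant=1, allow_sourcing=True)
+#     if d is not None:
+#         use_flags = d.get("USE", "")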
+
+#cache expansions of constant strings
+cexpand={}
+def varexpand(mystring, mydict=None):
+ if mydict is None:
+ mydict = {}
+ newstring = cexpand.get(" "+mystring, None)
+ if newstring is not None:
+ return newstring
+
+	# New variable expansion code. Preserves quotes, handles \n, etc.
+	# This code is used by the configfile code, as well as others (parser).
+	# This would be a good bunch of code to port to C.
+ numvars=0
+ mystring=" "+mystring
+ #in single, double quotes
+ insing=0
+ indoub=0
+ pos=1
+ newstring=" "
+ while (pos<len(mystring)):
+ if (mystring[pos]=="'") and (mystring[pos-1]!="\\"):
+ if (indoub):
+ newstring=newstring+"'"
+ else:
+ newstring += "'" # Quote removal is handled by shlex.
+ insing=not insing
+ pos=pos+1
+ continue
+ elif (mystring[pos]=='"') and (mystring[pos-1]!="\\"):
+ if (insing):
+ newstring=newstring+'"'
+ else:
+ newstring += '"' # Quote removal is handled by shlex.
+ indoub=not indoub
+ pos=pos+1
+ continue
+ if (not insing):
+ #expansion time
+ if (mystring[pos]=="\n"):
+ #convert newlines to spaces
+ newstring=newstring+" "
+ pos=pos+1
+ elif (mystring[pos]=="\\"):
+ # For backslash expansion, this function used to behave like
+ # echo -e, but that's not needed for our purposes. We want to
+ # behave like bash does when expanding a variable assignment
+ # in a sourced file, in which case it performs backslash
+ # removal for \\ and \$ but nothing more. It also removes
+ # escaped newline characters. Note that we don't handle
+ # escaped quotes here, since getconfig() uses shlex
+ # to handle that earlier.
+ if (pos+1>=len(mystring)):
+ newstring=newstring+mystring[pos]
+ break
+ else:
+ a = mystring[pos + 1]
+ pos = pos + 2
+ if a in ("\\", "$"):
+ newstring = newstring + a
+ elif a == "\n":
+ pass
+ else:
+ newstring = newstring + mystring[pos-2:pos]
+ continue
+ elif (mystring[pos]=="$") and (mystring[pos-1]!="\\"):
+ pos=pos+1
+ if mystring[pos]=="{":
+ pos=pos+1
+ braced=True
+ else:
+ braced=False
+ myvstart=pos
+ validchars=string.ascii_letters+string.digits+"_"
+ while mystring[pos] in validchars:
+ if (pos+1)>=len(mystring):
+ if braced:
+ cexpand[mystring]=""
+ return ""
+ else:
+ pos=pos+1
+ break
+ pos=pos+1
+ myvarname=mystring[myvstart:pos]
+ if braced:
+ if mystring[pos]!="}":
+ cexpand[mystring]=""
+ return ""
+ else:
+ pos=pos+1
+ if len(myvarname)==0:
+ cexpand[mystring]=""
+ return ""
+ numvars=numvars+1
+ if myvarname in mydict:
+ newstring=newstring+mydict[myvarname]
+ else:
+ newstring=newstring+mystring[pos]
+ pos=pos+1
+ else:
+ newstring=newstring+mystring[pos]
+ pos=pos+1
+ if numvars==0:
+ cexpand[mystring]=newstring[1:]
+ return newstring[1:]
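+
+# Example (illustrative):
+#
+#     varexpand("${PORTDIR}/profiles", {"PORTDIR": "/usr/portage"})
+#
+# returns "/usr/portage/profiles".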
+
+# broken and removed, but can still be imported
+pickle_write = None
+
+def pickle_read(filename,default=None,debug=0):
+ if not os.access(filename, os.R_OK):
+ writemsg(_("pickle_read(): File not readable. '")+filename+"'\n",1)
+ return default
+ data = None
+ try:
+ myf = open(_unicode_encode(filename,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ mypickle = pickle.Unpickler(myf)
+ data = mypickle.load()
+ myf.close()
+ del mypickle,myf
+ writemsg(_("pickle_read(): Loaded pickle. '")+filename+"'\n",1)
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ writemsg(_("!!! Failed to load pickle: ")+str(e)+"\n",1)
+ data = default
+ return data
+
+def dump_traceback(msg, noiselevel=1):
+ info = sys.exc_info()
+ if not info[2]:
+ stack = traceback.extract_stack()[:-1]
+ error = None
+ else:
+ stack = traceback.extract_tb(info[2])
+ error = str(info[1])
+ writemsg("\n====================================\n", noiselevel=noiselevel)
+ writemsg("%s\n\n" % msg, noiselevel=noiselevel)
+ for line in traceback.format_list(stack):
+ writemsg(line, noiselevel=noiselevel)
+ if error:
+ writemsg(error+"\n", noiselevel=noiselevel)
+ writemsg("====================================\n\n", noiselevel=noiselevel)
+
+class cmp_sort_key(object):
+ """
+ In python-3.0 the list.sort() method no longer has a "cmp" keyword
+ argument. This class acts as an adapter which converts a cmp function
+ into one that's suitable for use as the "key" keyword argument to
+ list.sort(), making it easier to port code for python-3.0 compatibility.
+ It works by generating key objects which use the given cmp function to
+ implement their __lt__ method.
+ """
+ __slots__ = ("_cmp_func",)
+
+ def __init__(self, cmp_func):
+ """
+ @type cmp_func: callable which takes 2 positional arguments
+ @param cmp_func: A cmp function.
+ """
+ self._cmp_func = cmp_func
+
+ def __call__(self, lhs):
+ return self._cmp_key(self._cmp_func, lhs)
+
+ class _cmp_key(object):
+ __slots__ = ("_cmp_func", "_obj")
+
+ def __init__(self, cmp_func, obj):
+ self._cmp_func = cmp_func
+ self._obj = obj
+
+ def __lt__(self, other):
+ if other.__class__ is not self.__class__:
+ raise TypeError("Expected type %s, got %s" % \
+ (self.__class__, other.__class__))
+ return self._cmp_func(self._obj, other._obj) < 0
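+
+# Usage sketch (illustrative; 'mycmp' and 'mylist' are hypothetical,
+# not part of this module):
+#
+#     def mycmp(a, b):
+#         return (a > b) - (a < b)
+#     mylist.sort(key=cmp_sort_key(mycmp))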
+
+def unique_array(s):
+ """lifted from python cookbook, credit: Tim Peters
+ Return a list of the elements in s in arbitrary order, sans duplicates"""
+ n = len(s)
+ # assume all elements are hashable, if so, it's linear
+ try:
+ return list(set(s))
+ except TypeError:
+ pass
+
+ # so much for linear. abuse sort.
+ try:
+ t = list(s)
+ t.sort()
+ except TypeError:
+ pass
+ else:
+ assert n > 0
+ last = t[0]
+ lasti = i = 1
+ while i < n:
+ if t[i] != last:
+ t[lasti] = last = t[i]
+ lasti += 1
+ i += 1
+ return t[:lasti]
+
+ # blah. back to original portage.unique_array
+ u = []
+ for x in s:
+ if x not in u:
+ u.append(x)
+ return u
+
+def unique_everseen(iterable, key=None):
+ """
+ List unique elements, preserving order. Remember all elements ever seen.
+ Taken from itertools documentation.
+ """
+ # unique_everseen('AAAABBBCCDAABBB') --> A B C D
+ # unique_everseen('ABBCcAD', str.lower) --> A B C D
+ seen = set()
+ seen_add = seen.add
+ if key is None:
+ for element in filterfalse(seen.__contains__, iterable):
+ seen_add(element)
+ yield element
+ else:
+ for element in iterable:
+ k = key(element)
+ if k not in seen:
+ seen_add(k)
+ yield element
+
+def apply_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1,
+ stat_cached=None, follow_links=True):
+ """Apply user, group, and mode bits to a file if the existing bits do not
+ already match. The default behavior is to force an exact match of mode
+ bits. When mask=0 is specified, mode bits on the target file are allowed
+	to be a superset of the mode argument (via logical OR). When mask>0, any
+	mode bits that are set in the mask are cleared from the target file's
+	mode (the result is equivalent to (mode | st_mode) & ~mask).
+ Returns True if the permissions were modified and False otherwise."""
+
+ modified = False
+
+ if stat_cached is None:
+ try:
+ if follow_links:
+ stat_cached = os.stat(filename)
+ else:
+ stat_cached = os.lstat(filename)
+ except OSError as oe:
+ func_call = "stat('%s')" % filename
+ if oe.errno == errno.EPERM:
+ raise OperationNotPermitted(func_call)
+ elif oe.errno == errno.EACCES:
+ raise PermissionDenied(func_call)
+ elif oe.errno == errno.ENOENT:
+ raise FileNotFound(filename)
+ else:
+ raise
+
+ if (uid != -1 and uid != stat_cached.st_uid) or \
+ (gid != -1 and gid != stat_cached.st_gid):
+ try:
+ if follow_links:
+ os.chown(filename, uid, gid)
+ else:
+ portage.data.lchown(filename, uid, gid)
+ modified = True
+ except OSError as oe:
+ func_call = "chown('%s', %i, %i)" % (filename, uid, gid)
+ if oe.errno == errno.EPERM:
+ raise OperationNotPermitted(func_call)
+ elif oe.errno == errno.EACCES:
+ raise PermissionDenied(func_call)
+ elif oe.errno == errno.EROFS:
+ raise ReadOnlyFileSystem(func_call)
+ elif oe.errno == errno.ENOENT:
+ raise FileNotFound(filename)
+ else:
+ raise
+
+ new_mode = -1
+ st_mode = stat_cached.st_mode & 0o7777 # protect from unwanted bits
+ if mask >= 0:
+ if mode == -1:
+ mode = 0 # Don't add any mode bits when mode is unspecified.
+ else:
+ mode = mode & 0o7777
+ if (mode & st_mode != mode) or \
+ ((mask ^ st_mode) & st_mode != st_mode):
+ new_mode = mode | st_mode
+ new_mode = (mask ^ new_mode) & new_mode
+ elif mode != -1:
+ mode = mode & 0o7777 # protect from unwanted bits
+ if mode != st_mode:
+ new_mode = mode
+
+ # The chown system call may clear S_ISUID and S_ISGID
+ # bits, so those bits are restored if necessary.
+ if modified and new_mode == -1 and \
+ (st_mode & stat.S_ISUID or st_mode & stat.S_ISGID):
+ if mode == -1:
+ new_mode = st_mode
+ else:
+ mode = mode & 0o7777
+ if mask >= 0:
+ new_mode = mode | st_mode
+ new_mode = (mask ^ new_mode) & new_mode
+ else:
+ new_mode = mode
+ if not (new_mode & stat.S_ISUID or new_mode & stat.S_ISGID):
+ new_mode = -1
+
+ if not follow_links and stat.S_ISLNK(stat_cached.st_mode):
+ # Mode doesn't matter for symlinks.
+ new_mode = -1
+
+ if new_mode != -1:
+ try:
+ os.chmod(filename, new_mode)
+ modified = True
+ except OSError as oe:
+ func_call = "chmod('%s', %s)" % (filename, oct(new_mode))
+ if oe.errno == errno.EPERM:
+ raise OperationNotPermitted(func_call)
+ elif oe.errno == errno.EACCES:
+ raise PermissionDenied(func_call)
+ elif oe.errno == errno.EROFS:
+ raise ReadOnlyFileSystem(func_call)
+ elif oe.errno == errno.ENOENT:
+ raise FileNotFound(filename)
+ raise
+ return modified
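+
+# Illustrative example of the mask semantics above (hypothetical path):
+#
+#     apply_permissions("/some/dir", mode=0o755, mask=0o022)
+#
+# allows the existing mode to be a superset of 0o755, except that any bits
+# in 0o022 (group/other write) are cleared.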
+
+def apply_stat_permissions(filename, newstat, **kwargs):
+ """A wrapper around apply_secpass_permissions that gets
+ uid, gid, and mode from a stat object"""
+ return apply_secpass_permissions(filename, uid=newstat.st_uid, gid=newstat.st_gid,
+ mode=newstat.st_mode, **kwargs)
+
+def apply_recursive_permissions(top, uid=-1, gid=-1,
+ dirmode=-1, dirmask=-1, filemode=-1, filemask=-1, onerror=None):
+ """A wrapper around apply_secpass_permissions that applies permissions
+ recursively. If optional argument onerror is specified, it should be a
+ function; it will be called with one argument, a PortageException instance.
+ Returns True if all permissions are applied and False if some are left
+ unapplied."""
+
+ # Avoid issues with circular symbolic links, as in bug #339670.
+ follow_links = False
+
+ if onerror is None:
+ # Default behavior is to dump errors to stderr so they won't
+ # go unnoticed. Callers can pass in a quiet instance.
+ def onerror(e):
+ if isinstance(e, OperationNotPermitted):
+ writemsg(_("Operation Not Permitted: %s\n") % str(e),
+ noiselevel=-1)
+ elif isinstance(e, FileNotFound):
+ writemsg(_("File Not Found: '%s'\n") % str(e), noiselevel=-1)
+ else:
+ raise
+
+ all_applied = True
+ for dirpath, dirnames, filenames in os.walk(top):
+ try:
+ applied = apply_secpass_permissions(dirpath,
+ uid=uid, gid=gid, mode=dirmode, mask=dirmask,
+ follow_links=follow_links)
+ if not applied:
+ all_applied = False
+ except PortageException as e:
+ all_applied = False
+ onerror(e)
+
+ for name in filenames:
+ try:
+ applied = apply_secpass_permissions(os.path.join(dirpath, name),
+ uid=uid, gid=gid, mode=filemode, mask=filemask,
+ follow_links=follow_links)
+ if not applied:
+ all_applied = False
+ except PortageException as e:
+ # Ignore InvalidLocation exceptions such as FileNotFound
+ # and DirectoryNotFound since sometimes things disappear,
+ # like when adjusting permissions on DISTCC_DIR.
+ if not isinstance(e, portage.exception.InvalidLocation):
+ all_applied = False
+ onerror(e)
+ return all_applied
+
+def apply_secpass_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1,
+ stat_cached=None, follow_links=True):
+ """A wrapper around apply_permissions that uses secpass and simple
+ logic to apply as much of the permissions as possible without
+ generating an obviously avoidable permission exception. Despite
+ attempts to avoid an exception, it's possible that one will be raised
+ anyway, so be prepared.
+ Returns True if all permissions are applied and False if some are left
+ unapplied."""
+
+ if stat_cached is None:
+ try:
+ if follow_links:
+ stat_cached = os.stat(filename)
+ else:
+ stat_cached = os.lstat(filename)
+ except OSError as oe:
+ func_call = "stat('%s')" % filename
+ if oe.errno == errno.EPERM:
+ raise OperationNotPermitted(func_call)
+ elif oe.errno == errno.EACCES:
+ raise PermissionDenied(func_call)
+ elif oe.errno == errno.ENOENT:
+ raise FileNotFound(filename)
+ else:
+ raise
+
+ all_applied = True
+
+ if portage.data.secpass < 2:
+
+ if uid != -1 and \
+ uid != stat_cached.st_uid:
+ all_applied = False
+ uid = -1
+
+ if gid != -1 and \
+ gid != stat_cached.st_gid and \
+ gid not in os.getgroups():
+ all_applied = False
+ gid = -1
+
+ apply_permissions(filename, uid=uid, gid=gid, mode=mode, mask=mask,
+ stat_cached=stat_cached, follow_links=follow_links)
+ return all_applied
+
+class atomic_ofstream(ObjectProxy):
+ """Write a file atomically via os.rename(). Atomic replacement prevents
+ interprocess interference and prevents corruption of the target
+ file when the write is interrupted (for example, when an 'out of space'
+ error occurs)."""
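+
+	# Usage sketch (illustrative; write_atomic() below is the canonical
+	# wrapper around this pattern):
+	#
+	#     f = atomic_ofstream("/path/to/file")
+	#     try:
+	#         f.write(content)
+	#     except Exception:
+	#         f.abort()  # leaves the target file unchanged
+	#         raise
+	#     else:
+	#         f.close()  # renames the temp file over the target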
+
+ def __init__(self, filename, mode='w', follow_links=True, **kargs):
+ """Opens a temporary filename.pid in the same directory as filename."""
+ ObjectProxy.__init__(self)
+ object.__setattr__(self, '_aborted', False)
+ if 'b' in mode:
+ open_func = open
+ else:
+ open_func = io.open
+ kargs.setdefault('encoding', _encodings['content'])
+ kargs.setdefault('errors', 'backslashreplace')
+
+ if follow_links:
+ canonical_path = os.path.realpath(filename)
+ object.__setattr__(self, '_real_name', canonical_path)
+ tmp_name = "%s.%i" % (canonical_path, os.getpid())
+ try:
+ object.__setattr__(self, '_file',
+ open_func(_unicode_encode(tmp_name,
+ encoding=_encodings['fs'], errors='strict'),
+ mode=mode, **kargs))
+ return
+ except IOError as e:
+ if canonical_path == filename:
+ raise
+ # Ignore this error, since it's irrelevant
+ # and the below open call will produce a
+ # new error if necessary.
+
+ object.__setattr__(self, '_real_name', filename)
+ tmp_name = "%s.%i" % (filename, os.getpid())
+ object.__setattr__(self, '_file',
+ open_func(_unicode_encode(tmp_name,
+ encoding=_encodings['fs'], errors='strict'),
+ mode=mode, **kargs))
+
+ def _get_target(self):
+ return object.__getattribute__(self, '_file')
+
+ if sys.hexversion >= 0x3000000:
+
+ def __getattribute__(self, attr):
+ if attr in ('close', 'abort', '__del__'):
+ return object.__getattribute__(self, attr)
+ return getattr(object.__getattribute__(self, '_file'), attr)
+
+ else:
+
+ # For TextIOWrapper, automatically coerce write calls to
+ # unicode, in order to avoid TypeError when writing raw
+ # bytes with python2.
+
+ def __getattribute__(self, attr):
+ if attr in ('close', 'abort', 'write', '__del__'):
+ return object.__getattribute__(self, attr)
+ return getattr(object.__getattribute__(self, '_file'), attr)
+
+ def write(self, s):
+ f = object.__getattribute__(self, '_file')
+ if isinstance(f, io.TextIOWrapper):
+ s = _unicode_decode(s)
+ return f.write(s)
+
+ def close(self):
+ """Closes the temporary file, copies permissions (if possible),
+ and performs the atomic replacement via os.rename(). If the abort()
+ method has been called, then the temp file is closed and removed."""
+ f = object.__getattribute__(self, '_file')
+ real_name = object.__getattribute__(self, '_real_name')
+ if not f.closed:
+ try:
+ f.close()
+ if not object.__getattribute__(self, '_aborted'):
+ try:
+ apply_stat_permissions(f.name, os.stat(real_name))
+ except OperationNotPermitted:
+ pass
+ except FileNotFound:
+ pass
+ except OSError as oe: # from the above os.stat call
+ if oe.errno in (errno.ENOENT, errno.EPERM):
+ pass
+ else:
+ raise
+ os.rename(f.name, real_name)
+ finally:
+ # Make sure we cleanup the temp file
+ # even if an exception is raised.
+ try:
+ os.unlink(f.name)
+ except OSError as oe:
+ pass
+
+ def abort(self):
+ """If an error occurs while writing the file, the user should
+ call this method in order to leave the target file unchanged.
+ This will call close() automatically."""
+ if not object.__getattribute__(self, '_aborted'):
+ object.__setattr__(self, '_aborted', True)
+ self.close()
+
+ def __del__(self):
+		"""If the user does not explicitly call close(), it is
+ assumed that an error has occurred, so we abort()."""
+ try:
+ f = object.__getattribute__(self, '_file')
+ except AttributeError:
+ pass
+ else:
+ if not f.closed:
+ self.abort()
+ # ensure destructor from the base class is called
+ base_destructor = getattr(ObjectProxy, '__del__', None)
+ if base_destructor is not None:
+ base_destructor(self)
+
+def write_atomic(file_path, content, **kwargs):
+ f = None
+ try:
+ f = atomic_ofstream(file_path, **kwargs)
+ f.write(content)
+ f.close()
+ except (IOError, OSError) as e:
+ if f:
+ f.abort()
+ func_call = "write_atomic('%s')" % file_path
+ if e.errno == errno.EPERM:
+ raise OperationNotPermitted(func_call)
+ elif e.errno == errno.EACCES:
+ raise PermissionDenied(func_call)
+ elif e.errno == errno.EROFS:
+ raise ReadOnlyFileSystem(func_call)
+ elif e.errno == errno.ENOENT:
+ raise FileNotFound(file_path)
+ else:
+ raise
+
+def ensure_dirs(dir_path, **kwargs):
+ """Create a directory and call apply_permissions.
+ Returns True if a directory is created or the permissions needed to be
+ modified, and False otherwise.
+
+ This function's handling of EEXIST errors makes it useful for atomic
+ directory creation, in which multiple processes may be competing to
+ create the same directory.
+ """
+
+ created_dir = False
+
+ try:
+ os.makedirs(dir_path)
+ created_dir = True
+ except OSError as oe:
+ func_call = "makedirs('%s')" % dir_path
+ if oe.errno in (errno.EEXIST,):
+ pass
+ else:
+ if os.path.isdir(dir_path):
+ # NOTE: DragonFly raises EPERM for makedir('/')
+ # and that is supposed to be ignored here.
+ # Also, sometimes mkdir raises EISDIR on FreeBSD
+ # and we want to ignore that too (bug #187518).
+ pass
+ elif oe.errno == errno.EPERM:
+ raise OperationNotPermitted(func_call)
+ elif oe.errno == errno.EACCES:
+ raise PermissionDenied(func_call)
+ elif oe.errno == errno.EROFS:
+ raise ReadOnlyFileSystem(func_call)
+ else:
+ raise
+ if kwargs:
+ perms_modified = apply_permissions(dir_path, **kwargs)
+ else:
+ perms_modified = False
+ return created_dir or perms_modified
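+
+# Example (illustrative; the path is hypothetical):
+#
+#     ensure_dirs("/var/tmp/hypothetical", mode=0o755)
+#
+# returns True if the directory was created or its permissions changed.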
+
+class LazyItemsDict(UserDict):
+ """A mapping object that behaves like a standard dict except that it allows
+ for lazy initialization of values via callable objects. Lazy items can be
+ overwritten and deleted just as normal items."""
+
+ __slots__ = ('lazy_items',)
+
+ def __init__(self, *args, **kwargs):
+
+ self.lazy_items = {}
+ UserDict.__init__(self, *args, **kwargs)
+
+ def addLazyItem(self, item_key, value_callable, *pargs, **kwargs):
+ """Add a lazy item for the given key. When the item is requested,
+ value_callable will be called with *pargs and **kwargs arguments."""
+ self.lazy_items[item_key] = \
+ self._LazyItem(value_callable, pargs, kwargs, False)
+ # make it show up in self.keys(), etc...
+ UserDict.__setitem__(self, item_key, None)
+
+ def addLazySingleton(self, item_key, value_callable, *pargs, **kwargs):
+ """This is like addLazyItem except value_callable will only be called
+ a maximum of 1 time and the result will be cached for future requests."""
+ self.lazy_items[item_key] = \
+ self._LazyItem(value_callable, pargs, kwargs, True)
+ # make it show up in self.keys(), etc...
+ UserDict.__setitem__(self, item_key, None)
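+
+	# Usage sketch (illustrative; 'load_config' and the path are
+	# hypothetical):
+	#
+	#     d = LazyItemsDict()
+	#     d.addLazySingleton("config", load_config, "/etc/hypothetical.conf")
+	#     cfg = d["config"]  # load_config is called here; result is cached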
+
+ def update(self, *args, **kwargs):
+ if len(args) > 1:
+ raise TypeError(
+ "expected at most 1 positional argument, got " + \
+ repr(len(args)))
+ if args:
+ map_obj = args[0]
+ else:
+ map_obj = None
+ if map_obj is None:
+ pass
+ elif isinstance(map_obj, LazyItemsDict):
+ for k in map_obj:
+ if k in map_obj.lazy_items:
+ UserDict.__setitem__(self, k, None)
+ else:
+ UserDict.__setitem__(self, k, map_obj[k])
+ self.lazy_items.update(map_obj.lazy_items)
+ else:
+ UserDict.update(self, map_obj)
+ if kwargs:
+ UserDict.update(self, kwargs)
+
+ def __getitem__(self, item_key):
+ if item_key in self.lazy_items:
+ lazy_item = self.lazy_items[item_key]
+ pargs = lazy_item.pargs
+ if pargs is None:
+ pargs = ()
+ kwargs = lazy_item.kwargs
+ if kwargs is None:
+ kwargs = {}
+ result = lazy_item.func(*pargs, **kwargs)
+ if lazy_item.singleton:
+ self[item_key] = result
+ return result
+
+ else:
+ return UserDict.__getitem__(self, item_key)
+
+ def __setitem__(self, item_key, value):
+ if item_key in self.lazy_items:
+ del self.lazy_items[item_key]
+ UserDict.__setitem__(self, item_key, value)
+
+ def __delitem__(self, item_key):
+ if item_key in self.lazy_items:
+ del self.lazy_items[item_key]
+ UserDict.__delitem__(self, item_key)
+
+ def clear(self):
+ self.lazy_items.clear()
+ UserDict.clear(self)
+
+ def copy(self):
+ return self.__copy__()
+
+ def __copy__(self):
+ return self.__class__(self)
+
+ def __deepcopy__(self, memo=None):
+ """
+ This forces evaluation of each contained lazy item, and deepcopy of
+ the result. A TypeError is raised if any contained lazy item is not
+ a singleton, since it is not necessarily possible for the behavior
+ of this type of item to be safely preserved.
+ """
+ if memo is None:
+ memo = {}
+ result = self.__class__()
+ memo[id(self)] = result
+ for k in self:
+ k_copy = deepcopy(k, memo)
+ lazy_item = self.lazy_items.get(k)
+ if lazy_item is not None:
+ if not lazy_item.singleton:
+ raise TypeError(_unicode_decode("LazyItemsDict " + \
+ "deepcopy is unsafe with lazy items that are " + \
+ "not singletons: key=%s value=%s") % (k, lazy_item,))
+ UserDict.__setitem__(result, k_copy, deepcopy(self[k], memo))
+ return result
+
+ class _LazyItem(object):
+
+ __slots__ = ('func', 'pargs', 'kwargs', 'singleton')
+
+ def __init__(self, func, pargs, kwargs, singleton):
+
+ if not pargs:
+ pargs = None
+ if not kwargs:
+ kwargs = None
+
+ self.func = func
+ self.pargs = pargs
+ self.kwargs = kwargs
+ self.singleton = singleton
+
+ def __copy__(self):
+ return self.__class__(self.func, self.pargs,
+ self.kwargs, self.singleton)
+
+ def __deepcopy__(self, memo=None):
+ """
+ Override this since the default implementation can fail silently,
+ leaving some attributes unset.
+ """
+ if memo is None:
+ memo = {}
+ result = self.__copy__()
+ memo[id(self)] = result
+ result.func = deepcopy(self.func, memo)
+ result.pargs = deepcopy(self.pargs, memo)
+ result.kwargs = deepcopy(self.kwargs, memo)
+ result.singleton = deepcopy(self.singleton, memo)
+ return result
+
+class ConfigProtect(object):
+ def __init__(self, myroot, protect_list, mask_list):
+ self.myroot = myroot
+ self.protect_list = protect_list
+ self.mask_list = mask_list
+ self.updateprotect()
+
+ def updateprotect(self):
+ """Update internal state for isprotected() calls. Nonexistent paths
+ are ignored."""
+
+ os = _os_merge
+
+ self.protect = []
+ self._dirs = set()
+ for x in self.protect_list:
+ ppath = normalize_path(
+ os.path.join(self.myroot, x.lstrip(os.path.sep)))
+ try:
+ if stat.S_ISDIR(os.stat(ppath).st_mode):
+ self._dirs.add(ppath)
+ self.protect.append(ppath)
+ except OSError:
+ # If it doesn't exist, there's no need to protect it.
+ pass
+
+ self.protectmask = []
+ for x in self.mask_list:
+ ppath = normalize_path(
+ os.path.join(self.myroot, x.lstrip(os.path.sep)))
+ try:
+				# Use lstat so that anything, even a broken
+				# symlink, can be protected.
+				if stat.S_ISDIR(os.lstat(ppath).st_mode):
+					self._dirs.add(ppath)
+				self.protectmask.append(ppath)
+				# Now use stat in case this is a symlink to a directory.
+ if stat.S_ISDIR(os.stat(ppath).st_mode):
+ self._dirs.add(ppath)
+ except OSError:
+ # If it doesn't exist, there's no need to mask it.
+ pass
+
+ def isprotected(self, obj):
+ """Returns True if obj is protected, False otherwise. The caller must
+ ensure that obj is normalized with a single leading slash. A trailing
+ slash is optional for directories."""
+ masked = 0
+ protected = 0
+ sep = os.path.sep
+ for ppath in self.protect:
+ if len(ppath) > masked and obj.startswith(ppath):
+ if ppath in self._dirs:
+ if obj != ppath and not obj.startswith(ppath + sep):
+ # /etc/foo does not match /etc/foobaz
+ continue
+ elif obj != ppath:
+ # force exact match when CONFIG_PROTECT lists a
+ # non-directory
+ continue
+ protected = len(ppath)
+ #config file management
+ for pmpath in self.protectmask:
+ if len(pmpath) >= protected and obj.startswith(pmpath):
+ if pmpath in self._dirs:
+ if obj != pmpath and \
+ not obj.startswith(pmpath + sep):
+ # /etc/foo does not match /etc/foobaz
+ continue
+ elif obj != pmpath:
+ # force exact match when CONFIG_PROTECT_MASK lists
+ # a non-directory
+ continue
+ #skip, it's in the mask
+ masked = len(pmpath)
+ return protected > masked
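+
+# Example (illustrative, assuming both directories exist): with
+# ConfigProtect("/", ["/etc"], ["/etc/env.d"]), isprotected("/etc/fstab")
+# returns True, while isprotected("/etc/env.d/05gcc") returns False
+# because the mask entry takes precedence.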
+
+def new_protect_filename(mydest, newmd5=None, force=False):
+ """Resolves a config-protect filename for merging, optionally
+ using the last filename if the md5 matches. If force is True,
+ then a new filename will be generated even if mydest does not
+ exist yet.
+ (dest,md5) ==> 'string' --- path_to_target_filename
+ (dest) ==> ('next', 'highest') --- next_target and most-recent_target
+ """
+
+ # config protection filename format:
+ # ._cfg0000_foo
+ # 0123456789012
+
+ os = _os_merge
+
+ prot_num = -1
+ last_pfile = ""
+
+ if not force and \
+ not os.path.exists(mydest):
+ return mydest
+
+ real_filename = os.path.basename(mydest)
+ real_dirname = os.path.dirname(mydest)
+ for pfile in os.listdir(real_dirname):
+ if pfile[0:5] != "._cfg":
+ continue
+ if pfile[10:] != real_filename:
+ continue
+ try:
+ new_prot_num = int(pfile[5:9])
+ if new_prot_num > prot_num:
+ prot_num = new_prot_num
+ last_pfile = pfile
+ except ValueError:
+ continue
+ prot_num = prot_num + 1
+
+ new_pfile = normalize_path(os.path.join(real_dirname,
+ "._cfg" + str(prot_num).zfill(4) + "_" + real_filename))
+ old_pfile = normalize_path(os.path.join(real_dirname, last_pfile))
+ if last_pfile and newmd5:
+ try:
+ last_pfile_md5 = portage.checksum._perform_md5_merge(old_pfile)
+ except FileNotFound:
+ # The file suddenly disappeared or it's a broken symlink.
+ pass
+ else:
+ if last_pfile_md5 == newmd5:
+ return old_pfile
+ return new_pfile
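+
+# Example (illustrative): if /etc/foo exists and /etc/._cfg0000_foo is the
+# most recent protected copy, new_protect_filename("/etc/foo") returns
+# "/etc/._cfg0001_foo", or "/etc/._cfg0000_foo" when newmd5 matches that
+# copy's md5.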
+
+def find_updated_config_files(target_root, config_protect):
+ """
+	Yield tuples describing configuration files that need to be updated.
+	For a protected directory, each tuple is organized like this:
+	[ protected_dir, file_list ]
+	If the protected config is a single protected_file rather than a
+	directory, the tuple is:
+	[ protected_file, None ]
+	If no configuration files need to be updated, nothing is yielded.
+ """
+
+ os = _os_merge
+
+ if config_protect:
+ # directories with some protect files in them
+ for x in config_protect:
+ files = []
+
+ x = os.path.join(target_root, x.lstrip(os.path.sep))
+ if not os.access(x, os.W_OK):
+ continue
+ try:
+ mymode = os.lstat(x).st_mode
+ except OSError:
+ continue
+
+ if stat.S_ISLNK(mymode):
+ # We want to treat it like a directory if it
+ # is a symlink to an existing directory.
+ try:
+ real_mode = os.stat(x).st_mode
+ if stat.S_ISDIR(real_mode):
+ mymode = real_mode
+ except OSError:
+ pass
+
+ if stat.S_ISDIR(mymode):
+ mycommand = \
+ "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
+ else:
+ mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
+ os.path.split(x.rstrip(os.path.sep))
+ mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
+ a = subprocess_getstatusoutput(mycommand)
+
+ if a[0] == 0:
+ files = a[1].split('\0')
+ # split always produces an empty string as the last element
+ if files and not files[-1]:
+ del files[-1]
+ if files:
+ if stat.S_ISDIR(mymode):
+ yield (x, files)
+ else:
+ yield (x, None)
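+
+# Example (illustrative; paths are hypothetical): iterating
+# find_updated_config_files("/", ["/etc"]) yields tuples such as
+# ("/etc", ["/etc/._cfg0000_fstab"]) for pending configuration updates.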
+
+def getlibpaths(root, env=None):
+ """ Return a list of paths that are used for library lookups """
+ if env is None:
+ env = os.environ
+ # the following is based on the information from ld.so(8)
+ rval = env.get("LD_LIBRARY_PATH", "").split(":")
+ rval.extend(grabfile(os.path.join(root, "etc", "ld.so.conf")))
+ rval.append("/usr/lib")
+ rval.append("/lib")
+
+ return [normalize_path(x) for x in rval if x]
diff --git a/portage_with_autodep/pym/portage/util/_dyn_libs/LinkageMapELF.py b/portage_with_autodep/pym/portage/util/_dyn_libs/LinkageMapELF.py
new file mode 100644
index 0000000..52670d9
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/_dyn_libs/LinkageMapELF.py
@@ -0,0 +1,805 @@
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import logging
+import subprocess
+
+import portage
+from portage import _encodings
+from portage import _os_merge
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.cache.mappings import slot_dict_class
+from portage.exception import CommandNotFound
+from portage.localization import _
+from portage.util import getlibpaths
+from portage.util import grabfile
+from portage.util import normalize_path
+from portage.util import writemsg_level
+
+class LinkageMapELF(object):
+
+ """Models dynamic linker dependencies."""
+
+ _needed_aux_key = "NEEDED.ELF.2"
+ _soname_map_class = slot_dict_class(
+ ("consumers", "providers"), prefix="")
+
+	class _obj_properties_class(object):
+
+ __slots__ = ("arch", "needed", "runpaths", "soname", "alt_paths",
+ "owner",)
+
+ def __init__(self, arch, needed, runpaths, soname, alt_paths, owner):
+ self.arch = arch
+ self.needed = needed
+ self.runpaths = runpaths
+ self.soname = soname
+ self.alt_paths = alt_paths
+ self.owner = owner
+
+ def __init__(self, vardbapi):
+ self._dbapi = vardbapi
+ self._root = self._dbapi.settings['ROOT']
+ self._libs = {}
+ self._obj_properties = {}
+ self._obj_key_cache = {}
+ self._defpath = set()
+ self._path_key_cache = {}
+
+ def _clear_cache(self):
+ self._libs.clear()
+ self._obj_properties.clear()
+ self._obj_key_cache.clear()
+ self._defpath.clear()
+ self._path_key_cache.clear()
+
+ def _path_key(self, path):
+ key = self._path_key_cache.get(path)
+ if key is None:
+ key = self._ObjectKey(path, self._root)
+ self._path_key_cache[path] = key
+ return key
+
+ def _obj_key(self, path):
+ key = self._obj_key_cache.get(path)
+ if key is None:
+ key = self._ObjectKey(path, self._root)
+ self._obj_key_cache[path] = key
+ return key
+
+ class _ObjectKey(object):
+
+ """Helper class used as _obj_properties keys for objects."""
+
+ __slots__ = ("_key",)
+
+ def __init__(self, obj, root):
+ """
+ This takes a path to an object.
+
+			@param obj: path to a file
+			@type obj: string (example: '/usr/bin/bar')
+
+ """
+ self._key = self._generate_object_key(obj, root)
+
+ def __hash__(self):
+ return hash(self._key)
+
+ def __eq__(self, other):
+ return self._key == other._key
+
+ def _generate_object_key(self, obj, root):
+ """
+ Generate object key for a given object.
+
+			@param obj: path to a file
+			@type obj: string (example: '/usr/bin/bar')
+ @rtype: 2-tuple of types (long, int) if object exists. string if
+ object does not exist.
+ @return:
+ 1. 2-tuple of object's inode and device from a stat call, if object
+ exists.
+ 2. realpath of object if object does not exist.
+
+ """
+
+ os = _os_merge
+
+ try:
+ _unicode_encode(obj,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeEncodeError:
+ # The package appears to have been merged with a
+ # different value of sys.getfilesystemencoding(),
+ # so fall back to utf_8 if appropriate.
+ try:
+ _unicode_encode(obj,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeEncodeError:
+ pass
+ else:
+ os = portage.os
+
+ abs_path = os.path.join(root, obj.lstrip(os.sep))
+ try:
+ object_stat = os.stat(abs_path)
+ except OSError:
+			# Use the realpath as the key if the file does not exist on the
+ # filesystem.
+ return os.path.realpath(abs_path)
+ # Return a tuple of the device and inode.
+ return (object_stat.st_dev, object_stat.st_ino)
+
+ def file_exists(self):
+ """
+ Determine if the file for this key exists on the filesystem.
+
+ @rtype: Boolean
+ @return:
+ 1. True if the file exists.
+ 2. False if the file does not exist or is a broken symlink.
+
+ """
+ return isinstance(self._key, tuple)
+
+ class _LibGraphNode(_ObjectKey):
+ __slots__ = ("alt_paths",)
+
+ def __init__(self, key):
+ """
+ Create a _LibGraphNode from an existing _ObjectKey.
+ This re-uses the _key attribute in order to avoid repeating
+ any previous stat calls, which helps to avoid potential race
+ conditions due to inconsistent stat results when the
+ file system is being modified concurrently.
+ """
+ self._key = key._key
+ self.alt_paths = set()
+
+ def __str__(self):
+ return str(sorted(self.alt_paths))
+
+ def rebuild(self, exclude_pkgs=None, include_file=None,
+ preserve_paths=None):
+ """
+ Raises CommandNotFound if there are preserved libs
+ and the scanelf binary is not available.
+
+ @param exclude_pkgs: A set of packages that should be excluded from
+ the LinkageMap, since they are being unmerged and their NEEDED
+ entries are therefore irrelevant and would only serve to corrupt
+ the LinkageMap.
+ @type exclude_pkgs: set
+ @param include_file: The path of a file containing NEEDED entries for
+ a package which does not exist in the vardbapi yet because it is
+ currently being merged.
+ @type include_file: String
+ @param preserve_paths: Libraries preserved by a package instance that
+ is currently being merged. They need to be explicitly passed to the
+ LinkageMap, since they are not registered in the
+ PreservedLibsRegistry yet.
+ @type preserve_paths: set
+ """
+
+ os = _os_merge
+ root = self._root
+ root_len = len(root) - 1
+ self._clear_cache()
+ self._defpath.update(getlibpaths(self._root, env=self._dbapi.settings))
+ libs = self._libs
+ obj_properties = self._obj_properties
+
+ lines = []
+
+ # Data from include_file is processed first so that it
+ # overrides any data from previously installed files.
+ if include_file is not None:
+ for line in grabfile(include_file):
+ lines.append((None, include_file, line))
+
+ aux_keys = [self._needed_aux_key]
+ can_lock = os.access(os.path.dirname(self._dbapi._dbroot), os.W_OK)
+ if can_lock:
+ self._dbapi.lock()
+ try:
+ for cpv in self._dbapi.cpv_all():
+ if exclude_pkgs is not None and cpv in exclude_pkgs:
+ continue
+ needed_file = self._dbapi.getpath(cpv,
+ filename=self._needed_aux_key)
+ for line in self._dbapi.aux_get(cpv, aux_keys)[0].splitlines():
+ lines.append((cpv, needed_file, line))
+ finally:
+ if can_lock:
+ self._dbapi.unlock()
+
+ # have to call scanelf for preserved libs here as they aren't
+ # registered in NEEDED.ELF.2 files
+ plibs = {}
+ if preserve_paths is not None:
+ plibs.update((x, None) for x in preserve_paths)
+ if self._dbapi._plib_registry and \
+ self._dbapi._plib_registry.hasEntries():
+ for cpv, items in \
+ self._dbapi._plib_registry.getPreservedLibs().items():
+ if exclude_pkgs is not None and cpv in exclude_pkgs:
+ # These preserved libs will either be unmerged,
+ # rendering them irrelevant, or they will be
+ # preserved in the replacement package and are
+ # already represented via the preserve_paths
+ # parameter.
+ continue
+ plibs.update((x, cpv) for x in items)
+ if plibs:
+ args = ["/usr/bin/scanelf", "-qF", "%a;%F;%S;%r;%n"]
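+			# scanelf output fields requested above (parsed further below):
+			# %a = arch (e_machine), %F = file path, %S = soname,
+			# %r = RPATH/RUNPATH entries, %n = comma-separated DT_NEEDED list.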
+ args.extend(os.path.join(root, x.lstrip("." + os.sep)) \
+ for x in plibs)
+ try:
+ proc = subprocess.Popen(args, stdout=subprocess.PIPE)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ raise CommandNotFound(args[0])
+ else:
+ for l in proc.stdout:
+ try:
+ l = _unicode_decode(l,
+ encoding=_encodings['content'], errors='strict')
+ except UnicodeDecodeError:
+ l = _unicode_decode(l,
+ encoding=_encodings['content'], errors='replace')
+ writemsg_level(_("\nError decoding characters " \
+ "returned from scanelf: %s\n\n") % (l,),
+ level=logging.ERROR, noiselevel=-1)
+ l = l[3:].rstrip("\n")
+ if not l:
+ continue
+ fields = l.split(";")
+ if len(fields) < 5:
+ writemsg_level(_("\nWrong number of fields " \
+ "returned from scanelf: %s\n\n") % (l,),
+ level=logging.ERROR, noiselevel=-1)
+ continue
+ fields[1] = fields[1][root_len:]
+ owner = plibs.pop(fields[1], None)
+ lines.append((owner, "scanelf", ";".join(fields)))
+ proc.wait()
+
+ if plibs:
+ # Preserved libraries that did not appear in the scanelf output.
+ # This is known to happen with statically linked libraries.
+ # Generate dummy lines for these, so we can assume that every
+ # preserved library has an entry in self._obj_properties. This
+ # is important in order to prevent findConsumers from raising
+ # an unwanted KeyError.
+ for x, cpv in plibs.items():
+ lines.append((cpv, "plibs", ";".join(['', x, '', '', ''])))
+
+ # Share identical frozenset instances when available,
+ # in order to conserve memory.
+ frozensets = {}
+
+ for owner, location, l in lines:
+ l = l.rstrip("\n")
+ if not l:
+ continue
+ fields = l.split(";")
+ if len(fields) < 5:
+ writemsg_level(_("\nWrong number of fields " \
+ "in %s: %s\n\n") % (location, l),
+ level=logging.ERROR, noiselevel=-1)
+ continue
+ arch = fields[0]
+ obj = fields[1]
+ soname = fields[2]
+ path = frozenset(normalize_path(x) \
+ for x in filter(None, fields[3].replace(
+ "${ORIGIN}", os.path.dirname(obj)).replace(
+ "$ORIGIN", os.path.dirname(obj)).split(":")))
+ path = frozensets.setdefault(path, path)
+ needed = frozenset(x for x in fields[4].split(",") if x)
+ needed = frozensets.setdefault(needed, needed)
+
+ obj_key = self._obj_key(obj)
+ indexed = True
+ myprops = obj_properties.get(obj_key)
+ if myprops is None:
+ indexed = False
+				myprops = self._obj_properties_class(
+ arch, needed, path, soname, [], owner)
+ obj_properties[obj_key] = myprops
+			# All object paths are recorded in the properties object's
+			# alt_paths list.
+ myprops.alt_paths.append(obj)
+
+			# Don't index the same file more than once since only one
+ # set of data can be correct and therefore mixing data
+ # may corrupt the index (include_file overrides previously
+ # installed).
+ if indexed:
+ continue
+
+ arch_map = libs.get(arch)
+ if arch_map is None:
+ arch_map = {}
+ libs[arch] = arch_map
+ if soname:
+ soname_map = arch_map.get(soname)
+ if soname_map is None:
+ soname_map = self._soname_map_class(
+ providers=[], consumers=[])
+ arch_map[soname] = soname_map
+ soname_map.providers.append(obj_key)
+ for needed_soname in needed:
+ soname_map = arch_map.get(needed_soname)
+ if soname_map is None:
+ soname_map = self._soname_map_class(
+ providers=[], consumers=[])
+ arch_map[needed_soname] = soname_map
+ soname_map.consumers.append(obj_key)
+
+ for arch, sonames in libs.items():
+ for soname_node in sonames.values():
+ soname_node.providers = tuple(set(soname_node.providers))
+ soname_node.consumers = tuple(set(soname_node.consumers))
+
+ def listBrokenBinaries(self, debug=False):
+ """
+ Find binaries and their needed sonames, which have no providers.
+
+ @param debug: Boolean to enable debug output
+ @type debug: Boolean
+ @rtype: dict (example: {'/usr/bin/foo': set(['libbar.so'])})
+ @return: The return value is an object -> set-of-sonames mapping, where
+ object is a broken binary and the set consists of sonames needed by
+ object that have no corresponding libraries to fulfill the dependency.
+
+ """
+
+ os = _os_merge
+
+ class _LibraryCache(object):
+
+ """
+ Caches properties associated with paths.
+
+ The purpose of this class is to prevent multiple instances of
+ _ObjectKey for the same paths.
+
+ """
+
+ def __init__(cache_self):
+ cache_self.cache = {}
+
+ def get(cache_self, obj):
+ """
+ Caches and returns properties associated with an object.
+
+ @param obj: absolute path (can be symlink)
+ @type obj: string (example: '/usr/lib/libfoo.so')
+ @rtype: 4-tuple with types
+ (string or None, string or None, 2-tuple, Boolean)
+ @return: 4-tuple with the following components:
+ 1. arch as a string or None if it does not exist,
+ 2. soname as a string or None if it does not exist,
+ 3. obj_key as 2-tuple,
+ 4. Boolean representing whether the object exists.
+				(example: ('EM_X86_64', 'libfoo.so.1', (123L, 456L), True))
+
+ """
+ if obj in cache_self.cache:
+ return cache_self.cache[obj]
+ else:
+ obj_key = self._obj_key(obj)
+ # Check that the library exists on the filesystem.
+ if obj_key.file_exists():
+ # Get the arch and soname from LinkageMap._obj_properties if
+ # it exists. Otherwise, None.
+ obj_props = self._obj_properties.get(obj_key)
+ if obj_props is None:
+ arch = None
+ soname = None
+ else:
+ arch = obj_props.arch
+ soname = obj_props.soname
+ return cache_self.cache.setdefault(obj, \
+ (arch, soname, obj_key, True))
+ else:
+ return cache_self.cache.setdefault(obj, \
+ (None, None, obj_key, False))
+
+ rValue = {}
+ cache = _LibraryCache()
+ providers = self.listProviders()
+
+ # Iterate over all obj_keys and their providers.
+ for obj_key, sonames in providers.items():
+ obj_props = self._obj_properties[obj_key]
+ arch = obj_props.arch
+ path = obj_props.runpaths
+ objs = obj_props.alt_paths
+ path = path.union(self._defpath)
+ # Iterate over each needed soname and the set of library paths that
+ # fulfill the soname to determine if the dependency is broken.
+ for soname, libraries in sonames.items():
+ # validLibraries is used to store libraries, which satisfy soname,
+ # so if no valid libraries are found, the soname is not satisfied
+ # for obj_key. If unsatisfied, objects associated with obj_key
+ # must be emerged.
+ validLibraries = set()
+ # It could be the case that the library to satisfy the soname is
+ # not in the obj's runpath, but a symlink to the library is (eg
+ # libnvidia-tls.so.1 in nvidia-drivers). Also, since LinkageMap
+ # does not catalog symlinks, broken or missing symlinks may go
+ # unnoticed. As a result of these cases, check that a file with
+ # the same name as the soname exists in obj's runpath.
+ # XXX If we catalog symlinks in LinkageMap, this could be improved.
+ for directory in path:
+ cachedArch, cachedSoname, cachedKey, cachedExists = \
+ cache.get(os.path.join(directory, soname))
+ # Check that this library provides the needed soname. Doing
+ # this, however, will cause consumers of libraries missing
+ # sonames to be unnecessarily emerged. (eg libmix.so)
+ if cachedSoname == soname and cachedArch == arch:
+ validLibraries.add(cachedKey)
+ if debug and cachedKey not in \
+ set(map(self._obj_key_cache.get, libraries)):
+ # XXX This is most often due to soname symlinks not in
+ # a library's directory. We could catalog symlinks in
+ # LinkageMap to avoid checking for this edge case here.
+ writemsg_level(
+ _("Found provider outside of findProviders:") + \
+ (" %s -> %s %s\n" % (os.path.join(directory, soname),
+ self._obj_properties[cachedKey].alt_paths, libraries)),
+ level=logging.DEBUG,
+ noiselevel=-1)
+ # A valid library has been found, so there is no need to
+ # continue.
+ break
+ if debug and cachedArch == arch and \
+ cachedKey in self._obj_properties:
+ writemsg_level((_("Broken symlink or missing/bad soname: " + \
+ "%(dir_soname)s -> %(cachedKey)s " + \
+ "with soname %(cachedSoname)s but expecting %(soname)s") % \
+ {"dir_soname":os.path.join(directory, soname),
+ "cachedKey": self._obj_properties[cachedKey],
+ "cachedSoname": cachedSoname, "soname":soname}) + "\n",
+ level=logging.DEBUG,
+ noiselevel=-1)
+ # This conditional checks if there are no libraries to satisfy the
+ # soname (empty set).
+ if not validLibraries:
+ for obj in objs:
+ rValue.setdefault(obj, set()).add(soname)
+ # If no valid libraries have been found by this point, then
+ # there are no files named with the soname within obj's runpath,
+ # but if there are libraries (from the providers mapping), it is
+ # likely that soname symlinks or the actual libraries are
+ # missing or broken. Thus those libraries are added to rValue
+ # in order to emerge corrupt library packages.
+ for lib in libraries:
+ rValue.setdefault(lib, set()).add(soname)
+ if debug:
+ if not os.path.isfile(lib):
+ writemsg_level(_("Missing library:") + " %s\n" % (lib,),
+ level=logging.DEBUG,
+ noiselevel=-1)
+ else:
+ writemsg_level(_("Possibly missing symlink:") + \
+ "%s\n" % (os.path.join(os.path.dirname(lib), soname)),
+ level=logging.DEBUG,
+ noiselevel=-1)
+ return rValue
+
+ def listProviders(self):
+ """
+ Find the providers for all object keys in LinkageMap.
+
+ @rtype: dict (example:
+ {(123L, 456L): {'libbar.so': set(['/lib/libbar.so.1.5'])}})
+ @return: The return value is an object key -> providers mapping, where
+ providers is a mapping of soname -> set-of-library-paths returned
+ from the findProviders method.
+
+ """
+ rValue = {}
+ if not self._libs:
+ self.rebuild()
+ # Iterate over all object keys within LinkageMap.
+ for obj_key in self._obj_properties:
+ rValue.setdefault(obj_key, self.findProviders(obj_key))
+ return rValue
+
+ def isMasterLink(self, obj):
+ """
+ Determine whether an object is a "master" symlink, which means
+ that its basename is the same as the beginning part of the
+ soname and it lacks the soname's version component.
+
+ Examples:
+
+ soname | master symlink name
+ --------------------------------------------
+ libarchive.so.2.8.4 | libarchive.so
+ libproc-3.2.8.so | libproc.so
+
+ @param obj: absolute path to an object
+ @type obj: string (example: '/usr/bin/foo')
+ @rtype: Boolean
+ @return:
+ 1. True if obj is a master link
+ 2. False if obj is not a master link
+
+ """
+ os = _os_merge
+ obj_key = self._obj_key(obj)
+ if obj_key not in self._obj_properties:
+ raise KeyError("%s (%s) not in object list" % (obj_key, obj))
+ basename = os.path.basename(obj)
+ soname = self._obj_properties[obj_key].soname
+ return len(basename) < len(soname) and \
+ basename.endswith(".so") and \
+ soname.startswith(basename[:-3])
+
+ def listLibraryObjects(self):
+ """
+ Return a list of library objects.
+
+ Known limitation: library objects lacking an soname are not included.
+
+ @rtype: list of strings
+ @return: list of paths to all providers
+
+ """
+ rValue = []
+ if not self._libs:
+ self.rebuild()
+ for arch_map in self._libs.values():
+ for soname_map in arch_map.values():
+ for obj_key in soname_map.providers:
+ rValue.extend(self._obj_properties[obj_key].alt_paths)
+ return rValue
+
+ def getOwners(self, obj):
+ """
+ Return the package(s) associated with an object. Raises KeyError
+ if the object is unknown. Returns an empty tuple if the owner(s)
+ are unknown.
+
+ NOTE: For preserved libraries, the owner(s) may have been
+ previously uninstalled, but these uninstalled owners can be
+ returned by this method since they are registered in the
+ PreservedLibsRegistry.
+
+ @param obj: absolute path to an object
+ @type obj: string (example: '/usr/bin/bar')
+ @rtype: tuple
+ @return: a tuple of cpv
+ """
+ if not self._libs:
+ self.rebuild()
+ if isinstance(obj, self._ObjectKey):
+ obj_key = obj
+ else:
+ obj_key = self._obj_key_cache.get(obj)
+ if obj_key is None:
+ raise KeyError("%s not in object list" % obj)
+ obj_props = self._obj_properties.get(obj_key)
+ if obj_props is None:
+ raise KeyError("%s not in object list" % obj_key)
+ if obj_props.owner is None:
+ return ()
+ return (obj_props.owner,)
+
+ def getSoname(self, obj):
+ """
+ Return the soname associated with an object.
+
+ @param obj: absolute path to an object
+ @type obj: string (example: '/usr/bin/bar')
+ @rtype: string
+ @return: soname as a string
+
+ """
+ if not self._libs:
+ self.rebuild()
+ if isinstance(obj, self._ObjectKey):
+ obj_key = obj
+ if obj_key not in self._obj_properties:
+ raise KeyError("%s not in object list" % obj_key)
+ return self._obj_properties[obj_key].soname
+ if obj not in self._obj_key_cache:
+ raise KeyError("%s not in object list" % obj)
+ return self._obj_properties[self._obj_key_cache[obj]].soname
+
+ def findProviders(self, obj):
+ """
+ Find providers for an object or object key.
+
+ This method may be called with a key from _obj_properties.
+
+ In some cases, not all valid libraries are returned. This may occur when
+ an soname symlink referencing a library is in an object's runpath while
+ the actual library is not. We should consider cataloging symlinks within
+ LinkageMap as this would avoid those cases and would be a better model of
+ library dependencies (since the dynamic linker actually searches for
+ files named with the soname in the runpaths).
+
+ @param obj: absolute path to an object or a key from _obj_properties
+ @type obj: string (example: '/usr/bin/bar') or _ObjectKey
+ @rtype: dict (example: {'libbar.so': set(['/lib/libbar.so.1.5'])})
+ @return: The return value is a soname -> set-of-library-paths, where
+ set-of-library-paths satisfy soname.
+
+ """
+
+ os = _os_merge
+
+ rValue = {}
+
+ if not self._libs:
+ self.rebuild()
+
+ # Determine the obj_key from the arguments.
+ if isinstance(obj, self._ObjectKey):
+ obj_key = obj
+ if obj_key not in self._obj_properties:
+ raise KeyError("%s not in object list" % obj_key)
+ else:
+ obj_key = self._obj_key(obj)
+ if obj_key not in self._obj_properties:
+ raise KeyError("%s (%s) not in object list" % (obj_key, obj))
+
+ obj_props = self._obj_properties[obj_key]
+ arch = obj_props.arch
+ needed = obj_props.needed
+ path = obj_props.runpaths
+ path_keys = set(self._path_key(x) for x in path.union(self._defpath))
+ for soname in needed:
+ rValue[soname] = set()
+ if arch not in self._libs or soname not in self._libs[arch]:
+ continue
+ # For each potential provider of the soname, add it to rValue if it
+ # resides in the obj's runpath.
+ for provider_key in self._libs[arch][soname].providers:
+ providers = self._obj_properties[provider_key].alt_paths
+ for provider in providers:
+ if self._path_key(os.path.dirname(provider)) in path_keys:
+ rValue[soname].add(provider)
+ return rValue
+
+ def findConsumers(self, obj, exclude_providers=None):
+ """
+ Find consumers of an object or object key.
+
+		This method may be called with a key from _obj_properties. Note,
+		however, that passing a newly constructed _ObjectKey instance may
+		cause shadowed libraries to go undetected; pass the obj as a
+		string instead so that shadowing can be checked.
+
+ In some cases, not all consumers are returned. This may occur when
+ an soname symlink referencing a library is in an object's runpath while
+ the actual library is not. For example, this problem is noticeable for
+	binutils since its libraries are added to the path via symlinks that
+	are generated in the /usr/$CHOST/lib/ directory by binutils-config.
+ Failure to recognize consumers of these symlinks makes preserve-libs
+ fail to preserve binutils libs that are needed by these unrecognized
+ consumers.
+
+ Note that library consumption via dlopen (common for kde plugins) is
+ currently undetected. However, it is possible to use the
+ corresponding libtool archive (*.la) files to detect such consumers
+ (revdep-rebuild is able to detect them).
+
+ The exclude_providers argument is useful for determining whether
+ removal of one or more packages will create unsatisfied consumers. When
+ this option is given, consumers are excluded from the results if there
+ is an alternative provider (which is not excluded) of the required
+ soname such that the consumers will remain satisfied if the files
+ owned by exclude_providers are removed.
+
+ @param obj: absolute path to an object or a key from _obj_properties
+ @type obj: string (example: '/usr/bin/bar') or _ObjectKey
+ @param exclude_providers: A collection of callables that each take a
+ single argument referring to the path of a library (example:
+ '/usr/lib/libssl.so.0.9.8'), and return True if the library is
+ owned by a provider which is planned for removal.
+ @type exclude_providers: collection
+ @rtype: set of strings (example: set(['/bin/foo', '/usr/bin/bar']))
+	@return: The return value is a set of paths of consumer objects
+	that link against the given object's soname.
+
+ """
+
+ os = _os_merge
+
+ if not self._libs:
+ self.rebuild()
+
+ # Determine the obj_key and the set of objects matching the arguments.
+ if isinstance(obj, self._ObjectKey):
+ obj_key = obj
+ if obj_key not in self._obj_properties:
+ raise KeyError("%s not in object list" % obj_key)
+ objs = self._obj_properties[obj_key].alt_paths
+ else:
+ objs = set([obj])
+ obj_key = self._obj_key(obj)
+ if obj_key not in self._obj_properties:
+ raise KeyError("%s (%s) not in object list" % (obj_key, obj))
+
+ # If there is another version of this lib with the
+ # same soname and the soname symlink points to that
+ # other version, this lib will be shadowed and won't
+ # have any consumers.
+ if not isinstance(obj, self._ObjectKey):
+ soname = self._obj_properties[obj_key].soname
+ soname_link = os.path.join(self._root,
+ os.path.dirname(obj).lstrip(os.path.sep), soname)
+ obj_path = os.path.join(self._root, obj.lstrip(os.sep))
+ try:
+ soname_st = os.stat(soname_link)
+ obj_st = os.stat(obj_path)
+ except OSError:
+ pass
+ else:
+ if (obj_st.st_dev, obj_st.st_ino) != \
+ (soname_st.st_dev, soname_st.st_ino):
+ return set()
+
+ obj_props = self._obj_properties[obj_key]
+ arch = obj_props.arch
+ soname = obj_props.soname
+
+ soname_node = None
+ arch_map = self._libs.get(arch)
+ if arch_map is not None:
+ soname_node = arch_map.get(soname)
+
+ defpath_keys = set(self._path_key(x) for x in self._defpath)
+ satisfied_consumer_keys = set()
+ if soname_node is not None:
+ if exclude_providers is not None:
+ relevant_dir_keys = set()
+ for provider_key in soname_node.providers:
+ provider_objs = self._obj_properties[provider_key].alt_paths
+ for p in provider_objs:
+ provider_excluded = False
+ for excluded_provider_isowner in exclude_providers:
+ if excluded_provider_isowner(p):
+ provider_excluded = True
+ break
+ if not provider_excluded:
+ # This provider is not excluded. It will
+ # satisfy a consumer of this soname if it
+ # is in the default ld.so path or the
+ # consumer's runpath.
+ relevant_dir_keys.add(
+ self._path_key(os.path.dirname(p)))
+
+ if relevant_dir_keys:
+ for consumer_key in soname_node.consumers:
+ path = self._obj_properties[consumer_key].runpaths
+ path_keys = defpath_keys.copy()
+ path_keys.update(self._path_key(x) for x in path)
+ if relevant_dir_keys.intersection(path_keys):
+ satisfied_consumer_keys.add(consumer_key)
+
+ rValue = set()
+ if soname_node is not None:
+ # For each potential consumer, add it to rValue if an object from the
+ # arguments resides in the consumer's runpath.
+ objs_dir_keys = set(self._path_key(os.path.dirname(x))
+ for x in objs)
+ for consumer_key in soname_node.consumers:
+ if consumer_key in satisfied_consumer_keys:
+ continue
+ consumer_props = self._obj_properties[consumer_key]
+ path = consumer_props.runpaths
+ consumer_objs = consumer_props.alt_paths
+ path_keys = defpath_keys.union(self._path_key(x) for x in path)
+ if objs_dir_keys.intersection(path_keys):
+ rValue.update(consumer_objs)
+ return rValue
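A minimal usage sketch of the consumer lookup above (this appears to be LinkageMap.findConsumers(); the LinkageMap instance is the one Portage attaches internally to the installed-packages dbapi, and the paths and exclusion rule below are hypothetical):

    import portage

    # Internal attribute; Portage wires a LinkageMap into the vartree dbapi.
    vardb = portage.db[portage.root]["vartree"].dbapi
    linkmap = vardb._linkmap
    linkmap.rebuild()  # scan the recorded NEEDED.ELF.2 data

    def owned_by_doomed_pkg(lib_path):
        # Hypothetical exclusion rule: treat providers under this prefix
        # as owned by packages that are planned for removal.
        return lib_path.startswith("/usr/lib/old-ssl/")

    consumers = linkmap.findConsumers("/usr/lib/libssl.so.0.9.8",
        exclude_providers=[owned_by_doomed_pkg])
    for consumer_path in sorted(consumers):
        print(consumer_path)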
diff --git a/portage_with_autodep/pym/portage/util/_dyn_libs/PreservedLibsRegistry.py b/portage_with_autodep/pym/portage/util/_dyn_libs/PreservedLibsRegistry.py
new file mode 100644
index 0000000..602cf87
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/_dyn_libs/PreservedLibsRegistry.py
@@ -0,0 +1,172 @@
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import logging
+import sys
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+from portage import os
+from portage import _encodings
+from portage import _os_merge
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.exception import PermissionDenied
+from portage.localization import _
+from portage.util import atomic_ofstream
+from portage.util import writemsg_level
+from portage.versions import cpv_getkey
+from portage.locks import lockfile, unlockfile
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+class PreservedLibsRegistry(object):
+ """ This class handles the tracking of preserved library objects """
+ def __init__(self, root, filename):
+ """
+ @param root: root used to check existence of paths in pruneNonExisting
+ @type root: String
+ @param filename: absolute path for saving the preserved libs records
+ @type filename: String
+ """
+ self._root = root
+ self._filename = filename
+ self._data = None
+ self._lock = None
+
+ def lock(self):
+ """Grab an exclusive lock on the preserved libs registry."""
+ if self._lock is not None:
+ raise AssertionError("already locked")
+ self._lock = lockfile(self._filename)
+
+ def unlock(self):
+ """Release our exclusive lock on the preserved libs registry."""
+ if self._lock is None:
+ raise AssertionError("not locked")
+ unlockfile(self._lock)
+ self._lock = None
+
+ def load(self):
+ """ Reload the registry data from file """
+ self._data = None
+ try:
+ self._data = pickle.load(
+ open(_unicode_encode(self._filename,
+ encoding=_encodings['fs'], errors='strict'), 'rb'))
+ except (ValueError, pickle.UnpicklingError) as e:
+ writemsg_level(_("!!! Error loading '%s': %s\n") % \
+ (self._filename, e), level=logging.ERROR, noiselevel=-1)
+ except (EOFError, IOError) as e:
+ if isinstance(e, EOFError) or e.errno == errno.ENOENT:
+ pass
+ elif e.errno == PermissionDenied.errno:
+ raise PermissionDenied(self._filename)
+ else:
+ raise
+ if self._data is None:
+ self._data = {}
+ self._data_orig = self._data.copy()
+ self.pruneNonExisting()
+
+ def store(self):
+ """
+ Store the registry data to the file. The existing inode will be
+ replaced atomically, so if that inode is currently being used
+ for a lock then that lock will be rendered useless. Therefore,
+ it is important not to call this method until the current lock
+ is ready to be immediately released.
+ """
+ if os.environ.get("SANDBOX_ON") == "1" or \
+ self._data == self._data_orig:
+ return
+ try:
+ f = atomic_ofstream(self._filename, 'wb')
+ pickle.dump(self._data, f, protocol=2)
+ f.close()
+ except EnvironmentError as e:
+ if e.errno != PermissionDenied.errno:
+ writemsg_level("!!! %s %s\n" % (e, self._filename),
+ level=logging.ERROR, noiselevel=-1)
+ else:
+ self._data_orig = self._data.copy()
+
+ def _normalize_counter(self, counter):
+ """
+ For simplicity, normalize as a unicode string
+ and strip whitespace. This avoids the need for
+ int conversion and a possible ValueError resulting
+ from vardb corruption.
+ """
+ if not isinstance(counter, basestring):
+ counter = str(counter)
+ return _unicode_decode(counter).strip()
+
+ def register(self, cpv, slot, counter, paths):
+ """ Register new objects in the registry. If there is a record with the
+ same package name (internally derived from cpv) and slot, it is
+ overwritten with the new data.
+ @param cpv: package instance that owns the objects
+ @type cpv: CPV (as String)
+ @param slot: the value of SLOT of the given package instance
+ @type slot: String
+ @param counter: vdb counter value for the package instance
+ @type counter: String
+ @param paths: absolute paths of objects that got preserved during an update
+ @type paths: List
+ """
+ cp = cpv_getkey(cpv)
+ cps = cp+":"+slot
+ counter = self._normalize_counter(counter)
+ if len(paths) == 0 and cps in self._data \
+ and self._data[cps][0] == cpv and \
+ self._normalize_counter(self._data[cps][1]) == counter:
+ del self._data[cps]
+ elif len(paths) > 0:
+ self._data[cps] = (cpv, counter, paths)
+
+ def unregister(self, cpv, slot, counter):
+ """ Remove a previous registration of preserved objects for the given package.
+ @param cpv: package instance whose records should be removed
+ @type cpv: CPV (as String)
+ @param slot: the value of SLOT of the given package instance
+ @type slot: String
+ """
+ self.register(cpv, slot, counter, [])
+
+ def pruneNonExisting(self):
+ """ Remove all records for objects that no longer exist on the filesystem. """
+
+ os = _os_merge
+
+ for cps in list(self._data):
+ cpv, counter, paths = self._data[cps]
+ paths = [f for f in paths \
+ if os.path.exists(os.path.join(self._root, f.lstrip(os.sep)))]
+ if len(paths) > 0:
+ self._data[cps] = (cpv, counter, paths)
+ else:
+ del self._data[cps]
+
+ def hasEntries(self):
+ """ Check if this registry contains any records. """
+ if self._data is None:
+ self.load()
+ return len(self._data) > 0
+
+ def getPreservedLibs(self):
+ """ Return a mapping of packages->preserved objects.
+ @returns mapping of package instances to preserved objects
+ @rtype Dict cpv->list-of-paths
+ """
+ if self._data is None:
+ self.load()
+ rValue = {}
+ for cps in self._data:
+ rValue[self._data[cps][0]] = self._data[cps][2]
+ return rValue
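The intended calling sequence is lock, load, mutate, store, unlock. A short sketch, assuming the registry location Portage conventionally uses and made-up cpv/slot/counter/path values:

    from portage.util._dyn_libs.PreservedLibsRegistry import \
        PreservedLibsRegistry

    registry = PreservedLibsRegistry("/",
        "/var/lib/portage/preserved_libs_registry")
    registry.lock()
    try:
        registry.load()  # also prunes records whose files no longer exist
        registry.register("dev-libs/openssl-0.9.8r", "0", "12345",
            ["/usr/lib/libssl.so.0.9.8", "/usr/lib/libcrypto.so.0.9.8"])
        registry.store()  # atomic replace; no-op if nothing changed
        print(registry.getPreservedLibs())
    finally:
        registry.unlock()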
diff --git a/portage_with_autodep/pym/portage/util/_dyn_libs/__init__.py b/portage_with_autodep/pym/portage/util/_dyn_libs/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/_dyn_libs/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/portage_with_autodep/pym/portage/util/_pty.py b/portage_with_autodep/pym/portage/util/_pty.py
new file mode 100644
index 0000000..f45ff0a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/_pty.py
@@ -0,0 +1,212 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import array
+import fcntl
+import platform
+import pty
+import select
+import sys
+import termios
+
+from portage import os, _unicode_decode, _unicode_encode
+from portage.output import get_term_size, set_term_size
+from portage.process import spawn_bash
+from portage.util import writemsg
+
+def _can_test_pty_eof():
+ """
+ The _test_pty_eof() function seems to hang on most
+ kernels other than Linux.
+ This was reported for the following kernels which used to work fine
+ without this EOF test: Darwin, AIX, FreeBSD. They seem to hang on
+ the slave_file.close() call. Note that Python's implementation of
+ openpty on Solaris already caused random hangs without this EOF test
+ and hence is globally disabled.
+ @rtype: bool
+ @returns: True if _test_pty_eof() won't hang, False otherwise.
+ """
+ return platform.system() in ("Linux",)
+
+def _test_pty_eof(fdopen_buffered=False):
+ """
+ Returns True if this issue is fixed in the currently
+ running version of python: http://bugs.python.org/issue5380
+ Raises an EnvironmentError from openpty() if it fails.
+
+ NOTE: This issue is only problematic when array.fromfile()
+ is used, rather than os.read(). However, array.fromfile()
+ is preferred since it is approximately 10% faster.
+
+ New development: It appears that array.fromfile() is usable
+ with python3 as long as fdopen is called with a bufsize
+ argument of 0.
+ """
+
+ use_fork = False
+
+ test_string = 2 * "blah blah blah\n"
+ test_string = _unicode_decode(test_string,
+ encoding='utf_8', errors='strict')
+
+ # may raise EnvironmentError
+ master_fd, slave_fd = pty.openpty()
+
+ # Non-blocking mode is required for Darwin kernel.
+ fcntl.fcntl(master_fd, fcntl.F_SETFL,
+ fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+
+ # Disable post-processing of output since otherwise weird
+ # things like \n -> \r\n transformations may occur.
+ mode = termios.tcgetattr(slave_fd)
+ mode[1] &= ~termios.OPOST
+ termios.tcsetattr(slave_fd, termios.TCSANOW, mode)
+
+ # Simulate a subprocess writing some data to the
+ # slave end of the pipe, and then exiting.
+ pid = None
+ if use_fork:
+ pids = spawn_bash(_unicode_encode("echo -n '%s'" % test_string,
+ encoding='utf_8', errors='strict'), env=os.environ,
+ fd_pipes={0:sys.stdin.fileno(), 1:slave_fd, 2:slave_fd},
+ returnpid=True)
+ if isinstance(pids, int):
+ os.close(master_fd)
+ os.close(slave_fd)
+ raise EnvironmentError('spawn failed')
+ pid = pids[0]
+ else:
+ os.write(slave_fd, _unicode_encode(test_string,
+ encoding='utf_8', errors='strict'))
+ os.close(slave_fd)
+
+ # If using a fork, we must wait for the child here,
+ # in order to avoid a race condition that would
+ # lead to inconsistent results.
+ if pid is not None:
+ os.waitpid(pid, 0)
+
+ if fdopen_buffered:
+ master_file = os.fdopen(master_fd, 'rb')
+ else:
+ master_file = os.fdopen(master_fd, 'rb', 0)
+ eof = False
+ data = []
+ iwtd = [master_file]
+ owtd = []
+ ewtd = []
+
+ while not eof:
+
+ events = select.select(iwtd, owtd, ewtd)
+ if not events[0]:
+ eof = True
+ break
+
+ buf = array.array('B')
+ try:
+ buf.fromfile(master_file, 1024)
+ except (EOFError, IOError):
+ eof = True
+
+ if not buf:
+ eof = True
+ else:
+ data.append(_unicode_decode(buf.tostring(),
+ encoding='utf_8', errors='strict'))
+
+ master_file.close()
+
+ return test_string == ''.join(data)
+
+# If _test_pty_eof() can't be used for runtime detection of
+# http://bugs.python.org/issue5380, openpty can't safely be used
+# unless we can guarantee that the current version of python has
+# been fixed (affects all current versions of python3). When
+# this issue is fixed in python3, we can add another sys.hexversion
+# conditional to enable openpty support in the fixed versions.
+if sys.hexversion >= 0x3000000 and not _can_test_pty_eof():
+ _disable_openpty = True
+else:
+ # Disable the use of openpty on Solaris as it seems Python's openpty
+ # implementation doesn't play nice on Solaris with Portage's
+ # behaviour causing hangs/deadlocks.
+ # Additional note for the future: on Interix, pipes do NOT work, so
+ # _disable_openpty on Interix must *never* be True
+ _disable_openpty = platform.system() in ("SunOS",)
+_tested_pty = False
+
+if not _can_test_pty_eof():
+ # Skip _test_pty_eof() on systems where it hangs.
+ _tested_pty = True
+
+_fbsd_test_pty = platform.system() == 'FreeBSD'
+
+def _create_pty_or_pipe(copy_term_size=None):
+ """
+ Try to create a pty, and if that fails, create a normal
+ pipe instead.
+
+ @param copy_term_size: If a tty file descriptor is given
+ then the term size will be copied to the pty.
+ @type copy_term_size: int
+ @rtype: tuple
+ @returns: A tuple of (is_pty, master_fd, slave_fd) where
+ is_pty is True if a pty was successfully allocated, and
+ False if a normal pipe was allocated.
+ """
+
+ got_pty = False
+
+ global _disable_openpty, _fbsd_test_pty, _tested_pty
+ if not (_tested_pty or _disable_openpty):
+ try:
+ if not _test_pty_eof():
+ _disable_openpty = True
+ except EnvironmentError as e:
+ _disable_openpty = True
+ writemsg("openpty failed: '%s'\n" % str(e),
+ noiselevel=-1)
+ del e
+ _tested_pty = True
+
+ if _fbsd_test_pty and not _disable_openpty:
+ # Test for python openpty breakage after freebsd7 to freebsd8
+ # upgrade, which results in a 'Function not implemented' error
+ # and the process being killed.
+ pid = os.fork()
+ if pid == 0:
+ pty.openpty()
+ os._exit(os.EX_OK)
+ pid, status = os.waitpid(pid, 0)
+ if (status & 0xff) == 140:
+ _disable_openpty = True
+ _fbsd_test_pty = False
+
+ if _disable_openpty:
+ master_fd, slave_fd = os.pipe()
+ else:
+ try:
+ master_fd, slave_fd = pty.openpty()
+ got_pty = True
+ except EnvironmentError as e:
+ _disable_openpty = True
+ writemsg("openpty failed: '%s'\n" % str(e),
+ noiselevel=-1)
+ del e
+ master_fd, slave_fd = os.pipe()
+
+ if got_pty:
+ # Disable post-processing of output since otherwise weird
+ # things like \n -> \r\n transformations may occur.
+ mode = termios.tcgetattr(slave_fd)
+ mode[1] &= ~termios.OPOST
+ termios.tcsetattr(slave_fd, termios.TCSANOW, mode)
+
+ if got_pty and \
+ copy_term_size is not None and \
+ os.isatty(copy_term_size):
+ rows, columns = get_term_size()
+ set_term_size(rows, columns, slave_fd)
+
+ return (got_pty, master_fd, slave_fd)
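A sketch of how a caller might consume _create_pty_or_pipe(), capturing a child's output while transparently falling back to a pipe (the child command is arbitrary; the standard library os module is used directly here):

    import os
    import sys
    from portage.util._pty import _create_pty_or_pipe

    got_pty, master_fd, slave_fd = _create_pty_or_pipe(
        copy_term_size=sys.stdout.fileno())
    pid = os.fork()
    if pid == 0:
        # Child: route stdout to the slave end and exec something noisy.
        os.dup2(slave_fd, sys.stdout.fileno())
        os.close(master_fd)
        os.close(slave_fd)
        os.execlp("echo", "echo", "hello")
    os.close(slave_fd)
    print("allocated a %s" % ("pty" if got_pty else "pipe"))
    print(os.read(master_fd, 4096))
    os.close(master_fd)
    os.waitpid(pid, 0)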
diff --git a/portage_with_autodep/pym/portage/util/digraph.py b/portage_with_autodep/pym/portage/util/digraph.py
new file mode 100644
index 0000000..1bbe10f
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/digraph.py
@@ -0,0 +1,342 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['digraph']
+
+from collections import deque
+import sys
+
+from portage import _unicode_decode
+from portage.util import writemsg
+
+class digraph(object):
+ """
+ A directed graph object.
+ """
+
+ def __init__(self):
+ """Create an empty digraph"""
+
+ # { node : ( { child : priority } , { parent : priority } ) }
+ self.nodes = {}
+ self.order = []
+
+ def add(self, node, parent, priority=0):
+ """Adds the specified node with the specified parent.
+
+ If an edge between node and parent already exists, the new
+ priority is merged into the sorted list of priorities for that edge."""
+
+ if node not in self.nodes:
+ self.nodes[node] = ({}, {}, node)
+ self.order.append(node)
+
+ if not parent:
+ return
+
+ if parent not in self.nodes:
+ self.nodes[parent] = ({}, {}, parent)
+ self.order.append(parent)
+
+ priorities = self.nodes[node][1].get(parent)
+ if priorities is None:
+ priorities = []
+ self.nodes[node][1][parent] = priorities
+ self.nodes[parent][0][node] = priorities
+ priorities.append(priority)
+ priorities.sort()
+
+ def remove(self, node):
+ """Removes the specified node from the digraph, also removing
+ any ties to other nodes in the digraph. Raises KeyError if the
+ node doesn't exist."""
+
+ if node not in self.nodes:
+ raise KeyError(node)
+
+ for parent in self.nodes[node][1]:
+ del self.nodes[parent][0][node]
+ for child in self.nodes[node][0]:
+ del self.nodes[child][1][node]
+
+ del self.nodes[node]
+ self.order.remove(node)
+
+ def difference_update(self, t):
+ """
+ Remove all given nodes from the graph. This is more efficient
+ than multiple calls to the remove() method.
+ """
+ if isinstance(t, (list, tuple)) or \
+ not hasattr(t, "__contains__"):
+ t = frozenset(t)
+ order = []
+ for node in self.order:
+ if node not in t:
+ order.append(node)
+ continue
+ for parent in self.nodes[node][1]:
+ del self.nodes[parent][0][node]
+ for child in self.nodes[node][0]:
+ del self.nodes[child][1][node]
+ del self.nodes[node]
+ self.order = order
+
+ def remove_edge(self, child, parent):
+ """
+ Remove edge in the direction from child to parent. Note that it is
+ possible for a remaining edge to exist in the opposite direction.
+ Any endpoint vertices that become isolated will remain in the graph.
+ """
+
+ # Nothing should be modified when a KeyError is raised.
+ for k in parent, child:
+ if k not in self.nodes:
+ raise KeyError(k)
+
+ # Make sure the edge exists.
+ if child not in self.nodes[parent][0]:
+ raise KeyError(child)
+ if parent not in self.nodes[child][1]:
+ raise KeyError(parent)
+
+ # Remove the edge.
+ del self.nodes[child][1][parent]
+ del self.nodes[parent][0][child]
+
+ def __iter__(self):
+ return iter(self.order)
+
+ def contains(self, node):
+ """Checks if the digraph contains the given node"""
+ return node in self.nodes
+
+ def get(self, key, default=None):
+ node_data = self.nodes.get(key, self)
+ if node_data is self:
+ return default
+ return node_data[2]
+
+ def all_nodes(self):
+ """Return a list of all nodes in the graph"""
+ return self.order[:]
+
+ def child_nodes(self, node, ignore_priority=None):
+ """Return all children of the specified node"""
+ if ignore_priority is None:
+ return list(self.nodes[node][0])
+ children = []
+ if hasattr(ignore_priority, '__call__'):
+ for child, priorities in self.nodes[node][0].items():
+ for priority in priorities:
+ if not ignore_priority(priority):
+ children.append(child)
+ break
+ else:
+ for child, priorities in self.nodes[node][0].items():
+ if ignore_priority < priorities[-1]:
+ children.append(child)
+ return children
+
+ def parent_nodes(self, node, ignore_priority=None):
+ """Return all parents of the specified node"""
+ if ignore_priority is None:
+ return list(self.nodes[node][1])
+ parents = []
+ if hasattr(ignore_priority, '__call__'):
+ for parent, priorities in self.nodes[node][1].items():
+ for priority in priorities:
+ if not ignore_priority(priority):
+ parents.append(parent)
+ break
+ else:
+ for parent, priorities in self.nodes[node][1].items():
+ if ignore_priority < priorities[-1]:
+ parents.append(parent)
+ return parents
+
+ def leaf_nodes(self, ignore_priority=None):
+ """Return all nodes that have no children
+
+ If ignore_priority is given, children whose edge priorities
+ are all ignored are not counted."""
+
+ leaf_nodes = []
+ if ignore_priority is None:
+ for node in self.order:
+ if not self.nodes[node][0]:
+ leaf_nodes.append(node)
+ elif hasattr(ignore_priority, '__call__'):
+ for node in self.order:
+ is_leaf_node = True
+ for child, priorities in self.nodes[node][0].items():
+ for priority in priorities:
+ if not ignore_priority(priority):
+ is_leaf_node = False
+ break
+ if not is_leaf_node:
+ break
+ if is_leaf_node:
+ leaf_nodes.append(node)
+ else:
+ for node in self.order:
+ is_leaf_node = True
+ for child, priorities in self.nodes[node][0].items():
+ if ignore_priority < priorities[-1]:
+ is_leaf_node = False
+ break
+ if is_leaf_node:
+ leaf_nodes.append(node)
+ return leaf_nodes
+
+ def root_nodes(self, ignore_priority=None):
+ """Return all nodes that have no parents.
+
+ If ignore_priority is given, parents whose edge priorities
+ are all ignored are not counted."""
+
+ root_nodes = []
+ if ignore_priority is None:
+ for node in self.order:
+ if not self.nodes[node][1]:
+ root_nodes.append(node)
+ elif hasattr(ignore_priority, '__call__'):
+ for node in self.order:
+ is_root_node = True
+ for parent, priorities in self.nodes[node][1].items():
+ for priority in priorities:
+ if not ignore_priority(priority):
+ is_root_node = False
+ break
+ if not is_root_node:
+ break
+ if is_root_node:
+ root_nodes.append(node)
+ else:
+ for node in self.order:
+ is_root_node = True
+ for parent, priorities in self.nodes[node][1].items():
+ if ignore_priority < priorities[-1]:
+ is_root_node = False
+ break
+ if is_root_node:
+ root_nodes.append(node)
+ return root_nodes
+
+ def __bool__(self):
+ return bool(self.nodes)
+
+ def is_empty(self):
+ """Checks if the digraph is empty"""
+ return len(self.nodes) == 0
+
+ def clone(self):
+ clone = digraph()
+ clone.nodes = {}
+ memo = {}
+ for children, parents, node in self.nodes.values():
+ children_clone = {}
+ for child, priorities in children.items():
+ priorities_clone = memo.get(id(priorities))
+ if priorities_clone is None:
+ priorities_clone = priorities[:]
+ memo[id(priorities)] = priorities_clone
+ children_clone[child] = priorities_clone
+ parents_clone = {}
+ for parent, priorities in parents.items():
+ priorities_clone = memo.get(id(priorities))
+ if priorities_clone is None:
+ priorities_clone = priorities[:]
+ memo[id(priorities)] = priorities_clone
+ parents_clone[parent] = priorities_clone
+ clone.nodes[node] = (children_clone, parents_clone, node)
+ clone.order = self.order[:]
+ return clone
+
+ def delnode(self, node):
+ try:
+ self.remove(node)
+ except KeyError:
+ pass
+
+ def firstzero(self):
+ leaf_nodes = self.leaf_nodes()
+ if leaf_nodes:
+ return leaf_nodes[0]
+ return None
+
+ def hasallzeros(self, ignore_priority=None):
+ return len(self.leaf_nodes(ignore_priority=ignore_priority)) == \
+ len(self.order)
+
+ def debug_print(self):
+ def output(s):
+ writemsg(s, noiselevel=-1)
+ # Use _unicode_decode() to force unicode format
+ # strings for python-2.x safety, ensuring that
+ # node.__unicode__() is used when necessary.
+ for node in self.nodes:
+ output(_unicode_decode("%s ") % (node,))
+ if self.nodes[node][0]:
+ output("depends on\n")
+ else:
+ output("(no children)\n")
+ for child, priorities in self.nodes[node][0].items():
+ output(_unicode_decode(" %s (%s)\n") % \
+ (child, priorities[-1],))
+
+ def bfs(self, start, ignore_priority=None):
+ if start not in self:
+ raise KeyError(start)
+
+ queue, enqueued = deque([(None, start)]), set([start])
+ while queue:
+ parent, n = queue.popleft()
+ yield parent, n
+ new = set(self.child_nodes(n, ignore_priority)) - enqueued
+ enqueued |= new
+ queue.extend([(n, child) for child in new])
+
+ def shortest_path(self, start, end, ignore_priority=None):
+ if start not in self:
+ raise KeyError(start)
+ elif end not in self:
+ raise KeyError(end)
+
+ paths = {None: []}
+ for parent, child in self.bfs(start, ignore_priority):
+ paths[child] = paths[parent] + [child]
+ if child == end:
+ return paths[child]
+ return None
+
+ def get_cycles(self, ignore_priority=None, max_length=None):
+ """
+ Returns all cycles that have at most length 'max_length'.
+ If 'max_length' is 'None', all cycles are returned.
+ """
+ all_cycles = []
+ for node in self.nodes:
+ shortest_path = None
+ for child in self.child_nodes(node, ignore_priority):
+ path = self.shortest_path(child, node, ignore_priority)
+ if path is None:
+ continue
+ if not shortest_path or len(shortest_path) > len(path):
+ shortest_path = path
+ if shortest_path:
+ if not max_length or len(shortest_path) <= max_length:
+ all_cycles.append(shortest_path)
+ return all_cycles
+
+ # Backward compatibility
+ addnode = add
+ allnodes = all_nodes
+ allzeros = leaf_nodes
+ hasnode = contains
+ __contains__ = contains
+ empty = is_empty
+ copy = clone
+
+ if sys.hexversion < 0x3000000:
+ __nonzero__ = __bool__
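Since the class is self-contained, its behaviour is easy to demonstrate; the node names below are arbitrary:

    from portage.util.digraph import digraph

    g = digraph()
    g.add("glibc", "gcc")        # "glibc" becomes a child of "gcc"
    g.add("glibc", "coreutils")
    print(g.leaf_nodes())        # ['glibc'] -- nodes without children
    print(g.root_nodes())        # ['gcc', 'coreutils'] -- without parents

    g.add("gcc", "glibc")        # close a cycle gcc <-> glibc
    print(g.shortest_path("coreutils", "gcc"))
                                 # ['coreutils', 'glibc', 'gcc']
    print(g.get_cycles())        # e.g. [['gcc', 'glibc'], ['glibc', 'gcc']]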
diff --git a/portage_with_autodep/pym/portage/util/env_update.py b/portage_with_autodep/pym/portage/util/env_update.py
new file mode 100644
index 0000000..eb8a0d9
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/env_update.py
@@ -0,0 +1,293 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['env_update']
+
+import errno
+import io
+import stat
+import sys
+import time
+
+import portage
+from portage import os, _encodings, _unicode_encode
+from portage.checksum import prelink_capable
+from portage.data import ostype
+from portage.exception import ParseError
+from portage.localization import _
+from portage.process import find_binary
+from portage.util import atomic_ofstream, ensure_dirs, getconfig, \
+ normalize_path, writemsg
+from portage.util.listdir import listdir
+
+if sys.hexversion >= 0x3000000:
+ long = int
+
+def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None,
+ env=None, writemsg_level=None):
+ """
+ Parse /etc/env.d and use it to generate /etc/profile.env, csh.env,
+ ld.so.conf, and prelink.conf. Finally, run ldconfig. When ldconfig is
+ called, its -X option will be used in order to avoid potential
+ interference with installed soname symlinks that are required for
+ correct operation of FEATURES=preserve-libs for downgrade operations.
+ It's not necessary for ldconfig to create soname symlinks, since
+ portage will use NEEDED.ELF.2 data to automatically create them
+ after src_install if they happen to be missing.
+ @param makelinks: True if ldconfig should be called, False otherwise
+ @param target_root: root that is passed to the ldconfig -r option,
+ defaults to portage.settings["ROOT"].
+ @type target_root: String (Path)
+ """
+ if writemsg_level is None:
+ writemsg_level = portage.util.writemsg_level
+ if target_root is None:
+ target_root = portage.settings["ROOT"]
+ if prev_mtimes is None:
+ prev_mtimes = portage.mtimedb["ldpath"]
+ if env is None:
+ env = os.environ
+ envd_dir = os.path.join(target_root, "etc", "env.d")
+ ensure_dirs(envd_dir, mode=0o755)
+ fns = listdir(envd_dir, EmptyOnError=1)
+ fns.sort()
+ templist = []
+ for x in fns:
+ if len(x) < 3:
+ continue
+ if not x[0].isdigit() or not x[1].isdigit():
+ continue
+ if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
+ continue
+ templist.append(x)
+ fns = templist
+ del templist
+
+ space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
+ colon_separated = set(["ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
+ "CLASSPATH", "INFODIR", "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH",
+ "PATH", "PKG_CONFIG_PATH", "PRELINK_PATH", "PRELINK_PATH_MASK",
+ "PYTHONPATH", "ROOTPATH"])
+
+ config_list = []
+
+ for x in fns:
+ file_path = os.path.join(envd_dir, x)
+ try:
+ myconfig = getconfig(file_path, expand=False)
+ except ParseError as e:
+ writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
+ del e
+ continue
+ if myconfig is None:
+ # broken symlink or file removed by a concurrent process
+ writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
+ continue
+
+ config_list.append(myconfig)
+ if "SPACE_SEPARATED" in myconfig:
+ space_separated.update(myconfig["SPACE_SEPARATED"].split())
+ del myconfig["SPACE_SEPARATED"]
+ if "COLON_SEPARATED" in myconfig:
+ colon_separated.update(myconfig["COLON_SEPARATED"].split())
+ del myconfig["COLON_SEPARATED"]
+
+ env = {}
+ specials = {}
+ for var in space_separated:
+ mylist = []
+ for myconfig in config_list:
+ if var in myconfig:
+ for item in myconfig[var].split():
+ if item and not item in mylist:
+ mylist.append(item)
+ del myconfig[var] # prepare for env.update(myconfig)
+ if mylist:
+ env[var] = " ".join(mylist)
+ specials[var] = mylist
+
+ for var in colon_separated:
+ mylist = []
+ for myconfig in config_list:
+ if var in myconfig:
+ for item in myconfig[var].split(":"):
+ if item and not item in mylist:
+ mylist.append(item)
+ del myconfig[var] # prepare for env.update(myconfig)
+ if mylist:
+ env[var] = ":".join(mylist)
+ specials[var] = mylist
+
+ for myconfig in config_list:
+ """Cumulative variables have already been deleted from myconfig so that
+ they won't be overwritten by this dict.update call."""
+ env.update(myconfig)
+
+ ldsoconf_path = os.path.join(target_root, "etc", "ld.so.conf")
+ try:
+ myld = io.open(_unicode_encode(ldsoconf_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='replace')
+ myldlines=myld.readlines()
+ myld.close()
+ oldld=[]
+ for x in myldlines:
+ #each line has at least one char (a newline)
+ if x[:1] == "#":
+ continue
+ oldld.append(x[:-1])
+ except (IOError, OSError) as e:
+ if e.errno != errno.ENOENT:
+ raise
+ oldld = None
+
+ ld_cache_update=False
+
+ newld = specials["LDPATH"]
+ if (oldld != newld):
+ #ld.so.conf needs updating and ldconfig needs to be run
+ myfd = atomic_ofstream(ldsoconf_path)
+ myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
+ myfd.write("# contents of /etc/env.d directory\n")
+ for x in specials["LDPATH"]:
+ myfd.write(x + "\n")
+ myfd.close()
+ ld_cache_update=True
+
+ # Update prelink.conf if we are prelink-enabled
+ if prelink_capable:
+ newprelink = atomic_ofstream(
+ os.path.join(target_root, "etc", "prelink.conf"))
+ newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
+ newprelink.write("# contents of /etc/env.d directory\n")
+
+ for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
+ newprelink.write("-l %s\n" % (x,));
+ prelink_paths = []
+ prelink_paths += specials.get("LDPATH", [])
+ prelink_paths += specials.get("PATH", [])
+ prelink_paths += specials.get("PRELINK_PATH", [])
+ prelink_path_mask = specials.get("PRELINK_PATH_MASK", [])
+ for x in prelink_paths:
+ if not x:
+ continue
+ if x[-1:] != '/':
+ x += "/"
+ plmasked = 0
+ for y in prelink_path_mask:
+ if not y:
+ continue
+ if y[-1] != '/':
+ y += "/"
+ if y == x[0:len(y)]:
+ plmasked = 1
+ break
+ if not plmasked:
+ newprelink.write("-h %s\n" % (x,))
+ for x in prelink_path_mask:
+ newprelink.write("-b %s\n" % (x,))
+ newprelink.close()
+
+ current_time = long(time.time())
+ mtime_changed = False
+ lib_dirs = set()
+ for lib_dir in set(specials["LDPATH"] + \
+ ['usr/lib','usr/lib64','usr/lib32','lib','lib64','lib32']):
+ x = os.path.join(target_root, lib_dir.lstrip(os.sep))
+ try:
+ newldpathtime = os.stat(x)[stat.ST_MTIME]
+ lib_dirs.add(normalize_path(x))
+ except OSError as oe:
+ if oe.errno == errno.ENOENT:
+ try:
+ del prev_mtimes[x]
+ except KeyError:
+ pass
+ # ignore this path because it doesn't exist
+ continue
+ raise
+ if newldpathtime == current_time:
+ # Reset mtime to avoid the potential ambiguity of times that
+ # differ by less than 1 second.
+ newldpathtime -= 1
+ os.utime(x, (newldpathtime, newldpathtime))
+ prev_mtimes[x] = newldpathtime
+ mtime_changed = True
+ elif x in prev_mtimes:
+ if prev_mtimes[x] == newldpathtime:
+ pass
+ else:
+ prev_mtimes[x] = newldpathtime
+ mtime_changed = True
+ else:
+ prev_mtimes[x] = newldpathtime
+ mtime_changed = True
+
+ if mtime_changed:
+ ld_cache_update = True
+
+ if makelinks and \
+ not ld_cache_update and \
+ contents is not None:
+ libdir_contents_changed = False
+ for mypath, mydata in contents.items():
+ if mydata[0] not in ("obj", "sym"):
+ continue
+ head, tail = os.path.split(mypath)
+ if head in lib_dirs:
+ libdir_contents_changed = True
+ break
+ if not libdir_contents_changed:
+ makelinks = False
+
+ ldconfig = "/sbin/ldconfig"
+ if "CHOST" in env and "CBUILD" in env and \
+ env["CHOST"] != env["CBUILD"]:
+ ldconfig = find_binary("%s-ldconfig" % env["CHOST"])
+
+ # Only run ldconfig as needed
+ if (ld_cache_update or makelinks) and ldconfig:
+ # ldconfig has very different behaviour between FreeBSD and Linux
+ if ostype == "Linux" or ostype.lower().endswith("gnu"):
+ # We can't update links if we haven't cleaned other versions first, as
+ # an older package installed ON TOP of a newer version will cause ldconfig
+ # to overwrite the symlinks we just made. -X means no links. After 'clean'
+ # we can safely create links.
+ writemsg_level(_(">>> Regenerating %setc/ld.so.cache...\n") % \
+ (target_root,))
+ os.system("cd / ; %s -X -r '%s'" % (ldconfig, target_root))
+ elif ostype in ("FreeBSD","DragonFly"):
+ writemsg_level(_(">>> Regenerating %svar/run/ld-elf.so.hints...\n") % \
+ target_root)
+ os.system(("cd / ; %s -elf -i " + \
+ "-f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'") % \
+ (ldconfig, target_root, target_root))
+
+ del specials["LDPATH"]
+
+ penvnotice = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
+ penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
+ cenvnotice = penvnotice[:]
+ penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
+ cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"
+
+ #create /etc/profile.env for bash support
+ outfile = atomic_ofstream(os.path.join(target_root, "etc", "profile.env"))
+ outfile.write(penvnotice)
+
+ env_keys = [ x for x in env if x != "LDPATH" ]
+ env_keys.sort()
+ for k in env_keys:
+ v = env[k]
+ if v.startswith('$') and not v.startswith('${'):
+ outfile.write("export %s=$'%s'\n" % (k, v[1:]))
+ else:
+ outfile.write("export %s='%s'\n" % (k, v))
+ outfile.close()
+
+ #create /etc/csh.env for (t)csh support
+ outfile = atomic_ofstream(os.path.join(target_root, "etc", "csh.env"))
+ outfile.write(cenvnotice)
+ for x in env_keys:
+ outfile.write("setenv %s '%s'\n" % (x, env[x]))
+ outfile.close()
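In Portage this function is driven by emerge and the env-update tool; a direct call looks roughly like the sketch below (it needs root privileges and a configured Portage, and the explicit arguments merely restate the defaults):

    import portage
    from portage.util.env_update import env_update

    # Regenerate profile.env, csh.env, ld.so.conf and prelink.conf under
    # ROOT, then run ldconfig if any library path changed.
    env_update(makelinks=1, target_root=portage.settings["ROOT"],
        prev_mtimes=portage.mtimedb["ldpath"])
    portage.mtimedb.commit()  # persist the updated ldpath mtimes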
diff --git a/portage_with_autodep/pym/portage/util/lafilefixer.py b/portage_with_autodep/pym/portage/util/lafilefixer.py
new file mode 100644
index 0000000..2b093d8
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/lafilefixer.py
@@ -0,0 +1,185 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os as _os
+import re
+
+from portage import _unicode_decode
+from portage.exception import InvalidData
+
+#########################################################
+# This is a re-implementation of dev-util/lafilefixer-0.5.
+# rewrite_lafile() takes the contents of an .la file as a string.
+# It then parses the dependency_libs and inherited_linker_flags
+# entries.
+# We insist on dependency_libs being present. inherited_linker_flags
+# is optional.
+# There are strict rules about the syntax imposed by libtool's libltdl.
+# See 'parse_dotla_file' and 'trim' functions in libltdl/ltdl.c.
+# Note that duplicated entries of dependency_libs and inherited_linker_flags
+# are ignored by libtool (last one wins), but we treat them as an error (like
+# lafilefixer does).
+# What it does:
+# * Replaces all .la files with absolute paths in dependency_libs with
+# corresponding -l* and -L* entries
+# (/usr/lib64/libfoo.la -> -L/usr/lib64 -lfoo)
+# * Moves various flags (see flag_re below) to inherited_linker_flags,
+# if such an entry was present.
+# * Reorders dependency_libs such that all -R* entries precede -L* entries
+# and these precede all other entries.
+# * Removes duplicated entries from dependency_libs
+# * Takes care that no entry is added to inherited_linker_flags that is
+# already present.
+#########################################################
+
+#These regexes are used to parse the interesting entries in the la file
+dep_libs_re = re.compile(b"dependency_libs='(?P<value>[^']*)'$")
+inh_link_flags_re = re.compile(b"inherited_linker_flags='(?P<value>[^']*)'$")
+
+#regexes for replacing stuff in -L entries.
+#replace 'X11R6/lib' and 'local/lib' with 'lib', no idea what this is about.
+X11_local_sub = re.compile(b"X11R6/lib|local/lib")
+#get rid of the '..'
+pkgconfig_sub1 = re.compile(b"usr/lib[^/]*/pkgconfig/\.\./\.\.")
+pkgconfig_sub2 = re.compile(b"(?P<usrlib>usr/lib[^/]*)/pkgconfig/\.\.")
+
+#detect flags that should go into inherited_linker_flags instead of dependency_libs
+flag_re = re.compile(b"-mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe|-threads")
+
+def _parse_lafile_contents(contents):
+ """
+ Parses 'dependency_libs' and 'inherited_linker_flags' lines.
+ """
+
+ dep_libs = None
+ inh_link_flags = None
+
+ for line in contents.split(b"\n"):
+ m = dep_libs_re.match(line)
+ if m:
+ if dep_libs is not None:
+ raise InvalidData("duplicated dependency_libs entry")
+ dep_libs = m.group("value")
+ continue
+
+ m = inh_link_flags_re.match(line)
+ if m:
+ if inh_link_flags is not None:
+ raise InvalidData("duplicated inherited_linker_flags entry")
+ inh_link_flags = m.group("value")
+ continue
+
+ return dep_libs, inh_link_flags
+
+def rewrite_lafile(contents):
+ """
+ Given the contents of an .la file, parse and fix it.
+ This operates with strings of raw bytes (assumed to contain some ascii
+ characters), in order to avoid any potential character encoding issues.
+ Raises 'InvalidData' if the .la file is invalid.
+ @param contents: the contents of a libtool archive file
+ @type contents: bytes
+ @rtype: tuple
+ @returns: (True, fixed_contents) if something needed to be
+ fixed, (False, None) otherwise.
+ """
+ #Parse the 'dependency_libs' and 'inherited_linker_flags' lines.
+ dep_libs, inh_link_flags = \
+ _parse_lafile_contents(contents)
+
+ if dep_libs is None:
+ raise InvalidData("missing or invalid dependency_libs")
+
+ new_dep_libs = []
+ new_inh_link_flags = []
+ librpath = []
+ libladir = []
+
+ if inh_link_flags is not None:
+ new_inh_link_flags = inh_link_flags.split()
+
+ #Check entries in 'dependency_libs'.
+ for dep_libs_entry in dep_libs.split():
+ if dep_libs_entry.startswith(b"-l"):
+ #-lfoo, keep it
+ if dep_libs_entry not in new_dep_libs:
+ new_dep_libs.append(dep_libs_entry)
+
+ elif dep_libs_entry.endswith(b".la"):
+ #Two cases:
+ #1) /usr/lib64/libfoo.la, turn it into -lfoo and append -L/usr/lib64 to libladir
+ #2) libfoo.la, keep it
+ dir, file = _os.path.split(dep_libs_entry)
+
+ if not dir or not file.startswith(b"lib"):
+ if dep_libs_entry not in new_dep_libs:
+ new_dep_libs.append(dep_libs_entry)
+ else:
+ #/usr/lib64/libfoo.la -> -lfoo
+ lib = b"-l" + file[3:-3]
+ if lib not in new_dep_libs:
+ new_dep_libs.append(lib)
+ #/usr/lib64/libfoo.la -> -L/usr/lib64
+ ladir = b"-L" + dir
+ if ladir not in libladir:
+ libladir.append(ladir)
+
+ elif dep_libs_entry.startswith(b"-L"):
+ #Do some replacement magic and store them in 'libladir'.
+ #This allows us to place all -L entries at the beginning
+ #of 'dependency_libs'.
+ ladir = dep_libs_entry
+
+ ladir = X11_local_sub.sub(b"lib", ladir)
+ ladir = pkgconfig_sub1.sub(b"usr", ladir)
+ ladir = pkgconfig_sub2.sub(b"\g<usrlib>", ladir)
+
+ if ladir not in libladir:
+ libladir.append(ladir)
+
+ elif dep_libs_entry.startswith(b"-R"):
+ if dep_libs_entry not in librpath:
+ librpath.append(dep_libs_entry)
+
+ elif flag_re.match(dep_libs_entry):
+ #All this stuff goes into inh_link_flags, if the la file has such an entry.
+ #If it doesn't, they stay in 'dependency_libs'.
+ if inh_link_flags is not None:
+ if dep_libs_entry not in new_inh_link_flags:
+ new_inh_link_flags.append(dep_libs_entry)
+ else:
+ if dep_libs_entry not in new_dep_libs:
+ new_dep_libs.append(dep_libs_entry)
+
+ else:
+ raise InvalidData("Error: Unexpected entry '%s' in 'dependency_libs'" \
+ % _unicode_decode(dep_libs_entry))
+
+ #What should 'dependency_libs' and 'inherited_linker_flags' look like?
+ expected_dep_libs = b""
+ for x in (librpath, libladir, new_dep_libs):
+ if x:
+ expected_dep_libs += b" " + b" ".join(x)
+
+ expected_inh_link_flags = b""
+ if new_inh_link_flags:
+ expected_inh_link_flags += b" " + b" ".join(new_inh_link_flags)
+
+ #Don't touch the file if we don't need to, otherwise put the expected values into
+ #'contents' and write it into the la file.
+
+ changed = False
+ if dep_libs != expected_dep_libs:
+ contents = contents.replace(b"dependency_libs='" + dep_libs + b"'", \
+ b"dependency_libs='" + expected_dep_libs + b"'")
+ changed = True
+
+ if inh_link_flags is not None and expected_inh_link_flags != inh_link_flags:
+ contents = contents.replace(b"inherited_linker_flags='" + inh_link_flags + b"'", \
+ b"inherited_linker_flags='" + expected_inh_link_flags + b"'")
+ changed = True
+
+ if changed:
+ return True, contents
+ else:
+ return False, None
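A worked example of rewrite_lafile() on a made-up two-line .la fragment (contents are bytes, as the docstring requires):

    from portage.util.lafilefixer import rewrite_lafile

    contents = (
        b"dependency_libs=' /usr/lib64/libbar.la -L/usr/local/lib -lbaz'\n"
        b"inherited_linker_flags=''\n"
    )
    changed, fixed = rewrite_lafile(contents)
    # /usr/lib64/libbar.la becomes -L/usr/lib64 -lbar, 'local/lib' is
    # rewritten to 'lib', and -L entries are moved to the front, giving:
    # dependency_libs=' -L/usr/lib64 -L/usr/lib -lbar -lbaz'
    print(changed)
    print(fixed.decode("ascii"))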
diff --git a/portage_with_autodep/pym/portage/util/listdir.py b/portage_with_autodep/pym/portage/util/listdir.py
new file mode 100644
index 0000000..5753d2f
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/listdir.py
@@ -0,0 +1,151 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['cacheddir', 'listdir']
+
+import errno
+import stat
+import time
+
+from portage import os
+from portage.exception import DirectoryNotFound, PermissionDenied, PortageException
+from portage.util import normalize_path, writemsg
+
+_ignorecvs_dirs = ('CVS', 'RCS', 'SCCS', '.svn', '.git')
+dircache = {}
+cacheHit = 0
+cacheMiss = 0
+cacheStale = 0
+
+def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
+ global cacheHit,cacheMiss,cacheStale
+ mypath = normalize_path(my_original_path)
+ if mypath in dircache:
+ cacheHit += 1
+ cached_mtime, list, ftype = dircache[mypath]
+ else:
+ cacheMiss += 1
+ cached_mtime, list, ftype = -1, [], []
+ try:
+ pathstat = os.stat(mypath)
+ if stat.S_ISDIR(pathstat[stat.ST_MODE]):
+ mtime = pathstat.st_mtime
+ else:
+ raise DirectoryNotFound(mypath)
+ except EnvironmentError as e:
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied(mypath)
+ del e
+ return [], []
+ except PortageException:
+ return [], []
+ # Python returns mtime in seconds, so if it was changed in the last few seconds, it could be invalid
+ if mtime != cached_mtime or time.time() - mtime < 4:
+ if mypath in dircache:
+ cacheStale += 1
+ try:
+ list = os.listdir(mypath)
+ except EnvironmentError as e:
+ if e.errno != errno.EACCES:
+ raise
+ del e
+ raise PermissionDenied(mypath)
+ ftype = []
+ for x in list:
+ try:
+ if followSymlinks:
+ pathstat = os.stat(mypath+"/"+x)
+ else:
+ pathstat = os.lstat(mypath+"/"+x)
+
+ if stat.S_ISREG(pathstat[stat.ST_MODE]):
+ ftype.append(0)
+ elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
+ ftype.append(1)
+ elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
+ ftype.append(2)
+ else:
+ ftype.append(3)
+ except (IOError, OSError):
+ ftype.append(3)
+ dircache[mypath] = mtime, list, ftype
+
+ ret_list = []
+ ret_ftype = []
+ for x in range(0, len(list)):
+ if list[x] in ignorelist:
+ pass
+ elif ignorecvs:
+ if list[x][:2] != ".#" and \
+ not (ftype[x] == 1 and list[x] in _ignorecvs_dirs):
+ ret_list.append(list[x])
+ ret_ftype.append(ftype[x])
+ else:
+ ret_list.append(list[x])
+ ret_ftype.append(ftype[x])
+
+ writemsg("cacheddirStats: H:%d/M:%d/S:%d\n" % (cacheHit, cacheMiss, cacheStale),10)
+ return ret_list, ret_ftype
+
+def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelist=[], followSymlinks=True,
+ EmptyOnError=False, dirsonly=False):
+ """
+ Portage-specific implementation of os.listdir
+
+ @param mypath: Path whose contents you wish to list
+ @type mypath: String
+ @param recursive: Recursively scan directories contained within mypath
+ @type recursive: Boolean
+ @param filesonly: Only return files, not directories
+ @type filesonly: Boolean
+ @param ignorecvs: Ignore CVS directories ('CVS','SCCS','.svn','.git')
+ @type ignorecvs: Boolean
+ @param ignorelist: List of filenames/directories to exclude
+ @type ignorelist: List
+ @param followSymlinks: Follow Symlink'd files and directories
+ @type followSymlinks: Boolean
+ @param EmptyOnError: Return [] if an error occurs (deprecated, always True)
+ @type EmptyOnError: Boolean
+ @param dirsonly: Only return directories.
+ @type dirsonly: Boolean
+ @rtype: List
+ @returns: A list of files and directories (or just files or just directories) or an empty list.
+ """
+
+ list, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)
+
+ if list is None:
+ list=[]
+ if ftype is None:
+ ftype=[]
+
+ if not (filesonly or dirsonly or recursive):
+ return list
+
+ if recursive:
+ x=0
+ while x<len(ftype):
+ if ftype[x] == 1:
+ l,f = cacheddir(mypath+"/"+list[x], ignorecvs, ignorelist, EmptyOnError,
+ followSymlinks)
+
+ l=l[:]
+ for y in range(0,len(l)):
+ l[y]=list[x]+"/"+l[y]
+ list=list+l
+ ftype=ftype+f
+ x+=1
+ if filesonly:
+ rlist=[]
+ for x in range(0,len(ftype)):
+ if ftype[x]==0:
+ rlist=rlist+[list[x]]
+ elif dirsonly:
+ rlist = []
+ for x in range(0, len(ftype)):
+ if ftype[x] == 1:
+ rlist = rlist + [list[x]]
+ else:
+ rlist=list
+
+ return rlist
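Typical invocations, with example paths:

    from portage.util.listdir import listdir

    # All entries of a directory, served from the mtime-based cache.
    entries = listdir("/etc/env.d")

    # Regular files only, recursively, skipping VCS dirs and one name.
    files = listdir("/etc/portage", recursive=True, filesonly=True,
        ignorecvs=True, ignorelist=[".keep"])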
diff --git a/portage_with_autodep/pym/portage/util/movefile.py b/portage_with_autodep/pym/portage/util/movefile.py
new file mode 100644
index 0000000..30cb6f1
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/movefile.py
@@ -0,0 +1,242 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['movefile']
+
+import errno
+import os as _os
+import shutil as _shutil
+import stat
+
+import portage
+from portage import bsd_chflags, _encodings, _os_overrides, _selinux, \
+ _unicode_decode, _unicode_func_wrapper, _unicode_module_wrapper
+from portage.const import MOVE_BINARY
+from portage.localization import _
+from portage.process import spawn
+from portage.util import writemsg
+
+def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
+ hardlink_candidates=None, encoding=_encodings['fs']):
+ """moves a file from src to dest, preserving all permissions and attributes; mtime will
+ be preserved even when moving across filesystems. Returns the integral mtime of dest on
+ success and None on failure. Move is atomic."""
+ #print "movefile("+str(src)+","+str(dest)+","+str(newmtime)+","+str(sstat)+")"
+
+ if mysettings is None:
+ mysettings = portage.settings
+
+ selinux_enabled = mysettings.selinux_enabled()
+ if selinux_enabled:
+ selinux = _unicode_module_wrapper(_selinux, encoding=encoding)
+
+ lchown = _unicode_func_wrapper(portage.data.lchown, encoding=encoding)
+ os = _unicode_module_wrapper(_os,
+ encoding=encoding, overrides=_os_overrides)
+ shutil = _unicode_module_wrapper(_shutil, encoding=encoding)
+
+ try:
+ if not sstat:
+ sstat=os.lstat(src)
+
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ print(_("!!! Stating source file failed... movefile()"))
+ print("!!!",e)
+ return None
+
+ destexists=1
+ try:
+ dstat=os.lstat(dest)
+ except (OSError, IOError):
+ dstat=os.lstat(os.path.dirname(dest))
+ destexists=0
+
+ if bsd_chflags:
+ if destexists and dstat.st_flags != 0:
+ bsd_chflags.lchflags(dest, 0)
+ # Use normal stat/chflags for the parent since we want to
+ # follow any symlinks to the real parent directory.
+ pflags = os.stat(os.path.dirname(dest)).st_flags
+ if pflags != 0:
+ bsd_chflags.chflags(os.path.dirname(dest), 0)
+
+ if destexists:
+ if stat.S_ISLNK(dstat[stat.ST_MODE]):
+ try:
+ os.unlink(dest)
+ destexists=0
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ pass
+
+ if stat.S_ISLNK(sstat[stat.ST_MODE]):
+ try:
+ target=os.readlink(src)
+ if mysettings and mysettings["D"]:
+ if target.find(mysettings["D"])==0:
+ target=target[len(mysettings["D"]):]
+ if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
+ os.unlink(dest)
+ try:
+ if selinux_enabled:
+ selinux.symlink(target, dest, src)
+ else:
+ os.symlink(target, dest)
+ except OSError as e:
+ # Some programs will create symlinks automatically, so we have
+ # to tolerate these links being recreated during the merge
+ # process. In any case, if the link is pointing at the right
+ # place, we're in good shape.
+ if e.errno not in (errno.ENOENT, errno.EEXIST) or \
+ target != os.readlink(dest):
+ raise
+ lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
+ # utime() only works on the target of a symlink, so it's not
+ # possible to preserve mtime on symlinks.
+ return os.lstat(dest)[stat.ST_MTIME]
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ print(_("!!! failed to properly create symlink:"))
+ print("!!!",dest,"->",target)
+ print("!!!",e)
+ return None
+
+ hardlinked = False
+ # Since identical files might be merged to multiple filesystems,
+ # os.link() calls might fail for some paths, so try them all.
+ # For atomic replacement, first create the link as a temp file
+ # and then use os.rename() to replace the destination.
+ if hardlink_candidates:
+ head, tail = os.path.split(dest)
+ hardlink_tmp = os.path.join(head, ".%s._portage_merge_.%s" % \
+ (tail, os.getpid()))
+ try:
+ os.unlink(hardlink_tmp)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ writemsg(_("!!! Failed to remove hardlink temp file: %s\n") % \
+ (hardlink_tmp,), noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
+ return None
+ del e
+ for hardlink_src in hardlink_candidates:
+ try:
+ os.link(hardlink_src, hardlink_tmp)
+ except OSError:
+ continue
+ else:
+ try:
+ os.rename(hardlink_tmp, dest)
+ except OSError as e:
+ writemsg(_("!!! Failed to rename %s to %s\n") % \
+ (hardlink_tmp, dest), noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
+ return None
+ hardlinked = True
+ break
+
+ renamefailed=1
+ if hardlinked:
+ renamefailed = False
+ if not hardlinked and (selinux_enabled or sstat.st_dev == dstat.st_dev):
+ try:
+ if selinux_enabled:
+ selinux.rename(src, dest)
+ else:
+ os.rename(src,dest)
+ renamefailed=0
+ except OSError as e:
+ if e.errno != errno.EXDEV:
+ # Some random error.
+ print(_("!!! Failed to move %(src)s to %(dest)s") % {"src": src, "dest": dest})
+ print("!!!",e)
+ return None
+ # Invalid cross-device link: either 'bind' mounted or an actual cross-device move
+ if renamefailed:
+ didcopy=0
+ if stat.S_ISREG(sstat[stat.ST_MODE]):
+ try: # For safety copy then move it over.
+ if selinux_enabled:
+ selinux.copyfile(src, dest + "#new")
+ selinux.rename(dest + "#new", dest)
+ else:
+ shutil.copyfile(src,dest+"#new")
+ os.rename(dest+"#new",dest)
+ didcopy=1
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ print(_('!!! copy %(src)s -> %(dest)s failed.') % {"src": src, "dest": dest})
+ print("!!!",e)
+ return None
+ else:
+ #we don't yet handle special, so we need to fall back to /bin/mv
+ a = spawn([MOVE_BINARY, '-f', src, dest], env=os.environ)
+ if a != os.EX_OK:
+ writemsg(_("!!! Failed to move special file:\n"), noiselevel=-1)
+ writemsg(_("!!! '%(src)s' to '%(dest)s'\n") % \
+ {"src": _unicode_decode(src, encoding=encoding),
+ "dest": _unicode_decode(dest, encoding=encoding)}, noiselevel=-1)
+ writemsg("!!! %s\n" % a, noiselevel=-1)
+ return None # failure
+ try:
+ if didcopy:
+ if stat.S_ISLNK(sstat[stat.ST_MODE]):
+ lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
+ else:
+ os.chown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
+ os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
+ os.unlink(src)
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ print(_("!!! Failed to chown/chmod/unlink in movefile()"))
+ print("!!!",dest)
+ print("!!!",e)
+ return None
+
+ # Always use stat_obj[stat.ST_MTIME] for the integral timestamp which
+ # is returned, since the stat_obj.st_mtime float attribute rounds *up*
+ # if the nanosecond part of the timestamp is 999999881 ns or greater.
+ try:
+ if hardlinked:
+ newmtime = os.stat(dest)[stat.ST_MTIME]
+ else:
+ # Note: It is not possible to preserve nanosecond precision
+ # (supported in POSIX.1-2008 via utimensat) with the IEEE 754
+ # double precision float which only has a 53 bit significand.
+ if newmtime is not None:
+ os.utime(dest, (newmtime, newmtime))
+ else:
+ newmtime = sstat[stat.ST_MTIME]
+ if renamefailed:
+ # If rename succeeded then timestamps are automatically
+ # preserved with complete precision because the source
+ # and destination inode are the same. Otherwise, round
+ # down to the nearest whole second since python's float
+ # st_mtime cannot be used to preserve the st_mtim.tv_nsec
+ # field with complete precision. Note that we have to use
+ # stat_obj[stat.ST_MTIME] here because the float
+ # stat_obj.st_mtime rounds *up* sometimes.
+ os.utime(dest, (newmtime, newmtime))
+ except OSError:
+ # The utime can fail here with EPERM even though the move succeeded.
+ # Instead of failing, use stat to return the mtime if possible.
+ try:
+ newmtime = os.stat(dest)[stat.ST_MTIME]
+ except OSError as e:
+ writemsg(_("!!! Failed to stat in movefile()\n"), noiselevel=-1)
+ writemsg("!!! %s\n" % dest, noiselevel=-1)
+ writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ return None
+
+ if bsd_chflags:
+ # Restore the flags we saved before moving
+ if pflags:
+ bsd_chflags.chflags(os.path.dirname(dest), pflags)
+
+ return newmtime
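A sketch of a direct call (inside Portage this runs as part of the merge process; the paths are examples):

    import portage
    from portage.util.movefile import movefile

    mtime = movefile("/tmp/example.src", "/tmp/example.dst",
        mysettings=portage.settings)
    if mtime is None:
        print("move failed")
    else:
        print("moved; mtime preserved as %d" % mtime)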
diff --git a/portage_with_autodep/pym/portage/util/mtimedb.py b/portage_with_autodep/pym/portage/util/mtimedb.py
new file mode 100644
index 0000000..67f93e8
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/mtimedb.py
@@ -0,0 +1,81 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['MtimeDB']
+
+import copy
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+import portage
+from portage import _unicode_encode
+from portage.data import portage_gid, uid
+from portage.localization import _
+from portage.util import apply_secpass_permissions, atomic_ofstream, writemsg
+
+class MtimeDB(dict):
+ def __init__(self, filename):
+ dict.__init__(self)
+ self.filename = filename
+ self._load(filename)
+
+ def _load(self, filename):
+ try:
+ f = open(_unicode_encode(filename), 'rb')
+ mypickle = pickle.Unpickler(f)
+ try:
+ mypickle.find_global = None
+ except AttributeError:
+ # TODO: If py3k, override Unpickler.find_class().
+ pass
+ d = mypickle.load()
+ f.close()
+ del f
+ except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError) as e:
+ if isinstance(e, pickle.UnpicklingError):
+ writemsg(_("!!! Error loading '%s': %s\n") % \
+ (filename, str(e)), noiselevel=-1)
+ del e
+ d = {}
+
+ if "old" in d:
+ d["updates"] = d["old"]
+ del d["old"]
+ if "cur" in d:
+ del d["cur"]
+
+ d.setdefault("starttime", 0)
+ d.setdefault("version", "")
+ for k in ("info", "ldpath", "updates"):
+ d.setdefault(k, {})
+
+ mtimedbkeys = set(("info", "ldpath", "resume", "resume_backup",
+ "starttime", "updates", "version"))
+
+ for k in list(d):
+ if k not in mtimedbkeys:
+ writemsg(_("Deleting invalid mtimedb key: %s\n") % str(k))
+ del d[k]
+ self.update(d)
+ self._clean_data = copy.deepcopy(d)
+
+ def commit(self):
+ if not self.filename:
+ return
+ d = {}
+ d.update(self)
+ # Only commit if the internal state has changed.
+ if d != self._clean_data:
+ d["version"] = str(portage.VERSION)
+ try:
+ f = atomic_ofstream(self.filename, mode='wb')
+ except EnvironmentError:
+ pass
+ else:
+ pickle.dump(d, f, protocol=2)
+ f.close()
+ apply_secpass_permissions(self.filename,
+ uid=uid, gid=portage_gid, mode=0o644)
+ self._clean_data = copy.deepcopy(d)
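MtimeDB behaves like a plain dict persisted with pickle. A minimal round trip, assuming the location Portage conventionally uses under /var/cache/edb:

    from portage.util.mtimedb import MtimeDB

    mtimedb = MtimeDB("/var/cache/edb/mtimedb")
    mtimedb["info"]["example-key"] = 12345  # any picklable value
    mtimedb.commit()  # atomic write, performed only if contents changed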
diff --git a/portage_with_autodep/pym/portage/versions.py b/portage_with_autodep/pym/portage/versions.py
new file mode 100644
index 0000000..f8691d1
--- /dev/null
+++ b/portage_with_autodep/pym/portage/versions.py
@@ -0,0 +1,403 @@
+# versions.py -- core Portage functionality
+# Copyright 1998-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = [
+ 'best', 'catpkgsplit', 'catsplit',
+ 'cpv_getkey', 'cpv_getversion', 'cpv_sort_key', 'pkgcmp', 'pkgsplit',
+ 'ververify', 'vercmp'
+]
+
+import re
+import warnings
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.util:cmp_sort_key'
+)
+from portage.localization import _
+
+# \w is [a-zA-Z0-9_]
+
+# 2.1.1 A category name may contain any of the characters [A-Za-z0-9+_.-].
+# It must not begin with a hyphen or a dot.
+_cat = r'[\w+][\w+.-]*'
+
+# 2.1.2 A package name may contain any of the characters [A-Za-z0-9+_-].
+# It must not begin with a hyphen,
+# and must not end in a hyphen followed by one or more digits.
+_pkg = r'[\w+][\w+-]*?'
+
+_v = r'(cvs\.)?(\d+)((\.\d+)*)([a-z]?)((_(pre|p|beta|alpha|rc)\d*)*)'
+_rev = r'\d+'
+_vr = _v + '(-r(' + _rev + '))?'
+
+_cp = '(' + _cat + '/' + _pkg + '(-' + _vr + ')?)'
+_cpv = '(' + _cp + '-' + _vr + ')'
+_pv = '(?P<pn>' + _pkg + '(?P<pn_inval>-' + _vr + ')?)' + '-(?P<ver>' + _v + ')(-r(?P<rev>' + _rev + '))?'
+
+ver_regexp = re.compile("^" + _vr + "$")
+suffix_regexp = re.compile("^(alpha|beta|rc|pre|p)(\\d*)$")
+suffix_value = {"pre": -2, "p": 0, "alpha": -4, "beta": -3, "rc": -1}
+endversion_keys = ["pre", "p", "alpha", "beta", "rc"]
+
+def ververify(myver, silent=1):
+ if ver_regexp.match(myver):
+ return 1
+ else:
+ if not silent:
+ print(_("!!! syntax error in version: %s") % myver)
+ return 0
+
+vercmp_cache = {}
+def vercmp(ver1, ver2, silent=1):
+ """
+ Compare two versions
+ Example usage:
+ >>> from portage.versions import vercmp
+ >>> vercmp('1.0-r1','1.2-r3')
+ negative number
+ >>> vercmp('1.3','1.2-r3')
+ positive number
+ >>> vercmp('1.0_p3','1.0_p3')
+ 0
+
+ @param ver1: version to compare with (see ver_regexp in portage.versions.py)
+ @type ver1: string (example: "2.1.2-r3")
+ @param ver2: version to compare against (see ver_regexp in portage.versions.py)
+ @type ver2: string (example: "2.1.2_rc5")
+ @rtype: None or integer
+ @return:
+ 1. positive if ver1 is greater than ver2
+ 2. negative if ver1 is less than ver2
+ 3. 0 if ver1 equals ver2
+ 4. None if ver1 or ver2 are invalid (see ver_regexp in portage.versions.py)
+ """
+
+ if ver1 == ver2:
+ return 0
+ mykey=ver1+":"+ver2
+ try:
+ return vercmp_cache[mykey]
+ except KeyError:
+ pass
+ match1 = ver_regexp.match(ver1)
+ match2 = ver_regexp.match(ver2)
+
+ # checking that the versions are valid
+ if not match1 or not match1.groups():
+ if not silent:
+ print(_("!!! syntax error in version: %s") % ver1)
+ return None
+ if not match2 or not match2.groups():
+ if not silent:
+ print(_("!!! syntax error in version: %s") % ver2)
+ return None
+
+ # shortcut for cvs ebuilds (new style)
+ if match1.group(1) and not match2.group(1):
+ vercmp_cache[mykey] = 1
+ return 1
+ elif match2.group(1) and not match1.group(1):
+ vercmp_cache[mykey] = -1
+ return -1
+
+ # building lists of the version parts before the suffix
+ # first part is simple
+ list1 = [int(match1.group(2))]
+ list2 = [int(match2.group(2))]
+
+ # this part would greatly benefit from a fixed-length version pattern
+ if match1.group(3) or match2.group(3):
+ vlist1 = match1.group(3)[1:].split(".")
+ vlist2 = match2.group(3)[1:].split(".")
+
+ for i in range(0, max(len(vlist1), len(vlist2))):
+ # Implicit .0 is given a value of -1, so that 1.0.0 > 1.0, since it
+ # would be ambiguous if two versions that aren't literally equal
+ # are given the same value (in sorting, for example).
+ if len(vlist1) <= i or len(vlist1[i]) == 0:
+ list1.append(-1)
+ list2.append(int(vlist2[i]))
+ elif len(vlist2) <= i or len(vlist2[i]) == 0:
+ list1.append(int(vlist1[i]))
+ list2.append(-1)
+ # Let's make life easy and use integers unless we're forced to use floats
+ elif (vlist1[i][0] != "0" and vlist2[i][0] != "0"):
+ list1.append(int(vlist1[i]))
+ list2.append(int(vlist2[i]))
+ # now we have to use floats so 1.02 compares correctly against 1.1
+ else:
+ # list1.append(float("0."+vlist1[i]))
+ # list2.append(float("0."+vlist2[i]))
+ # Since python floats have limited range, we multiply both
+ # floating point representations by a constant so that they are
+ # transformed into whole numbers. This allows the practically
+ # infinite range of a python int to be exploited. The
+ # multiplication is done by padding both literal strings with
+ # zeros as necessary to ensure equal length.
+ max_len = max(len(vlist1[i]), len(vlist2[i]))
+ list1.append(int(vlist1[i].ljust(max_len, "0")))
+ list2.append(int(vlist2[i].ljust(max_len, "0")))
+
+ # and now the final letter
+ # NOTE: Behavior changed in r2309 (between portage-2.0.x and portage-2.1).
+	# The new behavior is 12.2.5 > 12.2b which, depending on how you look at it,
+ # may seem counter-intuitive. However, if you really think about it, it
+ # seems like it's probably safe to assume that this is the behavior that
+ # is intended by anyone who would use versions such as these.
+ if len(match1.group(5)):
+ list1.append(ord(match1.group(5)))
+ if len(match2.group(5)):
+ list2.append(ord(match2.group(5)))
+
+ for i in range(0, max(len(list1), len(list2))):
+ if len(list1) <= i:
+ vercmp_cache[mykey] = -1
+ return -1
+ elif len(list2) <= i:
+ vercmp_cache[mykey] = 1
+ return 1
+ elif list1[i] != list2[i]:
+ a = list1[i]
+ b = list2[i]
+ rval = (a > b) - (a < b)
+ vercmp_cache[mykey] = rval
+ return rval
+
+ # main version is equal, so now compare the _suffix part
+ list1 = match1.group(6).split("_")[1:]
+ list2 = match2.group(6).split("_")[1:]
+
+ for i in range(0, max(len(list1), len(list2))):
+ # Implicit _p0 is given a value of -1, so that 1 < 1_p0
+ if len(list1) <= i:
+ s1 = ("p","-1")
+ else:
+ s1 = suffix_regexp.match(list1[i]).groups()
+ if len(list2) <= i:
+ s2 = ("p","-1")
+ else:
+ s2 = suffix_regexp.match(list2[i]).groups()
+ if s1[0] != s2[0]:
+ a = suffix_value[s1[0]]
+ b = suffix_value[s2[0]]
+ rval = (a > b) - (a < b)
+ vercmp_cache[mykey] = rval
+ return rval
+ if s1[1] != s2[1]:
+ # it's possible that the s(1|2)[1] == ''
+ # in such a case, fudge it.
+ try:
+ r1 = int(s1[1])
+ except ValueError:
+ r1 = 0
+ try:
+ r2 = int(s2[1])
+ except ValueError:
+ r2 = 0
+ rval = (r1 > r2) - (r1 < r2)
+ if rval:
+ vercmp_cache[mykey] = rval
+ return rval
+
+	# the suffix part is equal, so finally check the revision
+ if match1.group(10):
+ r1 = int(match1.group(10))
+ else:
+ r1 = 0
+ if match2.group(10):
+ r2 = int(match2.group(10))
+ else:
+ r2 = 0
+ rval = (r1 > r2) - (r1 < r2)
+ vercmp_cache[mykey] = rval
+ return rval
+
+def pkgcmp(pkg1, pkg2):
+ """
+ Compare 2 package versions created in pkgsplit format.
+
+ Example usage:
+ >>> from portage.versions import *
+ >>> pkgcmp(pkgsplit('test-1.0-r1'),pkgsplit('test-1.2-r3'))
+ -1
+ >>> pkgcmp(pkgsplit('test-1.3'),pkgsplit('test-1.2-r3'))
+ 1
+
+ @param pkg1: package to compare with
+ @type pkg1: list (example: ['test', '1.0', 'r1'])
+	@param pkg2: package to compare against
+ @type pkg2: list (example: ['test', '1.0', 'r1'])
+ @rtype: None or integer
+ @return:
+ 1. None if package names are not the same
+ 2. 1 if pkg1 is greater than pkg2
+ 3. -1 if pkg1 is less than pkg2
+ 4. 0 if pkg1 equals pkg2
+ """
+ if pkg1[0] != pkg2[0]:
+ return None
+ return vercmp("-".join(pkg1[1:]), "-".join(pkg2[1:]))
+
+_pv_re = re.compile('^' + _pv + '$', re.VERBOSE)
+
+def _pkgsplit(mypkg):
+ """
+ @param mypkg: pv
+ @return:
+ 1. None if input is invalid.
+ 2. (pn, ver, rev) if input is pv
+ """
+ m = _pv_re.match(mypkg)
+ if m is None:
+ return None
+
+ if m.group('pn_inval') is not None:
+ # package name appears to have a version-like suffix
+ return None
+
+ rev = m.group('rev')
+ if rev is None:
+ rev = '0'
+ rev = 'r' + rev
+
+ return (m.group('pn'), m.group('ver'), rev)
+
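+# For illustration (not from the original sources): _pkgsplit('portage-2.1.2-r3')
+# should return ('portage', '2.1.2', 'r3'), and _pkgsplit('portage-2.1.2')
+# should return ('portage', '2.1.2', 'r0'), since a missing revision defaults to 0.
+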
+_cat_re = re.compile('^%s$' % _cat)
+_missing_cat = 'null'
+catcache={}
+def catpkgsplit(mydata,silent=1):
+ """
+	Takes a Category/Package-Version-Rev and returns a tuple of each.
+
+ @param mydata: Data to split
+ @type mydata: string
+ @param silent: suppress error messages
+ @type silent: Boolean (integer)
+	@rtype: tuple
+	@return:
+	1. If each exists, it returns (cat, pkgname, version, rev)
+	2. If cat is not specified in mydata, cat will be "null"
+	3. If rev does not exist, it will be 'r0'
+ """
+
+ try:
+ return catcache[mydata]
+ except KeyError:
+ pass
+ mysplit = mydata.split('/', 1)
+ p_split=None
+ if len(mysplit)==1:
+ cat = _missing_cat
+ p_split = _pkgsplit(mydata)
+ elif len(mysplit)==2:
+ cat = mysplit[0]
+ if _cat_re.match(cat) is not None:
+ p_split = _pkgsplit(mysplit[1])
+ if not p_split:
+ catcache[mydata]=None
+ return None
+ retval = (cat, p_split[0], p_split[1], p_split[2])
+ catcache[mydata]=retval
+ return retval
+
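+# For example (illustrative): catpkgsplit('sys-apps/portage-2.1.2-r3') returns
+# ('sys-apps', 'portage', '2.1.2', 'r3'), while catpkgsplit('portage-2.1.2')
+# returns ('null', 'portage', '2.1.2', 'r0').
+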
+def pkgsplit(mypkg, silent=1):
+ """
+ @param mypkg: either a pv or cpv
+ @return:
+ 1. None if input is invalid.
+ 2. (pn, ver, rev) if input is pv
+ 3. (cp, ver, rev) if input is a cpv
+ """
+ catpsplit = catpkgsplit(mypkg)
+ if catpsplit is None:
+ return None
+ cat, pn, ver, rev = catpsplit
+ if cat is _missing_cat and '/' not in mypkg:
+ return (pn, ver, rev)
+ else:
+ return (cat + '/' + pn, ver, rev)
+
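+# For example (illustrative): pkgsplit('sys-apps/portage-2.1.2-r3') returns
+# ('sys-apps/portage', '2.1.2', 'r3'), while pkgsplit('portage-2.1.2-r3')
+# returns ('portage', '2.1.2', 'r3').
+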
+def cpv_getkey(mycpv):
+ """Calls catpkgsplit on a cpv and returns only the cp."""
+ mysplit = catpkgsplit(mycpv)
+ if mysplit is not None:
+ return mysplit[0] + '/' + mysplit[1]
+
+ warnings.warn("portage.versions.cpv_getkey() " + \
+ "called with invalid cpv: '%s'" % (mycpv,),
+ DeprecationWarning, stacklevel=2)
+
+ myslash = mycpv.split("/", 1)
+ mysplit = _pkgsplit(myslash[-1])
+ if mysplit is None:
+ return None
+ mylen = len(myslash)
+ if mylen == 2:
+ return myslash[0] + "/" + mysplit[0]
+ else:
+ return mysplit[0]
+
+def cpv_getversion(mycpv):
+	"""Returns the v (including revision) from a cpv."""
+ cp = cpv_getkey(mycpv)
+ if cp is None:
+ return None
+ return mycpv[len(cp+"-"):]
+
+def cpv_sort_key():
+ """
+ Create an object for sorting cpvs, to be used as the 'key' parameter
+ in places like list.sort() or sorted(). This calls catpkgsplit() once for
+ each cpv and caches the result. If a given cpv is invalid or two cpvs
+ have different category/package names, then plain string (> and <)
+ comparison is used.
+
+ @rtype: key object for sorting
+ @return: object for use as the 'key' parameter in places like
+ list.sort() or sorted()
+ """
+
+ split_cache = {}
+
+ def cmp_cpv(cpv1, cpv2):
+
+ split1 = split_cache.get(cpv1, False)
+ if split1 is False:
+ split1 = catpkgsplit(cpv1)
+ if split1 is not None:
+ split1 = (split1[:2], '-'.join(split1[2:]))
+ split_cache[cpv1] = split1
+
+ split2 = split_cache.get(cpv2, False)
+ if split2 is False:
+ split2 = catpkgsplit(cpv2)
+ if split2 is not None:
+ split2 = (split2[:2], '-'.join(split2[2:]))
+ split_cache[cpv2] = split2
+
+ if split1 is None or split2 is None or split1[0] != split2[0]:
+ return (cpv1 > cpv2) - (cpv1 < cpv2)
+
+ return vercmp(split1[1], split2[1])
+
+ return cmp_sort_key(cmp_cpv)
+
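+# Illustrative usage of cpv_sort_key(); plain string sorting would instead
+# place 'app-misc/foo-1.10' before 'app-misc/foo-1.2':
+# >>> sorted(['app-misc/foo-1.10', 'app-misc/foo-1.2'], key=cpv_sort_key())
+# ['app-misc/foo-1.2', 'app-misc/foo-1.10']
+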
+def catsplit(mydep):
+ return mydep.split("/", 1)
+
+def best(mymatches):
+ """Accepts None arguments; assumes matches are valid."""
+ if not mymatches:
+ return ""
+ if len(mymatches) == 1:
+ return mymatches[0]
+ bestmatch = mymatches[0]
+ p2 = catpkgsplit(bestmatch)[1:]
+ for x in mymatches[1:]:
+ p1 = catpkgsplit(x)[1:]
+ if pkgcmp(p1, p2) > 0:
+ bestmatch = x
+ p2 = catpkgsplit(bestmatch)[1:]
+ return bestmatch
diff --git a/portage_with_autodep/pym/portage/xml/__init__.py b/portage_with_autodep/pym/portage/xml/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/xml/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/portage_with_autodep/pym/portage/xml/metadata.py b/portage_with_autodep/pym/portage/xml/metadata.py
new file mode 100644
index 0000000..7acc1f3
--- /dev/null
+++ b/portage_with_autodep/pym/portage/xml/metadata.py
@@ -0,0 +1,376 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""Provides an easy-to-use python interface to Gentoo's metadata.xml file.
+
+ Example usage:
+ >>> from portage.xml.metadata import MetaDataXML
+	>>> pkg_md = MetaDataXML('/usr/portage/app-misc/gourmet/metadata.xml',
+	...		'/usr/portage/metadata/herds.xml')
+ >>> pkg_md
+ <MetaDataXML '/usr/portage/app-misc/gourmet/metadata.xml'>
+ >>> pkg_md.herds()
+ ['no-herd']
+ >>> for maint in pkg_md.maintainers():
+ ... print "{0} ({1})".format(maint.email, maint.name)
+ ...
+ nixphoeni@gentoo.org (Joe Sapp)
+ >>> for flag in pkg_md.use():
+ ... print flag.name, "->", flag.description
+ ...
+ rtf -> Enable export to RTF
+ gnome-print -> Enable printing support using gnome-print
+ >>> upstream = pkg_md.upstream()
+ >>> upstream
+ [<_Upstream {'docs': [], 'remoteid': [], 'maintainer':
+ [<_Maintainer 'Thomas_Hinkle@alumni.brown.edu'>], 'bugtracker': [],
+ 'changelog': []}>]
+ >>> upstream[0].maintainer[0].name
+ 'Thomas Mills Hinkle'
+"""
+
+__all__ = ('MetaDataXML',)
+
+try:
+ import xml.etree.cElementTree as etree
+except ImportError:
+ import xml.etree.ElementTree as etree
+
+import re
+import portage
+from portage import os
+from portage.util import unique_everseen
+
+class _Maintainer(object):
+ """An object for representing one maintainer.
+
+ @type email: str or None
+ @ivar email: Maintainer's email address. Used for both Gentoo and upstream.
+ @type name: str or None
+ @ivar name: Maintainer's name. Used for both Gentoo and upstream.
+ @type description: str or None
+ @ivar description: Description of what a maintainer does. Gentoo only.
+ @type restrict: str or None
+ @ivar restrict: e.g. &gt;=portage-2.2 means only maintains versions
+ of Portage greater than 2.2. Should be DEPEND string with < and >
+ converted to &lt; and &gt; respectively.
+ @type status: str or None
+ @ivar status: If set, either 'active' or 'inactive'. Upstream only.
+ """
+
+ def __init__(self, node):
+ self.email = None
+ self.name = None
+ self.description = None
+ self.restrict = node.get('restrict')
+ self.status = node.get('status')
+ maint_attrs = node.getchildren()
+ for attr in maint_attrs:
+ setattr(self, attr.tag, attr.text)
+
+ def __repr__(self):
+ return "<%s %r>" % (self.__class__.__name__, self.email)
+
+
+class _Useflag(object):
+ """An object for representing one USE flag.
+
+ @todo: Is there any way to have a keyword option to leave in
+ <pkg> and <cat> for later processing?
+ @type name: str or None
+ @ivar name: USE flag
+ @type restrict: str or None
+ @ivar restrict: e.g. &gt;=portage-2.2 means flag is only available in
+ versions greater than 2.2
+ @type description: str
+ @ivar description: description of the USE flag
+ """
+
+ def __init__(self, node):
+ self.name = node.get('name')
+ self.restrict = node.get('restrict')
+ _desc = ''
+ if node.text:
+ _desc = node.text
+ for child in node.getchildren():
+ _desc += child.text if child.text else ''
+ _desc += child.tail if child.tail else ''
+ # This takes care of tabs and newlines left from the file
+		self.description = re.sub(r'\s+', ' ', _desc)
+
+ def __repr__(self):
+ return "<%s %r>" % (self.__class__.__name__, self.name)
+
+
+class _Upstream(object):
+ """An object for representing one package's upstream.
+
+ @type maintainers: list
+ @ivar maintainers: L{_Maintainer} objects for each upstream maintainer
+ @type changelogs: list
+ @ivar changelogs: URLs to upstream's ChangeLog file in str format
+ @type docs: list
+ @ivar docs: Sequence of tuples containing URLs to upstream documentation
+ in the first slot and 'lang' attribute in the second, e.g.,
+ [('http.../docs/en/tut.html', None), ('http.../doc/fr/tut.html', 'fr')]
+ @type bugtrackers: list
+ @ivar bugtrackers: URLs to upstream's bugtracker. May also contain an email
+ address if prepended with 'mailto:'
+ @type remoteids: list
+ @ivar remoteids: Sequence of tuples containing the project's hosting site
+ name in the first slot and the project's ID name or number for that
+ site in the second, e.g., [('sourceforge', 'systemrescuecd')]
+ """
+
+ def __init__(self, node):
+ self.node = node
+ self.maintainers = self.upstream_maintainers()
+ self.changelogs = self.upstream_changelogs()
+ self.docs = self.upstream_documentation()
+ self.bugtrackers = self.upstream_bugtrackers()
+ self.remoteids = self.upstream_remoteids()
+
+ def __repr__(self):
+ return "<%s %r>" % (self.__class__.__name__, self.__dict__)
+
+ def upstream_bugtrackers(self):
+ """Retrieve upstream bugtracker location from xml node."""
+ return [e.text for e in self.node.findall('bugs-to')]
+
+ def upstream_changelogs(self):
+ """Retrieve upstream changelog location from xml node."""
+ return [e.text for e in self.node.findall('changelog')]
+
+ def upstream_documentation(self):
+ """Retrieve upstream documentation location from xml node."""
+ result = []
+ for elem in self.node.findall('doc'):
+ lang = elem.get('lang')
+ result.append((elem.text, lang))
+ return result
+
+ def upstream_maintainers(self):
+ """Retrieve upstream maintainer information from xml node."""
+ return [_Maintainer(m) for m in self.node.findall('maintainer')]
+
+ def upstream_remoteids(self):
+ """Retrieve upstream remote ID from xml node."""
+ return [(e.text, e.get('type')) for e in self.node.findall('remote-id')]
+
+
+class MetaDataXML(object):
+ """Access metadata.xml"""
+
+ def __init__(self, metadata_xml_path, herds):
+ """Parse a valid metadata.xml file.
+
+ @type metadata_xml_path: str
+ @param metadata_xml_path: path to a valid metadata.xml file
+ @type herds: str or ElementTree
+ @param herds: path to a herds.xml, or a pre-parsed ElementTree
+ @raise IOError: if C{metadata_xml_path} can not be read
+ """
+
+ self.metadata_xml_path = metadata_xml_path
+ self._xml_tree = None
+
+ try:
+ self._xml_tree = etree.parse(metadata_xml_path)
+ except ImportError:
+ pass
+
+ if isinstance(herds, etree.ElementTree):
+ herds_etree = herds
+ herds_path = None
+ else:
+ herds_etree = None
+ herds_path = herds
+
+ # Used for caching
+ self._herdstree = herds_etree
+ self._herds_path = herds_path
+ self._descriptions = None
+ self._maintainers = None
+ self._herds = None
+ self._useflags = None
+ self._upstream = None
+
+ def __repr__(self):
+ return "<%s %r>" % (self.__class__.__name__, self.metadata_xml_path)
+
+ def _get_herd_email(self, herd):
+ """Get a herd's email address.
+
+ @type herd: str
+ @param herd: herd whose email you want
+ @rtype: str or None
+		@return: email address, or None if the herd is not listed in
+		    herds.xml or herds.xml cannot be read
+ """
+
+ if self._herdstree is None:
+ try:
+ self._herdstree = etree.parse(self._herds_path)
+ except (ImportError, IOError, SyntaxError):
+ return None
+
+ # Some special herds are not listed in herds.xml
+ if herd in ('no-herd', 'maintainer-wanted', 'maintainer-needed'):
+ return None
+
+ for node in self._herdstree.getiterator('herd'):
+ if node.findtext('name') == herd:
+ return node.findtext('email')
+
+ def herds(self, include_email=False):
+ """Return a list of text nodes for <herd>.
+
+ @type include_email: bool
+ @keyword include_email: if True, also look up the herd's email
+ @rtype: tuple
+		@return: if include_email is False, return a tuple of strings;
+		    if include_email is True, return a tuple of tuples, e.g.
+		    (('herd1', 'herd1@gentoo.org'), ('no-herd', None))
+ """
+ if self._herds is None:
+ if self._xml_tree is None:
+ self._herds = tuple()
+ else:
+ herds = []
+ for elem in self._xml_tree.findall('herd'):
+ text = elem.text
+ if text is None:
+ text = ''
+ if include_email:
+ herd_mail = self._get_herd_email(text)
+ herds.append((text, herd_mail))
+ else:
+ herds.append(text)
+ self._herds = tuple(herds)
+
+ return self._herds
+
+ def descriptions(self):
+ """Return a list of text nodes for <longdescription>.
+
+		@rtype: tuple
+		@return: package descriptions in string format
+ @todo: Support the C{lang} attribute
+ """
+ if self._descriptions is None:
+ if self._xml_tree is None:
+ self._descriptions = tuple()
+ else:
+ self._descriptions = tuple(e.text \
+ for e in self._xml_tree.findall("longdescription"))
+
+ return self._descriptions
+
+ def maintainers(self):
+ """Get maintainers' name, email and description.
+
+		@rtype: tuple
+ @return: a sequence of L{_Maintainer} objects in document order.
+ """
+
+ if self._maintainers is None:
+ if self._xml_tree is None:
+ self._maintainers = tuple()
+ else:
+ self._maintainers = tuple(_Maintainer(node) \
+ for node in self._xml_tree.findall('maintainer'))
+
+ return self._maintainers
+
+ def use(self):
+ """Get names and descriptions for USE flags defined in metadata.
+
+		@rtype: tuple
+ @return: a sequence of L{_Useflag} objects in document order.
+ """
+
+ if self._useflags is None:
+ if self._xml_tree is None:
+ self._useflags = tuple()
+ else:
+ self._useflags = tuple(_Useflag(node) \
+ for node in self._xml_tree.getiterator('flag'))
+
+ return self._useflags
+
+ def upstream(self):
+ """Get upstream contact information.
+
+		@rtype: tuple
+ @return: a sequence of L{_Upstream} objects in document order.
+ """
+
+ if self._upstream is None:
+ if self._xml_tree is None:
+ self._upstream = tuple()
+ else:
+ self._upstream = tuple(_Upstream(node) \
+ for node in self._xml_tree.findall('upstream'))
+
+ return self._upstream
+
+ def format_maintainer_string(self):
+ """Format string containing maintainers and herds (emails if possible).
+ Used by emerge to display maintainer information.
+ Entries are sorted according to the rules stated on the bug wranglers page.
+
+ @rtype: String
+ @return: a string containing maintainers and herds
+ """
+ maintainers = []
+ for maintainer in self.maintainers():
+ if maintainer.email is None or not maintainer.email.strip():
+ if maintainer.name and maintainer.name.strip():
+ maintainers.append(maintainer.name)
+ else:
+ maintainers.append(maintainer.email)
+
+ for herd, email in self.herds(include_email=True):
+ if herd == "no-herd":
+ continue
+ if email is None or not email.strip():
+ if herd and herd.strip():
+ maintainers.append(herd)
+ else:
+ maintainers.append(email)
+
+ maintainers = list(unique_everseen(maintainers))
+
+ maint_str = ""
+ if maintainers:
+ maint_str = maintainers[0]
+ maintainers = maintainers[1:]
+ if maintainers:
+ maint_str += " " + ",".join(maintainers)
+
+ return maint_str
+
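+	# Illustrative output shape (hypothetical addresses): with maintainers
+	# a@gentoo.org and b@gentoo.org plus herd email x@gentoo.org, this returns
+	# "a@gentoo.org b@gentoo.org,x@gentoo.org" (the first entry, then a space
+	# and a comma-separated list of the rest).
+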
+ def format_upstream_string(self):
+ """Format string containing upstream maintainers and bugtrackers.
+ Used by emerge to display upstream information.
+
+ @rtype: String
+ @return: a string containing upstream maintainers and bugtrackers
+ """
+ maintainers = []
+ for upstream in self.upstream():
+ for maintainer in upstream.maintainers:
+ if maintainer.email is None or not maintainer.email.strip():
+ if maintainer.name and maintainer.name.strip():
+ maintainers.append(maintainer.name)
+ else:
+ maintainers.append(maintainer.email)
+
+ for bugtracker in upstream.bugtrackers:
+ if bugtracker.startswith("mailto:"):
+ bugtracker = bugtracker[7:]
+ maintainers.append(bugtracker)
+
+ maintainers = list(unique_everseen(maintainers))
+ maint_str = " ".join(maintainers)
+ return maint_str
diff --git a/portage_with_autodep/pym/portage/xpak.py b/portage_with_autodep/pym/portage/xpak.py
new file mode 100644
index 0000000..7487d67
--- /dev/null
+++ b/portage_with_autodep/pym/portage/xpak.py
@@ -0,0 +1,497 @@
+# Copyright 2001-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+
+# The format for a tbz2/xpak:
+#
+# tbz2: tar.bz2 + xpak + (xpak_offset) + "STOP"
+# xpak: "XPAKPACK" + (index_len) + (data_len) + index + data + "XPAKSTOP"
+# index: (pathname_len) + pathname + (data_offset) + (data_len)
+# index entries are concatenated end-to-end.
+# data: concatenated data chunks, end-to-end.
+#
+# [tarball]XPAKPACKIIIIDDDD[index][data]XPAKSTOPOOOOSTOP
+#
+# (integer) == encodeint(integer) ===> 4 characters (big-endian copy)
+# '+' means concatenate the fields ===> All chunks are strings
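+#
+# Worked example (illustrative, not part of the original description): an
+# xpak holding a single file "CATEGORY" whose data is the 9 bytes
+# "app-misc\n" would be laid out as
+#   'XPAKPACK' + encodeint(20) + encodeint(9)
+#   + encodeint(8) + 'CATEGORY' + encodeint(0) + encodeint(9)  (20-byte index)
+#   + 'app-misc\n'                                             (9-byte data)
+#   + 'XPAKSTOP'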
+
+__all__ = ['addtolist', 'decodeint', 'encodeint', 'getboth',
+ 'getindex', 'getindex_mem', 'getitem', 'listindex',
+ 'searchindex', 'tbz2', 'xpak_mem', 'xpak', 'xpand',
+ 'xsplit', 'xsplit_mem']
+
+import array
+import errno
+import shutil
+import sys
+
+import portage
+from portage import os
+from portage import normalize_path
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+
+def addtolist(mylist, curdir):
+	"""(list, dir) --- Takes a list and appends to it every file found under
+	dir in the directory tree. Returns nothing; mylist is modified in place."""
+ curdir = normalize_path(_unicode_decode(curdir,
+ encoding=_encodings['fs'], errors='strict'))
+ for parent, dirs, files in os.walk(curdir):
+
+ parent = _unicode_decode(parent,
+ encoding=_encodings['fs'], errors='strict')
+ if parent != curdir:
+ mylist.append(parent[len(curdir) + 1:] + os.sep)
+
+		# Iterate over a copy, since removing an entry from a list while
+		# iterating over it would skip the entry that follows.
+		for x in list(dirs):
+			try:
+				_unicode_decode(x, encoding=_encodings['fs'], errors='strict')
+			except UnicodeDecodeError:
+				dirs.remove(x)
+
+ for x in files:
+ try:
+ x = _unicode_decode(x,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ mylist.append(os.path.join(parent, x)[len(curdir) + 1:])
+
+def encodeint(myint):
+ """Takes a 4 byte integer and converts it into a string of 4 characters.
+ Returns the characters in a string."""
+ a = array.array('B')
+ a.append((myint >> 24 ) & 0xff)
+ a.append((myint >> 16 ) & 0xff)
+ a.append((myint >> 8 ) & 0xff)
+ a.append(myint & 0xff)
+ return a.tostring()
+
+def decodeint(mystring):
+ """Takes a 4 byte string and converts it into a 4 byte integer.
+ Returns an integer."""
+ if sys.hexversion < 0x3000000:
+ mystring = [ord(x) for x in mystring]
+ myint = 0
+ myint += mystring[3]
+ myint += mystring[2] << 8
+ myint += mystring[1] << 16
+ myint += mystring[0] << 24
+ return myint
+
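+# Round-trip illustration: encodeint(1024) yields the big-endian bytes
+# '\x00\x00\x04\x00', and decodeint('\x00\x00\x04\x00') returns 1024.
+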
+def xpak(rootdir,outfile=None):
+ """(rootdir,outfile) -- creates an xpak segment of the directory 'rootdir'
+ and under the name 'outfile' if it is specified. Otherwise it returns the
+ xpak segment."""
+
+ mylist=[]
+
+ addtolist(mylist, rootdir)
+ mylist.sort()
+ mydata = {}
+ for x in mylist:
+ if x == 'CONTENTS':
+ # CONTENTS is generated during the merge process.
+ continue
+ x = _unicode_encode(x, encoding=_encodings['fs'], errors='strict')
+ mydata[x] = open(os.path.join(rootdir, x), 'rb').read()
+
+ xpak_segment = xpak_mem(mydata)
+ if outfile:
+ outf = open(_unicode_encode(outfile,
+ encoding=_encodings['fs'], errors='strict'), 'wb')
+ outf.write(xpak_segment)
+ outf.close()
+ else:
+ return xpak_segment
+
+def xpak_mem(mydata):
+	"""Create an xpak segment from a map object."""
+
+ mydata_encoded = {}
+ for k, v in mydata.items():
+ k = _unicode_encode(k,
+ encoding=_encodings['repo.content'], errors='backslashreplace')
+ v = _unicode_encode(v,
+ encoding=_encodings['repo.content'], errors='backslashreplace')
+ mydata_encoded[k] = v
+ mydata = mydata_encoded
+ del mydata_encoded
+
+ indexglob = b''
+ indexpos=0
+ dataglob = b''
+ datapos=0
+ for x, newglob in mydata.items():
+ mydatasize=len(newglob)
+ indexglob=indexglob+encodeint(len(x))+x+encodeint(datapos)+encodeint(mydatasize)
+ indexpos=indexpos+4+len(x)+4+4
+ dataglob=dataglob+newglob
+ datapos=datapos+mydatasize
+ return b'XPAKPACK' \
+ + encodeint(len(indexglob)) \
+ + encodeint(len(dataglob)) \
+ + indexglob \
+ + dataglob \
+ + b'XPAKSTOP'
+
+def xsplit(infile):
+ """(infile) -- Splits the infile into two files.
+ 'infile.index' contains the index segment.
+	'infile.dat' contains the data segment."""
+ infile = _unicode_decode(infile,
+ encoding=_encodings['fs'], errors='strict')
+ myfile = open(_unicode_encode(infile,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ mydat=myfile.read()
+ myfile.close()
+
+ splits = xsplit_mem(mydat)
+ if not splits:
+ return False
+
+ myfile = open(_unicode_encode(infile + '.index',
+ encoding=_encodings['fs'], errors='strict'), 'wb')
+ myfile.write(splits[0])
+ myfile.close()
+ myfile = open(_unicode_encode(infile + '.dat',
+ encoding=_encodings['fs'], errors='strict'), 'wb')
+ myfile.write(splits[1])
+ myfile.close()
+ return True
+
+def xsplit_mem(mydat):
+ if mydat[0:8] != b'XPAKPACK':
+ return None
+ if mydat[-8:] != b'XPAKSTOP':
+ return None
+ indexsize=decodeint(mydat[8:12])
+ return (mydat[16:indexsize+16], mydat[indexsize+16:-8])
+
+def getindex(infile):
+ """(infile) -- grabs the index segment from the infile and returns it."""
+ myfile = open(_unicode_encode(infile,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ myheader=myfile.read(16)
+ if myheader[0:8] != b'XPAKPACK':
+ myfile.close()
+ return
+ indexsize=decodeint(myheader[8:12])
+ myindex=myfile.read(indexsize)
+ myfile.close()
+ return myindex
+
+def getboth(infile):
+ """(infile) -- grabs the index and data segments from the infile.
+ Returns an array [indexSegment,dataSegment]"""
+ myfile = open(_unicode_encode(infile,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ myheader=myfile.read(16)
+ if myheader[0:8] != b'XPAKPACK':
+ myfile.close()
+ return
+ indexsize=decodeint(myheader[8:12])
+ datasize=decodeint(myheader[12:16])
+ myindex=myfile.read(indexsize)
+ mydata=myfile.read(datasize)
+ myfile.close()
+ return myindex, mydata
+
+def listindex(myindex):
+ """Print to the terminal the filenames listed in the indexglob passed in."""
+ for x in getindex_mem(myindex):
+ print(x)
+
+def getindex_mem(myindex):
+ """Returns the filenames listed in the indexglob passed in."""
+ myindexlen=len(myindex)
+ startpos=0
+ myret=[]
+ while ((startpos+8)<myindexlen):
+ mytestlen=decodeint(myindex[startpos:startpos+4])
+ myret=myret+[myindex[startpos+4:startpos+4+mytestlen]]
+ startpos=startpos+mytestlen+12
+ return myret
+
+def searchindex(myindex,myitem):
+ """(index,item) -- Finds the offset and length of the file 'item' in the
+ datasegment via the index 'index' provided."""
+ myitem = _unicode_encode(myitem,
+ encoding=_encodings['repo.content'], errors='backslashreplace')
+ mylen=len(myitem)
+ myindexlen=len(myindex)
+ startpos=0
+ while ((startpos+8)<myindexlen):
+ mytestlen=decodeint(myindex[startpos:startpos+4])
+ if mytestlen==mylen:
+ if myitem==myindex[startpos+4:startpos+4+mytestlen]:
+ #found
+				datapos=decodeint(myindex[startpos+4+mytestlen:startpos+8+mytestlen])
+				datalen=decodeint(myindex[startpos+8+mytestlen:startpos+12+mytestlen])
+ return datapos, datalen
+ startpos=startpos+mytestlen+12
+
+def getitem(myid,myitem):
+ myindex=myid[0]
+ mydata=myid[1]
+ myloc=searchindex(myindex,myitem)
+ if not myloc:
+ return None
+ return mydata[myloc[0]:myloc[0]+myloc[1]]
+
+def xpand(myid,mydest):
+ myindex=myid[0]
+ mydata=myid[1]
+ try:
+ origdir=os.getcwd()
+ except SystemExit as e:
+ raise
+ except:
+ os.chdir("/")
+ origdir="/"
+ os.chdir(mydest)
+ myindexlen=len(myindex)
+ startpos=0
+ while ((startpos+8)<myindexlen):
+ namelen=decodeint(myindex[startpos:startpos+4])
+		datapos=decodeint(myindex[startpos+4+namelen:startpos+8+namelen])
+		datalen=decodeint(myindex[startpos+8+namelen:startpos+12+namelen])
+ myname=myindex[startpos+4:startpos+4+namelen]
+ dirname=os.path.dirname(myname)
+ if dirname:
+ if not os.path.exists(dirname):
+ os.makedirs(dirname)
+ mydat = open(_unicode_encode(myname,
+ encoding=_encodings['fs'], errors='strict'), 'wb')
+ mydat.write(mydata[datapos:datapos+datalen])
+ mydat.close()
+ startpos=startpos+namelen+12
+ os.chdir(origdir)
+
+class tbz2(object):
+ def __init__(self,myfile):
+ self.file=myfile
+ self.filestat=None
+ self.index = b''
+ self.infosize=0
+ self.xpaksize=0
+ self.indexsize=None
+ self.datasize=None
+ self.indexpos=None
+ self.datapos=None
+
+ def decompose(self,datadir,cleanup=1):
+ """Alias for unpackinfo() --- Complement to recompose() but optionally
+ deletes the destination directory. Extracts the xpak from the tbz2 into
+ the directory provided. Raises IOError if scan() fails.
+		Returns result of unpackinfo()."""
+ if not self.scan():
+ raise IOError
+ if cleanup:
+ self.cleanup(datadir)
+ if not os.path.exists(datadir):
+ os.makedirs(datadir)
+		return self.unpackinfo(datadir)
+
+ def compose(self,datadir,cleanup=0):
+ """Alias for recompose()."""
+ return self.recompose(datadir,cleanup)
+
+ def recompose(self, datadir, cleanup=0, break_hardlinks=True):
+ """Creates an xpak segment from the datadir provided, truncates the tbz2
+ to the end of regular data if an xpak segment already exists, and adds
+ the new segment to the file with terminating info."""
+ xpdata = xpak(datadir)
+ self.recompose_mem(xpdata, break_hardlinks=break_hardlinks)
+ if cleanup:
+ self.cleanup(datadir)
+
+ def recompose_mem(self, xpdata, break_hardlinks=True):
+ """
+ Update the xpak segment.
+ @param xpdata: A new xpak segment to be written, like that returned
+ from the xpak_mem() function.
+ @param break_hardlinks: If hardlinks exist, create a copy in order
+ to break them. This makes it safe to use hardlinks to create
+ cheap snapshots of the repository, which is useful for solving
+ race conditions on binhosts as described here:
+ http://code.google.com/p/chromium-os/issues/detail?id=3225.
+ Default is True.
+ """
+ self.scan() # Don't care about condition... We'll rewrite the data anyway.
+
+ if break_hardlinks and self.filestat.st_nlink > 1:
+ tmp_fname = "%s.%d" % (self.file, os.getpid())
+ shutil.copyfile(self.file, tmp_fname)
+ try:
+ portage.util.apply_stat_permissions(self.file, self.filestat)
+ except portage.exception.OperationNotPermitted:
+ pass
+ os.rename(tmp_fname, self.file)
+
+ myfile = open(_unicode_encode(self.file,
+ encoding=_encodings['fs'], errors='strict'), 'ab+')
+ if not myfile:
+ raise IOError
+ myfile.seek(-self.xpaksize,2) # 0,2 or -0,2 just mean EOF.
+ myfile.truncate()
+ myfile.write(xpdata+encodeint(len(xpdata)) + b'STOP')
+ myfile.flush()
+ myfile.close()
+ return 1
+
+ def cleanup(self, datadir):
+ datadir_split = os.path.split(datadir)
+ if len(datadir_split) >= 2 and len(datadir_split[1]) > 0:
+ # This is potentially dangerous,
+ # thus the above sanity check.
+ try:
+ shutil.rmtree(datadir)
+ except OSError as oe:
+ if oe.errno == errno.ENOENT:
+ pass
+ else:
+ raise oe
+
+ def scan(self):
+ """Scans the tbz2 to locate the xpak segment and setup internal values.
+ This function is called by relevant functions already."""
+ try:
+ mystat=os.stat(self.file)
+ if self.filestat:
+ changed=0
+ if mystat.st_size != self.filestat.st_size \
+ or mystat.st_mtime != self.filestat.st_mtime \
+ or mystat.st_ctime != self.filestat.st_ctime:
+ changed = True
+ if not changed:
+ return 1
+ self.filestat=mystat
+ a = open(_unicode_encode(self.file,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ a.seek(-16,2)
+ trailer=a.read()
+ self.infosize=0
+ self.xpaksize=0
+ if trailer[-4:] != b'STOP':
+ a.close()
+ return 0
+ if trailer[0:8] != b'XPAKSTOP':
+ a.close()
+ return 0
+ self.infosize=decodeint(trailer[8:12])
+ self.xpaksize=self.infosize+8
+ a.seek(-(self.xpaksize),2)
+ header=a.read(16)
+ if header[0:8] != b'XPAKPACK':
+ a.close()
+ return 0
+ self.indexsize=decodeint(header[8:12])
+ self.datasize=decodeint(header[12:16])
+ self.indexpos=a.tell()
+ self.index=a.read(self.indexsize)
+ self.datapos=a.tell()
+ a.close()
+ return 2
+ except SystemExit as e:
+ raise
+ except:
+ return 0
+
+ def filelist(self):
+ """Return an array of each file listed in the index."""
+ if not self.scan():
+ return None
+ return getindex_mem(self.index)
+
+ def getfile(self,myfile,mydefault=None):
+ """Finds 'myfile' in the data segment and returns it."""
+ if not self.scan():
+ return None
+ myresult=searchindex(self.index,myfile)
+ if not myresult:
+ return mydefault
+ a = open(_unicode_encode(self.file,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ a.seek(self.datapos+myresult[0],0)
+ myreturn=a.read(myresult[1])
+ a.close()
+ return myreturn
+
+ def getelements(self,myfile):
+ """A split/array representation of tbz2.getfile()"""
+ mydat=self.getfile(myfile)
+ if not mydat:
+ return []
+ return mydat.split()
+
+ def unpackinfo(self,mydest):
+ """Unpacks all the files from the dataSegment into 'mydest'."""
+ if not self.scan():
+ return 0
+ try:
+ origdir=os.getcwd()
+ except SystemExit as e:
+ raise
+ except:
+ os.chdir("/")
+ origdir="/"
+ a = open(_unicode_encode(self.file,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ if not os.path.exists(mydest):
+ os.makedirs(mydest)
+ os.chdir(mydest)
+ startpos=0
+ while ((startpos+8)<self.indexsize):
+ namelen=decodeint(self.index[startpos:startpos+4])
+			datapos=decodeint(self.index[startpos+4+namelen:startpos+8+namelen])
+			datalen=decodeint(self.index[startpos+8+namelen:startpos+12+namelen])
+ myname=self.index[startpos+4:startpos+4+namelen]
+ myname = _unicode_decode(myname,
+ encoding=_encodings['repo.content'], errors='replace')
+ dirname=os.path.dirname(myname)
+ if dirname:
+ if not os.path.exists(dirname):
+ os.makedirs(dirname)
+ mydat = open(_unicode_encode(myname,
+ encoding=_encodings['fs'], errors='strict'), 'wb')
+ a.seek(self.datapos+datapos)
+ mydat.write(a.read(datalen))
+ mydat.close()
+ startpos=startpos+namelen+12
+ a.close()
+ os.chdir(origdir)
+ return 1
+
+ def get_data(self):
+ """Returns all the files from the dataSegment as a map object."""
+ if not self.scan():
+ return {}
+ a = open(_unicode_encode(self.file,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ mydata = {}
+ startpos=0
+ while ((startpos+8)<self.indexsize):
+ namelen=decodeint(self.index[startpos:startpos+4])
+			datapos=decodeint(self.index[startpos+4+namelen:startpos+8+namelen])
+			datalen=decodeint(self.index[startpos+8+namelen:startpos+12+namelen])
+ myname=self.index[startpos+4:startpos+4+namelen]
+ a.seek(self.datapos+datapos)
+ mydata[myname] = a.read(datalen)
+ startpos=startpos+namelen+12
+ a.close()
+ return mydata
+
+ def getboth(self):
+ """Returns an array [indexSegment,dataSegment]"""
+ if not self.scan():
+ return None
+
+ a = open(_unicode_encode(self.file,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ a.seek(self.datapos)
+ mydata =a.read(self.datasize)
+ a.close()
+
+ return self.index, mydata
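+
+# Illustrative usage of the tbz2 class (the path and entry names shown here
+# are hypothetical):
+# >>> t = tbz2('/path/to/foo-1.0.tbz2')
+# >>> t.filelist()
+# [b'CATEGORY', b'PF']
+# >>> t.getfile('CATEGORY')
+# b'app-misc\n'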
+