aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAlexander Bersenev <bay@hackerdom.ru>2011-08-21 17:35:50 +0000
committerAlexander Bersenev <bay@hackerdom.ru>2011-08-21 17:35:50 +0000
commit91ffc6c50001d41fe1d16981baa32fb557463375 (patch)
tree393551fe844a9c7ee030ad71efe03a92b76ac569
parentportage integration patch is added (diff)
downloadautodep-91ffc6c50001d41fe1d16981baa32fb557463375.tar.gz
autodep-91ffc6c50001d41fe1d16981baa32fb557463375.tar.bz2
autodep-91ffc6c50001d41fe1d16981baa32fb557463375.zip
add a patched version of portage
-rwxr-xr-xportage_with_autodep/bin/archive-conf111
-rwxr-xr-xportage_with_autodep/bin/banned-helper6
-rwxr-xr-xportage_with_autodep/bin/binhost-snapshot142
-rwxr-xr-xportage_with_autodep/bin/check-implicit-pointer-usage.py84
-rwxr-xr-xportage_with_autodep/bin/clean_locks47
-rwxr-xr-xportage_with_autodep/bin/dispatch-conf434
-rwxr-xr-xportage_with_autodep/bin/dohtml.py191
-rwxr-xr-xportage_with_autodep/bin/ebuild346
l---------portage_with_autodep/bin/ebuild-helpers/4/dodoc1
l---------portage_with_autodep/bin/ebuild-helpers/4/dohard1
l---------portage_with_autodep/bin/ebuild-helpers/4/dosed1
l---------portage_with_autodep/bin/ebuild-helpers/4/prepalldocs1
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/die7
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/dobin29
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/doconfd14
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/dodir10
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/dodoc31
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/doenvd14
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/doexe43
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/dohard13
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/dohtml14
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/doinfo24
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/doinitd14
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/doins155
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/dolib43
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/dolib.a6
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/dolib.so6
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/doman64
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/domo34
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/dosbin29
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/dosed35
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/dosym18
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/ecompress161
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/ecompressdir143
l---------portage_with_autodep/bin/ebuild-helpers/eerror1
l---------portage_with_autodep/bin/ebuild-helpers/einfo1
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/elog7
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/emake28
l---------portage_with_autodep/bin/ebuild-helpers/eqawarn1
l---------portage_with_autodep/bin/ebuild-helpers/ewarn1
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/fowners13
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/fperms13
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/newbin19
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/newconfd19
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/newdoc19
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/newenvd19
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/newexe19
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/newinitd19
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/newins35
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/newlib.a19
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/newlib.so19
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/newman19
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/newsbin19
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/portageq8
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/prepall23
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/prepalldocs15
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/prepallinfo9
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/prepallman19
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/prepallstrip5
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/prepinfo34
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/preplib28
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/prepman32
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/prepstrip193
-rwxr-xr-xportage_with_autodep/bin/ebuild-helpers/sed27
-rwxr-xr-xportage_with_autodep/bin/ebuild-ipc8
-rwxr-xr-xportage_with_autodep/bin/ebuild-ipc.py276
-rwxr-xr-xportage_with_autodep/bin/ebuild.sh2424
-rwxr-xr-xportage_with_autodep/bin/egencache851
-rwxr-xr-xportage_with_autodep/bin/emaint654
-rwxr-xr-xportage_with_autodep/bin/emerge66
-rwxr-xr-xportage_with_autodep/bin/emerge-webrsync457
-rwxr-xr-xportage_with_autodep/bin/env-update41
-rwxr-xr-xportage_with_autodep/bin/etc-update616
-rwxr-xr-xportage_with_autodep/bin/filter-bash-environment.py150
-rwxr-xr-xportage_with_autodep/bin/fixpackages45
-rwxr-xr-xportage_with_autodep/bin/glsa-check316
-rw-r--r--portage_with_autodep/bin/isolated-functions.sh630
-rwxr-xr-xportage_with_autodep/bin/lock-helper.py28
-rwxr-xr-xportage_with_autodep/bin/misc-functions.sh1002
-rwxr-xr-xportage_with_autodep/bin/portageq822
-rwxr-xr-xportage_with_autodep/bin/quickpkg291
-rwxr-xr-xportage_with_autodep/bin/regenworld144
-rwxr-xr-xportage_with_autodep/bin/repoman2672
-rwxr-xr-xportage_with_autodep/bin/xpak-helper.py68
-rw-r--r--portage_with_autodep/pym/_emerge/AbstractDepPriority.py29
-rw-r--r--portage_with_autodep/pym/_emerge/AbstractEbuildProcess.py266
-rw-r--r--portage_with_autodep/pym/_emerge/AbstractPollTask.py62
-rw-r--r--portage_with_autodep/pym/_emerge/AsynchronousLock.py288
-rw-r--r--portage_with_autodep/pym/_emerge/AsynchronousTask.py129
-rw-r--r--portage_with_autodep/pym/_emerge/AtomArg.py11
-rw-r--r--portage_with_autodep/pym/_emerge/Binpkg.py333
-rw-r--r--portage_with_autodep/pym/_emerge/BinpkgEnvExtractor.py66
-rw-r--r--portage_with_autodep/pym/_emerge/BinpkgExtractorAsync.py31
-rw-r--r--portage_with_autodep/pym/_emerge/BinpkgFetcher.py181
-rw-r--r--portage_with_autodep/pym/_emerge/BinpkgPrefetcher.py43
-rw-r--r--portage_with_autodep/pym/_emerge/BinpkgVerifier.py75
-rw-r--r--portage_with_autodep/pym/_emerge/Blocker.py15
-rw-r--r--portage_with_autodep/pym/_emerge/BlockerCache.py182
-rw-r--r--portage_with_autodep/pym/_emerge/BlockerDB.py124
-rw-r--r--portage_with_autodep/pym/_emerge/BlockerDepPriority.py13
-rw-r--r--portage_with_autodep/pym/_emerge/CompositeTask.py157
-rw-r--r--portage_with_autodep/pym/_emerge/DepPriority.py49
-rw-r--r--portage_with_autodep/pym/_emerge/DepPriorityNormalRange.py47
-rw-r--r--portage_with_autodep/pym/_emerge/DepPrioritySatisfiedRange.py85
-rw-r--r--portage_with_autodep/pym/_emerge/Dependency.py20
-rw-r--r--portage_with_autodep/pym/_emerge/DependencyArg.py33
-rw-r--r--portage_with_autodep/pym/_emerge/EbuildBinpkg.py46
-rw-r--r--portage_with_autodep/pym/_emerge/EbuildBuild.py426
-rw-r--r--portage_with_autodep/pym/_emerge/EbuildBuildDir.py109
-rw-r--r--portage_with_autodep/pym/_emerge/EbuildExecuter.py99
-rw-r--r--portage_with_autodep/pym/_emerge/EbuildFetcher.py302
-rw-r--r--portage_with_autodep/pym/_emerge/EbuildFetchonly.py32
-rw-r--r--portage_with_autodep/pym/_emerge/EbuildIpcDaemon.py108
-rw-r--r--portage_with_autodep/pym/_emerge/EbuildMerge.py56
-rw-r--r--portage_with_autodep/pym/_emerge/EbuildMetadataPhase.py133
-rw-r--r--portage_with_autodep/pym/_emerge/EbuildPhase.py350
-rw-r--r--portage_with_autodep/pym/_emerge/EbuildProcess.py21
-rw-r--r--portage_with_autodep/pym/_emerge/EbuildSpawnProcess.py16
-rw-r--r--portage_with_autodep/pym/_emerge/EventsAnalyser.py511
-rw-r--r--portage_with_autodep/pym/_emerge/EventsLogger.py180
-rw-r--r--portage_with_autodep/pym/_emerge/FakeVartree.py265
-rw-r--r--portage_with_autodep/pym/_emerge/FifoIpcDaemon.py81
-rw-r--r--portage_with_autodep/pym/_emerge/JobStatusDisplay.py292
-rw-r--r--portage_with_autodep/pym/_emerge/MergeListItem.py135
-rw-r--r--portage_with_autodep/pym/_emerge/MetadataRegen.py184
-rw-r--r--portage_with_autodep/pym/_emerge/MiscFunctionsProcess.py33
-rw-r--r--portage_with_autodep/pym/_emerge/Package.py700
-rw-r--r--portage_with_autodep/pym/_emerge/PackageArg.py19
-rw-r--r--portage_with_autodep/pym/_emerge/PackageMerge.py40
-rw-r--r--portage_with_autodep/pym/_emerge/PackageUninstall.py110
-rw-r--r--portage_with_autodep/pym/_emerge/PackageVirtualDbapi.py145
-rw-r--r--portage_with_autodep/pym/_emerge/PipeReader.py96
-rw-r--r--portage_with_autodep/pym/_emerge/PollConstants.py18
-rw-r--r--portage_with_autodep/pym/_emerge/PollScheduler.py398
-rw-r--r--portage_with_autodep/pym/_emerge/PollSelectAdapter.py73
-rw-r--r--portage_with_autodep/pym/_emerge/ProgressHandler.py22
-rw-r--r--portage_with_autodep/pym/_emerge/QueueScheduler.py116
-rw-r--r--portage_with_autodep/pym/_emerge/RootConfig.py34
-rw-r--r--portage_with_autodep/pym/_emerge/Scheduler.py1975
-rw-r--r--portage_with_autodep/pym/_emerge/SequentialTaskQueue.py89
-rw-r--r--portage_with_autodep/pym/_emerge/SetArg.py11
-rw-r--r--portage_with_autodep/pym/_emerge/SlotObject.py42
-rw-r--r--portage_with_autodep/pym/_emerge/SpawnProcess.py235
-rw-r--r--portage_with_autodep/pym/_emerge/SubProcess.py141
-rw-r--r--portage_with_autodep/pym/_emerge/Task.py42
-rw-r--r--portage_with_autodep/pym/_emerge/TaskScheduler.py25
-rw-r--r--portage_with_autodep/pym/_emerge/TaskSequence.py44
-rw-r--r--portage_with_autodep/pym/_emerge/UninstallFailure.py15
-rw-r--r--portage_with_autodep/pym/_emerge/UnmergeDepPriority.py41
-rw-r--r--portage_with_autodep/pym/_emerge/UseFlagDisplay.py122
-rw-r--r--portage_with_autodep/pym/_emerge/__init__.py2
-rw-r--r--portage_with_autodep/pym/_emerge/_find_deep_system_runtime_deps.py38
-rw-r--r--portage_with_autodep/pym/_emerge/_flush_elog_mod_echo.py15
-rw-r--r--portage_with_autodep/pym/_emerge/actions.py3123
-rw-r--r--portage_with_autodep/pym/_emerge/clear_caches.py19
-rw-r--r--portage_with_autodep/pym/_emerge/countdown.py22
-rw-r--r--portage_with_autodep/pym/_emerge/create_depgraph_params.py72
-rw-r--r--portage_with_autodep/pym/_emerge/create_world_atom.py92
-rw-r--r--portage_with_autodep/pym/_emerge/depgraph.py7029
-rw-r--r--portage_with_autodep/pym/_emerge/emergelog.py63
-rw-r--r--portage_with_autodep/pym/_emerge/getloadavg.py27
-rw-r--r--portage_with_autodep/pym/_emerge/help.py815
-rw-r--r--portage_with_autodep/pym/_emerge/is_valid_package_atom.py21
-rw-r--r--portage_with_autodep/pym/_emerge/main.py1910
-rw-r--r--portage_with_autodep/pym/_emerge/resolver/__init__.py2
-rw-r--r--portage_with_autodep/pym/_emerge/resolver/backtracking.py197
-rw-r--r--portage_with_autodep/pym/_emerge/resolver/circular_dependency.py267
-rw-r--r--portage_with_autodep/pym/_emerge/resolver/output.py888
-rw-r--r--portage_with_autodep/pym/_emerge/resolver/output_helpers.py576
-rw-r--r--portage_with_autodep/pym/_emerge/resolver/slot_collision.py978
-rw-r--r--portage_with_autodep/pym/_emerge/search.py385
-rw-r--r--portage_with_autodep/pym/_emerge/show_invalid_depstring_notice.py35
-rw-r--r--portage_with_autodep/pym/_emerge/stdout_spinner.py83
-rw-r--r--portage_with_autodep/pym/_emerge/sync/__init__.py2
-rw-r--r--portage_with_autodep/pym/_emerge/sync/getaddrinfo_validate.py29
-rw-r--r--portage_with_autodep/pym/_emerge/sync/old_tree_timestamp.py98
-rw-r--r--portage_with_autodep/pym/_emerge/unmerge.py578
-rw-r--r--portage_with_autodep/pym/_emerge/userquery.py55
-rw-r--r--portage_with_autodep/pym/portage/__init__.py610
-rw-r--r--portage_with_autodep/pym/portage/_global_updates.py250
-rw-r--r--portage_with_autodep/pym/portage/_legacy_globals.py81
-rw-r--r--portage_with_autodep/pym/portage/_selinux.py129
-rw-r--r--portage_with_autodep/pym/portage/_sets/__init__.py245
-rw-r--r--portage_with_autodep/pym/portage/_sets/base.py264
-rw-r--r--portage_with_autodep/pym/portage/_sets/dbapi.py383
-rw-r--r--portage_with_autodep/pym/portage/_sets/files.py341
-rw-r--r--portage_with_autodep/pym/portage/_sets/libs.py98
-rw-r--r--portage_with_autodep/pym/portage/_sets/profiles.py53
-rw-r--r--portage_with_autodep/pym/portage/_sets/security.py86
-rw-r--r--portage_with_autodep/pym/portage/_sets/shell.py44
-rw-r--r--portage_with_autodep/pym/portage/cache/__init__.py4
-rw-r--r--portage_with_autodep/pym/portage/cache/anydbm.py113
-rw-r--r--portage_with_autodep/pym/portage/cache/cache_errors.py62
-rw-r--r--portage_with_autodep/pym/portage/cache/ebuild_xattr.py171
-rw-r--r--portage_with_autodep/pym/portage/cache/flat_hash.py155
-rw-r--r--portage_with_autodep/pym/portage/cache/flat_list.py134
-rw-r--r--portage_with_autodep/pym/portage/cache/fs_template.py90
-rw-r--r--portage_with_autodep/pym/portage/cache/mappings.py485
-rw-r--r--portage_with_autodep/pym/portage/cache/metadata.py154
-rw-r--r--portage_with_autodep/pym/portage/cache/metadata_overlay.py105
-rw-r--r--portage_with_autodep/pym/portage/cache/sql_template.py301
-rw-r--r--portage_with_autodep/pym/portage/cache/sqlite.py245
-rw-r--r--portage_with_autodep/pym/portage/cache/template.py236
-rw-r--r--portage_with_autodep/pym/portage/cache/util.py170
-rw-r--r--portage_with_autodep/pym/portage/cache/volatile.py25
-rw-r--r--portage_with_autodep/pym/portage/checksum.py291
-rw-r--r--portage_with_autodep/pym/portage/const.py143
-rw-r--r--portage_with_autodep/pym/portage/cvstree.py293
-rw-r--r--portage_with_autodep/pym/portage/data.py122
-rw-r--r--portage_with_autodep/pym/portage/dbapi/_MergeProcess.py282
-rw-r--r--portage_with_autodep/pym/portage/dbapi/__init__.py302
-rw-r--r--portage_with_autodep/pym/portage/dbapi/_expand_new_virt.py72
-rw-r--r--portage_with_autodep/pym/portage/dbapi/bintree.py1366
-rw-r--r--portage_with_autodep/pym/portage/dbapi/cpv_expand.py106
-rw-r--r--portage_with_autodep/pym/portage/dbapi/dep_expand.py56
-rw-r--r--portage_with_autodep/pym/portage/dbapi/porttree.py1168
-rw-r--r--portage_with_autodep/pym/portage/dbapi/vartree.py4527
-rw-r--r--portage_with_autodep/pym/portage/dbapi/virtual.py131
-rw-r--r--portage_with_autodep/pym/portage/debug.py120
-rw-r--r--portage_with_autodep/pym/portage/dep/__init__.py2432
-rw-r--r--portage_with_autodep/pym/portage/dep/dep_check.py679
-rw-r--r--portage_with_autodep/pym/portage/dispatch_conf.py188
-rw-r--r--portage_with_autodep/pym/portage/eapi.py50
-rw-r--r--portage_with_autodep/pym/portage/eclass_cache.py123
-rw-r--r--portage_with_autodep/pym/portage/elog/__init__.py182
-rw-r--r--portage_with_autodep/pym/portage/elog/filtering.py15
-rw-r--r--portage_with_autodep/pym/portage/elog/messages.py172
-rw-r--r--portage_with_autodep/pym/portage/elog/mod_custom.py19
-rw-r--r--portage_with_autodep/pym/portage/elog/mod_echo.py46
-rw-r--r--portage_with_autodep/pym/portage/elog/mod_mail.py43
-rw-r--r--portage_with_autodep/pym/portage/elog/mod_mail_summary.py89
-rw-r--r--portage_with_autodep/pym/portage/elog/mod_save.py51
-rw-r--r--portage_with_autodep/pym/portage/elog/mod_save_summary.py59
-rw-r--r--portage_with_autodep/pym/portage/elog/mod_syslog.py32
-rw-r--r--portage_with_autodep/pym/portage/env/__init__.py3
-rw-r--r--portage_with_autodep/pym/portage/env/config.py105
-rw-r--r--portage_with_autodep/pym/portage/env/loaders.py319
-rw-r--r--portage_with_autodep/pym/portage/env/validators.py20
-rw-r--r--portage_with_autodep/pym/portage/exception.py186
-rw-r--r--portage_with_autodep/pym/portage/getbinpkg.py861
-rw-r--r--portage_with_autodep/pym/portage/glsa.py699
-rw-r--r--portage_with_autodep/pym/portage/localization.py20
-rw-r--r--portage_with_autodep/pym/portage/locks.py395
-rw-r--r--portage_with_autodep/pym/portage/mail.py177
-rw-r--r--portage_with_autodep/pym/portage/manifest.py538
-rw-r--r--portage_with_autodep/pym/portage/news.py351
-rw-r--r--portage_with_autodep/pym/portage/output.py794
-rw-r--r--portage_with_autodep/pym/portage/package/__init__.py2
-rw-r--r--portage_with_autodep/pym/portage/package/ebuild/__init__.py2
-rw-r--r--portage_with_autodep/pym/portage/package/ebuild/_config/KeywordsManager.py284
-rw-r--r--portage_with_autodep/pym/portage/package/ebuild/_config/LicenseManager.py236
-rw-r--r--portage_with_autodep/pym/portage/package/ebuild/_config/LocationsManager.py182
-rw-r--r--portage_with_autodep/pym/portage/package/ebuild/_config/MaskManager.py189
-rw-r--r--portage_with_autodep/pym/portage/package/ebuild/_config/UseManager.py235
-rw-r--r--portage_with_autodep/pym/portage/package/ebuild/_config/VirtualsManager.py233
-rw-r--r--portage_with_autodep/pym/portage/package/ebuild/_config/__init__.py2
-rw-r--r--portage_with_autodep/pym/portage/package/ebuild/_config/env_var_validation.py23
-rw-r--r--portage_with_autodep/pym/portage/package/ebuild/_config/features_set.py128
-rw-r--r--portage_with_autodep/pym/portage/package/ebuild/_config/helper.py64
-rw-r--r--portage_with_autodep/pym/portage/package/ebuild/_config/special_env_vars.py185
-rw-r--r--portage_with_autodep/pym/portage/package/ebuild/_ipc/ExitCommand.py27
-rw-r--r--portage_with_autodep/pym/portage/package/ebuild/_ipc/IpcCommand.py9
-rw-r--r--portage_with_autodep/pym/portage/package/ebuild/_ipc/QueryCommand.py98
-rw-r--r--portage_with_autodep/pym/portage/package/ebuild/_ipc/__init__.py2
-rw-r--r--portage_with_autodep/pym/portage/package/ebuild/_spawn_nofetch.py82
-rw-r--r--portage_with_autodep/pym/portage/package/ebuild/config.py2224
-rw-r--r--portage_with_autodep/pym/portage/package/ebuild/deprecated_profile_check.py42
-rw-r--r--portage_with_autodep/pym/portage/package/ebuild/digestcheck.py167
-rw-r--r--portage_with_autodep/pym/portage/package/ebuild/digestgen.py202
-rw-r--r--portage_with_autodep/pym/portage/package/ebuild/doebuild.py1791
-rw-r--r--portage_with_autodep/pym/portage/package/ebuild/fetch.py1129
-rw-r--r--portage_with_autodep/pym/portage/package/ebuild/getmaskingreason.py124
-rw-r--r--portage_with_autodep/pym/portage/package/ebuild/getmaskingstatus.py174
-rw-r--r--portage_with_autodep/pym/portage/package/ebuild/prepare_build_dirs.py370
-rw-r--r--portage_with_autodep/pym/portage/process.py427
-rw-r--r--portage_with_autodep/pym/portage/proxy/__init__.py2
-rw-r--r--portage_with_autodep/pym/portage/proxy/lazyimport.py212
-rw-r--r--portage_with_autodep/pym/portage/proxy/objectproxy.py91
-rw-r--r--portage_with_autodep/pym/portage/repository/__init__.py2
-rw-r--r--portage_with_autodep/pym/portage/repository/config.py504
-rw-r--r--portage_with_autodep/pym/portage/tests/__init__.py244
-rw-r--r--portage_with_autodep/pym/portage/tests/bin/__init__.py0
-rw-r--r--portage_with_autodep/pym/portage/tests/bin/__test__0
-rw-r--r--portage_with_autodep/pym/portage/tests/bin/setup_env.py85
-rw-r--r--portage_with_autodep/pym/portage/tests/bin/test_dobin.py16
-rw-r--r--portage_with_autodep/pym/portage/tests/bin/test_dodir.py16
-rw-r--r--portage_with_autodep/pym/portage/tests/dbapi/__init__.py2
-rw-r--r--portage_with_autodep/pym/portage/tests/dbapi/__test__0
-rw-r--r--portage_with_autodep/pym/portage/tests/dbapi/test_fakedbapi.py58
-rw-r--r--portage_with_autodep/pym/portage/tests/dep/__init__.py3
-rw-r--r--portage_with_autodep/pym/portage/tests/dep/__test__0
-rw-r--r--portage_with_autodep/pym/portage/tests/dep/testAtom.py315
-rw-r--r--portage_with_autodep/pym/portage/tests/dep/testCheckRequiredUse.py219
-rw-r--r--portage_with_autodep/pym/portage/tests/dep/testExtendedAtomDict.py18
-rw-r--r--portage_with_autodep/pym/portage/tests/dep/testExtractAffectingUSE.py75
-rw-r--r--portage_with_autodep/pym/portage/tests/dep/testStandalone.py36
-rw-r--r--portage_with_autodep/pym/portage/tests/dep/test_best_match_to_list.py43
-rw-r--r--portage_with_autodep/pym/portage/tests/dep/test_dep_getcpv.py35
-rw-r--r--portage_with_autodep/pym/portage/tests/dep/test_dep_getrepo.py29
-rw-r--r--portage_with_autodep/pym/portage/tests/dep/test_dep_getslot.py28
-rw-r--r--portage_with_autodep/pym/portage/tests/dep/test_dep_getusedeps.py35
-rw-r--r--portage_with_autodep/pym/portage/tests/dep/test_get_operator.py33
-rw-r--r--portage_with_autodep/pym/portage/tests/dep/test_get_required_use_flags.py42
-rw-r--r--portage_with_autodep/pym/portage/tests/dep/test_isjustname.py24
-rw-r--r--portage_with_autodep/pym/portage/tests/dep/test_isvalidatom.py146
-rw-r--r--portage_with_autodep/pym/portage/tests/dep/test_match_from_list.py108
-rw-r--r--portage_with_autodep/pym/portage/tests/dep/test_paren_reduce.py66
-rw-r--r--portage_with_autodep/pym/portage/tests/dep/test_use_reduce.py627
-rw-r--r--portage_with_autodep/pym/portage/tests/ebuild/__init__.py2
-rw-r--r--portage_with_autodep/pym/portage/tests/ebuild/__test__0
-rw-r--r--portage_with_autodep/pym/portage/tests/ebuild/test_array_fromfile_eof.py43
-rw-r--r--portage_with_autodep/pym/portage/tests/ebuild/test_config.py198
-rw-r--r--portage_with_autodep/pym/portage/tests/ebuild/test_doebuild_spawn.py82
-rw-r--r--portage_with_autodep/pym/portage/tests/ebuild/test_ipc_daemon.py124
-rw-r--r--portage_with_autodep/pym/portage/tests/ebuild/test_pty_eof.py32
-rw-r--r--portage_with_autodep/pym/portage/tests/ebuild/test_spawn.py52
-rw-r--r--portage_with_autodep/pym/portage/tests/env/__init__.py4
-rw-r--r--portage_with_autodep/pym/portage/tests/env/__test__0
-rw-r--r--portage_with_autodep/pym/portage/tests/env/config/__init__.py4
-rw-r--r--portage_with_autodep/pym/portage/tests/env/config/__test__0
-rw-r--r--portage_with_autodep/pym/portage/tests/env/config/test_PackageKeywordsFile.py40
-rw-r--r--portage_with_autodep/pym/portage/tests/env/config/test_PackageMaskFile.py29
-rw-r--r--portage_with_autodep/pym/portage/tests/env/config/test_PackageUseFile.py37
-rw-r--r--portage_with_autodep/pym/portage/tests/env/config/test_PortageModulesFile.py39
-rw-r--r--portage_with_autodep/pym/portage/tests/lafilefixer/__init__.py0
-rw-r--r--portage_with_autodep/pym/portage/tests/lafilefixer/__test__0
-rw-r--r--portage_with_autodep/pym/portage/tests/lafilefixer/test_lafilefixer.py145
-rw-r--r--portage_with_autodep/pym/portage/tests/lazyimport/__init__.py0
-rw-r--r--portage_with_autodep/pym/portage/tests/lazyimport/__test__0
-rw-r--r--portage_with_autodep/pym/portage/tests/lazyimport/test_lazy_import_portage_baseline.py81
-rw-r--r--portage_with_autodep/pym/portage/tests/lazyimport/test_preload_portage_submodules.py16
-rw-r--r--portage_with_autodep/pym/portage/tests/lint/__init__.py0
-rw-r--r--portage_with_autodep/pym/portage/tests/lint/__test__0
-rw-r--r--portage_with_autodep/pym/portage/tests/lint/test_bash_syntax.py42
-rw-r--r--portage_with_autodep/pym/portage/tests/lint/test_compile_modules.py46
-rw-r--r--portage_with_autodep/pym/portage/tests/lint/test_import_modules.py40
-rw-r--r--portage_with_autodep/pym/portage/tests/locks/__init__.py2
-rw-r--r--portage_with_autodep/pym/portage/tests/locks/__test__0
-rw-r--r--portage_with_autodep/pym/portage/tests/locks/test_asynchronous_lock.py124
-rw-r--r--portage_with_autodep/pym/portage/tests/locks/test_lock_nonblock.py46
-rw-r--r--portage_with_autodep/pym/portage/tests/news/__init__.py3
-rw-r--r--portage_with_autodep/pym/portage/tests/news/__test__0
-rw-r--r--portage_with_autodep/pym/portage/tests/news/test_NewsItem.py95
-rw-r--r--portage_with_autodep/pym/portage/tests/process/__init__.py2
-rw-r--r--portage_with_autodep/pym/portage/tests/process/__test__0
-rw-r--r--portage_with_autodep/pym/portage/tests/process/test_poll.py39
-rw-r--r--portage_with_autodep/pym/portage/tests/resolver/ResolverPlayground.py690
-rw-r--r--portage_with_autodep/pym/portage/tests/resolver/__init__.py2
-rw-r--r--portage_with_autodep/pym/portage/tests/resolver/__test__0
-rw-r--r--portage_with_autodep/pym/portage/tests/resolver/test_autounmask.py326
-rw-r--r--portage_with_autodep/pym/portage/tests/resolver/test_backtracking.py169
-rw-r--r--portage_with_autodep/pym/portage/tests/resolver/test_circular_dependencies.py84
-rw-r--r--portage_with_autodep/pym/portage/tests/resolver/test_depclean.py285
-rw-r--r--portage_with_autodep/pym/portage/tests/resolver/test_depth.py252
-rw-r--r--portage_with_autodep/pym/portage/tests/resolver/test_eapi.py115
-rw-r--r--portage_with_autodep/pym/portage/tests/resolver/test_merge_order.py453
-rw-r--r--portage_with_autodep/pym/portage/tests/resolver/test_missing_iuse_and_evaluated_atoms.py31
-rw-r--r--portage_with_autodep/pym/portage/tests/resolver/test_multirepo.py318
-rw-r--r--portage_with_autodep/pym/portage/tests/resolver/test_multislot.py40
-rw-r--r--portage_with_autodep/pym/portage/tests/resolver/test_old_dep_chain_display.py35
-rw-r--r--portage_with_autodep/pym/portage/tests/resolver/test_output.py88
-rw-r--r--portage_with_autodep/pym/portage/tests/resolver/test_rebuild.py138
-rw-r--r--portage_with_autodep/pym/portage/tests/resolver/test_required_use.py114
-rw-r--r--portage_with_autodep/pym/portage/tests/resolver/test_simple.py57
-rw-r--r--portage_with_autodep/pym/portage/tests/resolver/test_slot_collisions.py143
-rw-r--r--portage_with_autodep/pym/portage/tests/resolver/test_use_dep_defaults.py40
-rwxr-xr-xportage_with_autodep/pym/portage/tests/runTests46
-rw-r--r--portage_with_autodep/pym/portage/tests/sets/__init__.py0
-rw-r--r--portage_with_autodep/pym/portage/tests/sets/base/__init__.py0
-rw-r--r--portage_with_autodep/pym/portage/tests/sets/base/__test__0
-rw-r--r--portage_with_autodep/pym/portage/tests/sets/base/testInternalPackageSet.py61
-rw-r--r--portage_with_autodep/pym/portage/tests/sets/files/__init__.py0
-rw-r--r--portage_with_autodep/pym/portage/tests/sets/files/__test__0
-rw-r--r--portage_with_autodep/pym/portage/tests/sets/files/testConfigFileSet.py32
-rw-r--r--portage_with_autodep/pym/portage/tests/sets/files/testStaticFileSet.py27
-rw-r--r--portage_with_autodep/pym/portage/tests/sets/shell/__init__.py0
-rw-r--r--portage_with_autodep/pym/portage/tests/sets/shell/__test__0
-rw-r--r--portage_with_autodep/pym/portage/tests/sets/shell/testShell.py28
-rw-r--r--portage_with_autodep/pym/portage/tests/unicode/__init__.py2
-rw-r--r--portage_with_autodep/pym/portage/tests/unicode/__test__0
-rw-r--r--portage_with_autodep/pym/portage/tests/unicode/test_string_format.py108
-rw-r--r--portage_with_autodep/pym/portage/tests/util/__init__.py4
-rw-r--r--portage_with_autodep/pym/portage/tests/util/__test__0
-rw-r--r--portage_with_autodep/pym/portage/tests/util/test_digraph.py201
-rw-r--r--portage_with_autodep/pym/portage/tests/util/test_getconfig.py29
-rw-r--r--portage_with_autodep/pym/portage/tests/util/test_grabdict.py11
-rw-r--r--portage_with_autodep/pym/portage/tests/util/test_normalizedPath.py14
-rw-r--r--portage_with_autodep/pym/portage/tests/util/test_stackDictList.py17
-rw-r--r--portage_with_autodep/pym/portage/tests/util/test_stackDicts.py36
-rw-r--r--portage_with_autodep/pym/portage/tests/util/test_stackLists.py19
-rw-r--r--portage_with_autodep/pym/portage/tests/util/test_uniqueArray.py24
-rw-r--r--portage_with_autodep/pym/portage/tests/util/test_varExpand.py92
-rw-r--r--portage_with_autodep/pym/portage/tests/versions/__init__.py3
-rw-r--r--portage_with_autodep/pym/portage/tests/versions/__test__0
-rw-r--r--portage_with_autodep/pym/portage/tests/versions/test_cpv_sort_key.py16
-rw-r--r--portage_with_autodep/pym/portage/tests/versions/test_vercmp.py80
-rw-r--r--portage_with_autodep/pym/portage/tests/xpak/__init__.py3
-rw-r--r--portage_with_autodep/pym/portage/tests/xpak/__test__0
-rw-r--r--portage_with_autodep/pym/portage/tests/xpak/test_decodeint.py16
-rw-r--r--portage_with_autodep/pym/portage/update.py320
-rw-r--r--portage_with_autodep/pym/portage/util/ExtractKernelVersion.py76
-rw-r--r--portage_with_autodep/pym/portage/util/__init__.py1602
-rw-r--r--portage_with_autodep/pym/portage/util/_dyn_libs/LinkageMapELF.py805
-rw-r--r--portage_with_autodep/pym/portage/util/_dyn_libs/PreservedLibsRegistry.py172
-rw-r--r--portage_with_autodep/pym/portage/util/_dyn_libs/__init__.py2
-rw-r--r--portage_with_autodep/pym/portage/util/_pty.py212
-rw-r--r--portage_with_autodep/pym/portage/util/digraph.py342
-rw-r--r--portage_with_autodep/pym/portage/util/env_update.py293
-rw-r--r--portage_with_autodep/pym/portage/util/lafilefixer.py185
-rw-r--r--portage_with_autodep/pym/portage/util/listdir.py151
-rw-r--r--portage_with_autodep/pym/portage/util/movefile.py242
-rw-r--r--portage_with_autodep/pym/portage/util/mtimedb.py81
-rw-r--r--portage_with_autodep/pym/portage/versions.py403
-rw-r--r--portage_with_autodep/pym/portage/xml/__init__.py2
-rw-r--r--portage_with_autodep/pym/portage/xml/metadata.py376
-rw-r--r--portage_with_autodep/pym/portage/xpak.py497
-rw-r--r--portage_with_autodep/pym/repoman/__init__.py0
-rw-r--r--portage_with_autodep/pym/repoman/checks.py707
-rw-r--r--portage_with_autodep/pym/repoman/errors.py26
-rw-r--r--portage_with_autodep/pym/repoman/herdbase.py110
-rw-r--r--portage_with_autodep/pym/repoman/utilities.py511
421 files changed, 89247 insertions, 0 deletions
diff --git a/portage_with_autodep/bin/archive-conf b/portage_with_autodep/bin/archive-conf
new file mode 100755
index 0000000..5a03b85
--- /dev/null
+++ b/portage_with_autodep/bin/archive-conf
@@ -0,0 +1,111 @@
+#!/usr/bin/python
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+#
+# archive-conf -- save off a config file in the dispatch-conf archive dir
+#
+# Written by Wayne Davison <gentoo@blorf.net> with code snagged from
+# Jeremy Wohl's dispatch-conf script and the portage chkcontents script.
+#
+
+from __future__ import print_function
+
+import sys
+try:
+ import portage
+except ImportError:
+ from os import path as osp
+ sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+ import portage
+
+from portage import os
+import dispatch_conf
+
+FIND_EXTANT_CONTENTS = "find %s -name CONTENTS"
+
+MANDATORY_OPTS = [ 'archive-dir' ]
+
+try:
+ import fchksum
+ def perform_checksum(filename): return fchksum.fmd5t(filename)
+except ImportError:
+ import md5
+ def md5_to_hex(md5sum):
+ hexform = ""
+ for ix in range(len(md5sum)):
+ hexform = hexform + "%02x" % ord(md5sum[ix])
+ return hexform.lower()
+
+ def perform_checksum(filename):
+ f = open(filename, 'rb')
+ blocksize=32768
+ data = f.read(blocksize)
+ size = 0
+ sum = md5.new()
+ while data:
+ sum.update(data)
+ size = size + len(data)
+ data = f.read(blocksize)
+ return (md5_to_hex(sum.digest()),size)
+
+def archive_conf():
+ args = []
+ content_files = []
+ md5_match_hash = {}
+
+ options = portage.dispatch_conf.read_config(MANDATORY_OPTS)
+
+ for conf in sys.argv[1:]:
+ if not os.path.isabs(conf):
+ conf = os.path.abspath(conf)
+ args += [ conf ]
+ md5_match_hash[conf] = ''
+
+ # Find all the CONTENT files in VDB_PATH.
+ content_files += os.popen(FIND_EXTANT_CONTENTS %
+ (os.path.join(portage.settings['EROOT'], portage.VDB_PATH))).readlines()
+
+ # Search for the saved md5 checksum of all the specified config files
+ # and see if the current file is unmodified or not.
+ try:
+ todo_cnt = len(args)
+ for file in content_files:
+ file = file.rstrip()
+ try:
+ contents = open(file, "r")
+ except IOError as e:
+ print('archive-conf: Unable to open %s: %s' % (file, e), file=sys.stderr)
+ sys.exit(1)
+ lines = contents.readlines()
+ for line in lines:
+ items = line.split()
+ if items[0] == 'obj':
+ for conf in args:
+ if items[1] == conf:
+ stored = items[2].lower()
+ real = perform_checksum(conf)[0].lower()
+ if stored == real:
+ md5_match_hash[conf] = conf
+ todo_cnt -= 1
+ if todo_cnt == 0:
+ raise StopIteration()
+ except StopIteration:
+ pass
+
+ for conf in args:
+ archive = os.path.join(options['archive-dir'], conf.lstrip('/'))
+ if options['use-rcs'] == 'yes':
+ portage.dispatch_conf.rcs_archive(archive, conf, md5_match_hash[conf], '')
+ if md5_match_hash[conf]:
+ portage.dispatch_conf.rcs_archive_post_process(archive)
+ else:
+ portage.dispatch_conf.file_archive(archive, conf, md5_match_hash[conf], '')
+ if md5_match_hash[conf]:
+ portage.dispatch_conf.file_archive_post_process(archive)
+
+# run
+if len(sys.argv) > 1:
+ archive_conf()
+else:
+ print('Usage: archive-conf /CONFIG/FILE [/CONFIG/FILE...]', file=sys.stderr)
diff --git a/portage_with_autodep/bin/banned-helper b/portage_with_autodep/bin/banned-helper
new file mode 100755
index 0000000..17ea991
--- /dev/null
+++ b/portage_with_autodep/bin/banned-helper
@@ -0,0 +1,6 @@
#!/bin/bash
# Copyright 2009 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2

# Installed under the name of each ebuild helper that is banned for the
# active EAPI; "die" (provided by the ebuild environment) aborts the build,
# reporting the helper's basename.  "exit 1" is a safety net in case die
# ever returns.
die "'${0##*/}' has been banned for EAPI '$EAPI'"
exit 1
diff --git a/portage_with_autodep/bin/binhost-snapshot b/portage_with_autodep/bin/binhost-snapshot
new file mode 100755
index 0000000..9d2697d
--- /dev/null
+++ b/portage_with_autodep/bin/binhost-snapshot
@@ -0,0 +1,142 @@
+#!/usr/bin/python
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import io
+import optparse
+import os
+import sys
+import textwrap
+
+try:
+ from urllib.parse import urlparse
+except ImportError:
+ from urlparse import urlparse
+
+try:
+ import portage
+except ImportError:
+ from os import path as osp
+ sys.path.insert(0, osp.join(osp.dirname(osp.dirname(
+ osp.realpath(__file__))), "pym"))
+ import portage
+
def parse_args(argv):
    """Parse the binhost-snapshot command line.

    Returns a (parser, options, args) triple where args holds exactly the
    four required positional arguments; anything else aborts via
    parser.error().
    """
    prog_name = os.path.basename(argv[0])

    prog_desc = ("This program will copy src_pkg_dir to snapshot_dir "
        "and inside binhost_dir it will create a Packages index file "
        "which refers to snapshot_uri. This is intended to solve race "
        "conditions on binhosts as described at http://crosbug.com/3225.")

    # Assemble the multi-line usage text from its pieces.
    usage = "\n".join(
        [prog_name + ' [options] '
            '<src_pkg_dir> <snapshot_dir> <snapshot_uri> <binhost_dir>',
         '']
        + textwrap.wrap(prog_desc, 70)
        + ['',
           'Required Arguments:',
           '',
           ' src_pkg_dir - the source $PKGDIR',
           ' snapshot_dir - destination snapshot directory (must not exist)',
           ' snapshot_uri - URI which refers to snapshot_dir from the',
           ' client side',
           ' binhost_dir - directory in which to write Packages index with',
           ' snapshot_uri'])

    parser = optparse.OptionParser(usage=usage)
    parser.add_option('--hardlinks',
        help='create hardlinks (y or n, default is y)',
        choices=('y', 'n'))
    parser.set_defaults(hardlinks='y')

    options, args = parser.parse_args(argv[1:])
    if len(args) != 4:
        parser.error("Required 4 arguments, got %d" % (len(args),))

    return parser, options, args
+
def main(argv):
    """Snapshot src_pkg_dir into snapshot_dir and write a Packages index
    in binhost_dir whose URI header points at snapshot_uri.

    Returns os.EX_OK on success, or 1 when the copy command fails;
    argument problems abort through parser.error().
    """
    parser, options, args = parse_args(argv)

    src_pkg_dir, snapshot_dir, snapshot_uri, binhost_dir = args
    src_pkgs_index = os.path.join(src_pkg_dir, 'Packages')

    # --- validate inputs -------------------------------------------------
    if not os.path.isdir(src_pkg_dir):
        parser.error("src_pkg_dir is not a directory: '%s'" % (src_pkg_dir,))
    if not os.path.isfile(src_pkgs_index):
        parser.error("src_pkg_dir does not contain a " + \
            "'Packages' index: '%s'" % (src_pkg_dir,))

    parse_result = urlparse(snapshot_uri)
    if not (parse_result.scheme and parse_result.netloc and parse_result.path):
        parser.error("snapshot_uri is not a valid URI: '%s'" % (snapshot_uri,))

    if os.path.isdir(snapshot_dir):
        parser.error("snapshot_dir already exists: '%s'" % snapshot_dir)

    # --- create target directories (best effort, then verify) -----------
    snapshot_parent = os.path.dirname(snapshot_dir)
    try:
        os.makedirs(snapshot_parent)
    except OSError:
        pass
    if not os.path.isdir(snapshot_parent):
        parser.error("snapshot_dir parent could not be created: '%s'" % \
            snapshot_parent)

    try:
        os.makedirs(binhost_dir)
    except OSError:
        pass
    if not os.path.isdir(binhost_dir):
        parser.error("binhost_dir could not be created: '%s'" % binhost_dir)

    # --- copy the package directory --------------------------------------
    # -l hardlinks the package files (the default); -p plain-copies them.
    cp_opts = 'RP' + ('p' if options.hardlinks == 'n' else 'l')
    cp_cmd = 'cp -%s %s %s' % (
        cp_opts,
        portage._shell_quote(src_pkg_dir),
        portage._shell_quote(snapshot_dir)
    )
    ret = os.system(cp_cmd)
    if not (os.WIFEXITED(ret) and os.WEXITSTATUS(ret) == os.EX_OK):
        return 1

    # --- rewrite the Packages index with our URI header -------------------
    infile = io.open(portage._unicode_encode(src_pkgs_index,
        encoding=portage._encodings['fs'], errors='strict'),
        mode='r', encoding=portage._encodings['repo.content'],
        errors='strict')

    outfile = portage.util.atomic_ofstream(
        os.path.join(binhost_dir, "Packages"),
        encoding=portage._encodings['repo.content'],
        errors='strict')

    # Copy the header, dropping any pre-existing URI line and inserting
    # ours just before the blank line that terminates the header.
    for line in infile:
        if line.startswith('URI:'):
            continue
        if not line.strip():
            outfile.write("URI: %s\n\n" % snapshot_uri)
            break
        outfile.write(line)

    # Copy the remainder of the index verbatim.
    for line in infile:
        outfile.write(line)

    infile.close()
    outfile.close()

    return os.EX_OK

if __name__ == "__main__":
    sys.exit(main(sys.argv))
diff --git a/portage_with_autodep/bin/check-implicit-pointer-usage.py b/portage_with_autodep/bin/check-implicit-pointer-usage.py
new file mode 100755
index 0000000..8822c45
--- /dev/null
+++ b/portage_with_autodep/bin/check-implicit-pointer-usage.py
@@ -0,0 +1,84 @@
+#!/usr/bin/python
+
+# Ripped from HP and updated from Debian
+# Update by Gentoo to support unicode output
+
+#
+# Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
+# David Mosberger <davidm@hpl.hp.com>
+#
+# Scan standard input for GCC warning messages that are likely to
+# source of real 64-bit problems. In particular, see whether there
+# are any implicitly declared functions whose return values are later
+# interpreted as pointers. Those are almost guaranteed to cause
+# crashes.
+#
+
+from __future__ import print_function
+
+import re
+import sys
+
# Regexes for the two GCC diagnostics of interest.  Bug fix: raw string
# literals are used so that "\d" is a regex escape rather than an invalid
# string escape (a SyntaxWarning, and eventually an error, in newer Python).
implicit_pattern = re.compile(r"([^:]*):(\d+): warning: implicit declaration "
                              r"of function [`']([^']*)'")
pointer_pattern = (
    r"([^:]*):(\d+): warning: "
    r"("
    r"(assignment"
    r"|initialization"
    r"|return"
    r"|passing arg \d+ of `[^']*'"
    r"|passing arg \d+ of pointer to function"
    r") makes pointer from integer without a cast"
    r"|"
    r"cast to pointer from integer of different size)")

if sys.hexversion < 0x3000000:
    # Use encoded byte strings in python-2.x, since the python ebuilds are
    # known to remove the encodings module when USE=build is enabled (thus
    # disabling unicode decoding/encoding). The portage module has a
    # workaround for this, but currently we don't import that here since we
    # don't want to trigger potential sandbox violations due to stale pyc
    # files for the portage module.
    unicode_quote_open = '\xE2\x80\x98'
    unicode_quote_close = '\xE2\x80\x99'
    def write(msg):
        # Byte strings go straight to stdout under python-2.x.
        sys.stdout.write(msg)
else:
    unicode_quote_open = '\u2018'
    unicode_quote_close = '\u2019'
    def write(msg):
        # Encode explicitly so output never depends on the locale.
        sys.stdout.buffer.write(msg.encode('utf_8', 'backslashreplace'))

pointer_pattern = re.compile(pointer_pattern)
+
# Track the most recent "implicit declaration" warning so a later
# pointer-conversion warning on the same file/line can be correlated.
last_implicit_filename = ""
last_implicit_linenum = -1
last_implicit_func = ""

while True:
    if sys.hexversion >= 0x3000000:
        line = sys.stdin.buffer.readline().decode('utf_8', 'replace')
    else:
        line = sys.stdin.readline()
    if not line:
        break

    # Translate unicode open/close quotes to ascii ones so a single pair
    # of regexes handles both GCC quoting styles.
    line = line.replace(unicode_quote_open, "`").replace(unicode_quote_close, "'")

    implicit = implicit_pattern.match(line)
    if implicit:
        last_implicit_filename = implicit.group(1)
        last_implicit_linenum = int(implicit.group(2))
        last_implicit_func = implicit.group(3)
        continue

    converted = pointer_pattern.match(line)
    if converted \
            and last_implicit_filename == converted.group(1) \
            and last_implicit_linenum == int(converted.group(2)):
        write("Function `%s' implicitly converted to pointer at " \
            "%s:%d\n" % (last_implicit_func,
                last_implicit_filename,
                last_implicit_linenum))
diff --git a/portage_with_autodep/bin/clean_locks b/portage_with_autodep/bin/clean_locks
new file mode 100755
index 0000000..8c4299c
--- /dev/null
+++ b/portage_with_autodep/bin/clean_locks
@@ -0,0 +1,47 @@
+#!/usr/bin/python -O
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import sys, errno
+try:
+ import portage
+except ImportError:
+ from os import path as osp
+ sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+ import portage
+
+from portage import os
+
# With no directories (or an explicit help flag), print usage and bail out.
if not sys.argv[1:] or "--help" in sys.argv or "-h" in sys.argv:
    import portage
    print()
    for usage_line in (
            "You must specify directories with hardlink-locks to clean.",
            "You may optionally specify --force, which will remove all",
            "of the locks, even if we can't establish if they are in use.",
            "Please attempt cleaning without force first."):
        print(usage_line)
    print()
    print("%s %s/.locks" % (sys.argv[0], portage.settings["DISTDIR"]))
    print("%s --force %s/.locks" % (sys.argv[0], portage.settings["DISTDIR"]))
    print()
    sys.exit(1)
+
# --force removes every lock, even those that may still be in use.
force = "--force" in sys.argv[1:]

for lock_dir in sys.argv[1:]:
    if lock_dir == "--force":
        continue
    try:
        for removed in portage.locks.hardlock_cleanup(lock_dir,
                remove_all_locks=force):
            print(removed)
        print()

    except OSError as e:
        if e.errno in (errno.ENOENT, errno.ENOTDIR):
            print("!!! %s is not a directory or does not exist" % lock_dir)
        else:
            raise
        # Note: exits after the first failed directory (original behavior).
        sys.exit(e.errno)
diff --git a/portage_with_autodep/bin/dispatch-conf b/portage_with_autodep/bin/dispatch-conf
new file mode 100755
index 0000000..1e21a52
--- /dev/null
+++ b/portage_with_autodep/bin/dispatch-conf
@@ -0,0 +1,434 @@
+#!/usr/bin/python -O
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+#
+# dispatch-conf -- Integrate modified configs, post-emerge
+#
+# Jeremy Wohl (http://igmus.org)
+#
+# TODO
+# dialog menus
+#
+
+from __future__ import print_function
+
+from stat import ST_GID, ST_MODE, ST_UID
+from random import random
+import atexit, re, shutil, stat, sys
+
+try:
+ import portage
+except ImportError:
+ from os import path as osp
+ sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+ import portage
+
+from portage import os
+from portage import dispatch_conf
+from portage import _unicode_decode
+from portage.dispatch_conf import diffstatusoutput_len
+from portage.process import find_binary
+
# Shell command templates used to find update candidates and compare configs.
FIND_EXTANT_CONFIGS = "find '%s' %s -name '._cfg????_%s' ! -name '.*~' ! -iname '.*.bak' -print"
DIFF_CONTENTS = "diff -Nu '%s' '%s'"
DIFF_CVS_INTERP = "diff -Nu '%s' '%s' | grep '^[+-][^+-]' | grep -v '# .Header:.*'"
DIFF_WSCOMMENTS = "diff -Nu '%s' '%s' | grep '^[+-][^+-]' | grep -v '^[-+]#' | grep -v '^[-+][:space:]*$'"

# We need a secure scratch dir and python does silly verbose errors on the use of tempnam
oldmask = os.umask(0o077)
SCRATCH_DIR = None
while SCRATCH_DIR is None:
    try:
        # Random 8-character alphanumeric suffix (~1/3 upper, lower, digit).
        candidate = "/tmp/dispatch-conf."
        for _ in range(8):
            if int(random() * 3) == 0:
                candidate += chr(int(65 + random() * 26.0))
            elif int(random() * 2) == 0:
                candidate += chr(int(97 + random() * 26.0))
            else:
                candidate += chr(int(48 + random() * 10.0))
        if os.path.exists(candidate):
            continue
        os.mkdir(candidate)
        SCRATCH_DIR = candidate
    except OSError as e:
        # errno 17 == EEXIST: lost the creation race; retry with a new name.
        if e.errno != 17:
            raise
os.umask(oldmask)

# Ensure the scratch dir is deleted on interpreter exit.
def cleanup(mydir=SCRATCH_DIR):
    shutil.rmtree(mydir)
atexit.register(cleanup)

MANDATORY_OPTS = [ 'archive-dir', 'diff', 'replace-cvs', 'replace-wscomments', 'merge' ]
+
+class dispatch:
+ options = {}
+
+ def grind (self, config_paths):
+ confs = []
+ count = 0
+
+ config_root = '/'
+ self.options = portage.dispatch_conf.read_config(MANDATORY_OPTS)
+
+ if "log-file" in self.options:
+ if os.path.isfile(self.options["log-file"]):
+ shutil.copy(self.options["log-file"], self.options["log-file"] + '.old')
+ if os.path.isfile(self.options["log-file"]) \
+ or not os.path.exists(self.options["log-file"]):
+ open(self.options["log-file"], 'w').close() # Truncate it
+ os.chmod(self.options["log-file"], 0o600)
+ else:
+ self.options["log-file"] = "/dev/null"
+
+ #
+ # Build list of extant configs
+ #
+
+ for path in config_paths:
+ path = portage.normalize_path(path)
+ try:
+ mymode = os.stat(path).st_mode
+ except OSError:
+ continue
+ basename = "*"
+ find_opts = "-name '.*' -type d -prune -o"
+ if not stat.S_ISDIR(mymode):
+ path, basename = os.path.split(path)
+ find_opts = "-maxdepth 1"
+
+ confs += self.massage(os.popen(FIND_EXTANT_CONFIGS % (path, find_opts, basename)).readlines())
+
+ if self.options['use-rcs'] == 'yes':
+ for rcs_util in ("rcs", "ci", "co", "rcsmerge"):
+ if not find_binary(rcs_util):
+ print('dispatch-conf: Error finding all RCS utils and " + \
+ "use-rcs=yes in config; fatal', file=sys.stderr)
+ return False
+
+
+ # config file freezing support
+ frozen_files = set(self.options.get("frozen-files", "").split())
+ auto_zapped = []
+ protect_obj = portage.util.ConfigProtect(
+ config_root, config_paths,
+ portage.util.shlex_split(
+ portage.settings.get('CONFIG_PROTECT_MASK', '')))
+
+ #
+ # Remove new configs identical to current
+ # and
+ # Auto-replace configs a) whose differences are simply CVS interpolations,
+ # or b) whose differences are simply ws or comments,
+ # or c) in paths now unprotected by CONFIG_PROTECT_MASK,
+ #
+
+ def f (conf):
+ mrgconf = re.sub(r'\._cfg', '._mrg', conf['new'])
+ archive = os.path.join(self.options['archive-dir'], conf['current'].lstrip('/'))
+ if self.options['use-rcs'] == 'yes':
+ mrgfail = portage.dispatch_conf.rcs_archive(archive, conf['current'], conf['new'], mrgconf)
+ else:
+ mrgfail = portage.dispatch_conf.file_archive(archive, conf['current'], conf['new'], mrgconf)
+ if os.path.exists(archive + '.dist'):
+ unmodified = diffstatusoutput_len(DIFF_CONTENTS % (conf['current'], archive + '.dist'))[1] == 0
+ else:
+ unmodified = 0
+ if os.path.exists(mrgconf):
+ if mrgfail or diffstatusoutput_len(DIFF_CONTENTS % (conf['new'], mrgconf))[1] == 0:
+ os.unlink(mrgconf)
+ newconf = conf['new']
+ else:
+ newconf = mrgconf
+ else:
+ newconf = conf['new']
+
+ if newconf == mrgconf and \
+ self.options.get('ignore-previously-merged') != 'yes' and \
+ os.path.exists(archive+'.dist') and \
+ diffstatusoutput_len(DIFF_CONTENTS % (archive+'.dist', conf['new']))[1] == 0:
+ # The current update is identical to the archived .dist
+ # version that has previously been merged.
+ os.unlink(mrgconf)
+ newconf = conf['new']
+
+ mystatus, myoutput_len = diffstatusoutput_len(
+ DIFF_CONTENTS % (conf ['current'], newconf))
+ same_file = 0 == myoutput_len
+ if mystatus >> 8 == 2:
+ # Binary files differ
+ same_cvs = False
+ same_wsc = False
+ else:
+ same_cvs = 0 == diffstatusoutput_len(
+ DIFF_CVS_INTERP % (conf ['current'], newconf))[1]
+ same_wsc = 0 == diffstatusoutput_len(
+ DIFF_WSCOMMENTS % (conf ['current'], newconf))[1]
+
+ # Do options permit?
+ same_cvs = same_cvs and self.options['replace-cvs'] == 'yes'
+ same_wsc = same_wsc and self.options['replace-wscomments'] == 'yes'
+ unmodified = unmodified and self.options['replace-unmodified'] == 'yes'
+
+ if same_file:
+ os.unlink (conf ['new'])
+ self.post_process(conf['current'])
+ if os.path.exists(mrgconf):
+ os.unlink(mrgconf)
+ return False
+ elif conf['current'] in frozen_files:
+ """Frozen files are automatically zapped. The new config has
+ already been archived with a .new suffix. When zapped, it is
+ left with the .new suffix (post_process is skipped), since it
+ hasn't been merged into the current config."""
+ auto_zapped.append(conf['current'])
+ os.unlink(conf['new'])
+ try:
+ os.unlink(mrgconf)
+ except OSError:
+ pass
+ return False
+ elif unmodified or same_cvs or same_wsc or \
+ not protect_obj.isprotected(conf['current']):
+ self.replace(newconf, conf['current'])
+ self.post_process(conf['current'])
+ if newconf == mrgconf:
+ os.unlink(conf['new'])
+ elif os.path.exists(mrgconf):
+ os.unlink(mrgconf)
+ return False
+ else:
+ return True
+
+ confs = [x for x in confs if f(x)]
+
+ #
+ # Interactively process remaining
+ #
+
+ valid_input = "qhtnmlezu"
+
+ for conf in confs:
+ count = count + 1
+
+ newconf = conf['new']
+ mrgconf = re.sub(r'\._cfg', '._mrg', newconf)
+ if os.path.exists(mrgconf):
+ newconf = mrgconf
+ show_new_diff = 0
+
+ while 1:
+ clear_screen()
+ if show_new_diff:
+ cmd = self.options['diff'] % (conf['new'], mrgconf)
+ spawn_shell(cmd)
+ show_new_diff = 0
+ else:
+ cmd = self.options['diff'] % (conf['current'], newconf)
+ spawn_shell(cmd)
+
+ print()
+ print('>> (%i of %i) -- %s' % (count, len(confs), conf ['current']))
+ print('>> q quit, h help, n next, e edit-new, z zap-new, u use-new\n m merge, t toggle-merge, l look-merge: ', end=' ')
+
+ # In some cases getch() will return some spurious characters
+ # that do not represent valid input. If we don't validate the
+ # input then the spurious characters can cause us to jump
+ # back into the above "diff" command immediatly after the user
+ # has exited it (which can be quite confusing and gives an
+ # "out of control" feeling).
+ while True:
+ c = getch()
+ if c in valid_input:
+ sys.stdout.write('\n')
+ sys.stdout.flush()
+ break
+
+ if c == 'q':
+ sys.exit (0)
+ if c == 'h':
+ self.do_help ()
+ continue
+ elif c == 't':
+ if newconf == mrgconf:
+ newconf = conf['new']
+ elif os.path.exists(mrgconf):
+ newconf = mrgconf
+ continue
+ elif c == 'n':
+ break
+ elif c == 'm':
+ merged = SCRATCH_DIR+"/"+os.path.basename(conf['current'])
+ print()
+ ret = os.system (self.options['merge'] % (merged, conf ['current'], newconf))
+ ret = os.WEXITSTATUS(ret)
+ if ret < 2:
+ ret = 0
+ if ret:
+ print("Failure running 'merge' command")
+ continue
+ shutil.copyfile(merged, mrgconf)
+ os.remove(merged)
+ mystat = os.lstat(conf['new'])
+ os.chmod(mrgconf, mystat[ST_MODE])
+ os.chown(mrgconf, mystat[ST_UID], mystat[ST_GID])
+ newconf = mrgconf
+ continue
+ elif c == 'l':
+ show_new_diff = 1
+ continue
+ elif c == 'e':
+ if 'EDITOR' not in os.environ:
+ os.environ['EDITOR']='nano'
+ os.system(os.environ['EDITOR'] + ' ' + newconf)
+ continue
+ elif c == 'z':
+ os.unlink(conf['new'])
+ if os.path.exists(mrgconf):
+ os.unlink(mrgconf)
+ break
+ elif c == 'u':
+ self.replace(newconf, conf ['current'])
+ self.post_process(conf['current'])
+ if newconf == mrgconf:
+ os.unlink(conf['new'])
+ elif os.path.exists(mrgconf):
+ os.unlink(mrgconf)
+ break
+ else:
+ raise AssertionError("Invalid Input: %s" % c)
+
+ if auto_zapped:
+ print()
+ print(" One or more updates are frozen and have been automatically zapped:")
+ print()
+ for frozen in auto_zapped:
+ print(" * '%s'" % frozen)
+ print()
+
+ def replace (self, newconf, curconf):
+ """Replace current config with the new/merged version. Also logs
+ the diff of what changed into the configured log file."""
+ os.system((DIFF_CONTENTS % (curconf, newconf)) + '>>' + self.options["log-file"])
+ try:
+ os.rename(newconf, curconf)
+ except (IOError, os.error) as why:
+ print('dispatch-conf: Error renaming %s to %s: %s; fatal' % \
+ (newconf, curconf, str(why)), file=sys.stderr)
+
+
+ def post_process(self, curconf):
+ archive = os.path.join(self.options['archive-dir'], curconf.lstrip('/'))
+ if self.options['use-rcs'] == 'yes':
+ portage.dispatch_conf.rcs_archive_post_process(archive)
+ else:
+ portage.dispatch_conf.file_archive_post_process(archive)
+
+
+ def massage (self, newconfigs):
+ """Sort, rstrip, remove old versions, break into triad hash.
+
+ Triad is dictionary of current (/etc/make.conf), new (/etc/._cfg0003_make.conf)
+ and dir (/etc).
+
+ We keep ._cfg0002_conf over ._cfg0001_conf and ._cfg0000_conf.
+ """
+ h = {}
+ configs = []
+ newconfigs.sort ()
+
+ for nconf in newconfigs:
+ nconf = nconf.rstrip ()
+ conf = re.sub (r'\._cfg\d+_', '', nconf)
+ dirname = os.path.dirname(nconf)
+ conf_map = {
+ 'current' : conf,
+ 'dir' : dirname,
+ 'new' : nconf,
+ }
+
+ if conf in h:
+ mrgconf = re.sub(r'\._cfg', '._mrg', h[conf]['new'])
+ if os.path.exists(mrgconf):
+ os.unlink(mrgconf)
+ os.unlink(h[conf]['new'])
+ h[conf].update(conf_map)
+ else:
+ h[conf] = conf_map
+ configs.append(conf_map)
+
+ return configs
+
+
+ def do_help (self):
+ print(); print
+
+ print(' u -- update current config with new config and continue')
+ print(' z -- zap (delete) new config and continue')
+ print(' n -- skip to next config, leave all intact')
+ print(' e -- edit new config')
+ print(' m -- interactively merge current and new configs')
+ print(' l -- look at diff between pre-merged and merged configs')
+ print(' t -- toggle new config between merged and pre-merged state')
+ print(' h -- this screen')
+ print(' q -- quit')
+
+ print(); print('press any key to return to diff...', end=' ')
+
+ getch ()
+
+
def getch ():
    """Read and return one raw character from the terminal.

    Based on the ASPN recipe by Danny Yoo: switch stdin to raw mode,
    read a single byte, then always restore the saved terminal settings.
    """
    import sys, tty, termios

    fd = sys.stdin.fileno()
    saved_settings = termios.tcgetattr(fd)
    try:
        tty.setraw(fd)
        ch = sys.stdin.read(1)
    finally:
        # Restore cooked mode even if the read was interrupted.
        termios.tcsetattr(fd, termios.TCSADRAIN, saved_settings)
    return ch
+
def clear_screen():
    """Clear the terminal, preferring terminfo; fall back to `clear`."""
    try:
        import curses
    except ImportError:
        curses = None
    if curses is not None:
        try:
            curses.setupterm()
            sys.stdout.write(_unicode_decode(curses.tigetstr("clear")))
            sys.stdout.flush()
            return
        except curses.error:
            pass
    # No curses (or no usable terminfo entry): shell out.
    os.system("clear 2>/dev/null")
+
from portage.process import find_binary, spawn

# Prefer the user's shell; fall back to any sh found in PATH.
shell = os.environ.get("SHELL")
if not shell or not os.access(shell, os.EX_OK):
    shell = find_binary("sh")

def spawn_shell(cmd):
    """Run cmd through the shell, wired to our stdin/stdout/stderr."""
    if not shell:
        # No usable shell was found; let os.system do its best.
        os.system(cmd)
        return
    spawn([shell, "-c", cmd], env=os.environ,
        fd_pipes={
            0: sys.stdin.fileno(),
            1: sys.stdout.fileno(),
            2: sys.stderr.fileno()})
+
# run
d = dispatch ()

if len(sys.argv) > 1:
    # Explicit paths were given on the command line (used for testing).
    d.grind(sys.argv[1:])
else:
    # Normal operation: process everything under CONFIG_PROTECT.
    protected_paths = portage.util.shlex_split(
        portage.settings.get('CONFIG_PROTECT', ''))
    d.grind(protected_paths)
diff --git a/portage_with_autodep/bin/dohtml.py b/portage_with_autodep/bin/dohtml.py
new file mode 100755
index 0000000..00258ec
--- /dev/null
+++ b/portage_with_autodep/bin/dohtml.py
@@ -0,0 +1,191 @@
+#!/usr/bin/python
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+#
+# Typical usage:
+# dohtml -r docs/*
+# - put all files and directories in docs into /usr/share/doc/${PF}/html
+# dohtml foo.html
+# - put foo.html into /usr/share/doc/${PF}/html
+#
+#
+# Detailed usage:
+# dohtml <list-of-files>
+# - will install the files in the list of files (space-separated list) into
+# /usr/share/doc/${PF}/html, provided the file ends in .htm, .html, .css,
+#    .js, .gif, .jpeg, .jpg, or .png.
+# dohtml -r <list-of-files-and-directories>
+# - will do as 'dohtml', but recurse into all directories, as long as the
+# directory name is not CVS
+# dohtml -A jpe,java [-r] <list-of-files[-and-directories]>
+# - will do as 'dohtml' but add .jpe,.java (default filter list is
+# added to your list)
+# dohtml -a png,gif,html,htm [-r] <list-of-files[-and-directories]>
+# - will do as 'dohtml' but filter on .png,.gif,.html,.htm (default filter
+# list is ignored)
+# dohtml -x CVS,SCCS,RCS -r <list-of-files-and-directories>
+# - will do as 'dohtml -r', but ignore directories named CVS, SCCS, RCS
+#
+
+from __future__ import print_function
+
+import os
+import sys
+
def dodir(path):
    """Create directory *path* (and parents) via ``install -d``, blocking until done."""
    os.spawnlp(os.P_WAIT, "install", "install", "-d", path)
+
def dofile(src,dst):
    """Install *src* to *dst* with mode 0644 via ``install``, blocking until done."""
    os.spawnlp(os.P_WAIT, "install", "install", "-m0644", src, dst)
+
def eqawarn(lines):
    """Forward each line to the ebuild environment's eqawarn helper."""
    parts = ["source '%s/isolated-functions.sh' ; " %
        os.environ["PORTAGE_BIN_PATH"]]
    for line in lines:
        parts.append("eqawarn \"%s\" ; " % line)
    os.spawnlp(os.P_WAIT, "bash", "bash", "-c", "".join(parts))
+
# Directories seen while -r was not in effect; reported via eqawarn later.
skipped_directories = []

def install(basename, dirname, options, prefix=""):
    """Install one file into the package's html doc tree, or recurse into
    a directory when options.recurse is set.

    Returns False for missing paths and for directories that were skipped
    or disallowed; any existing regular file yields True, even when its
    extension is filtered out.
    """
    fullpath = basename
    if prefix:
        fullpath = prefix + "/" + fullpath
    if dirname:
        fullpath = dirname + "/" + fullpath

    doc_subtree = options.DOCDESTTREE if options.DOCDESTTREE else "html"
    destdir = (options.D + "usr/share/doc/" + options.PF + "/" + doc_subtree
        + "/" + options.doc_prefix + "/" + prefix)

    if not os.path.exists(fullpath):
        sys.stderr.write("!!! dohtml: %s does not exist\n" % fullpath)
        return False

    if os.path.isfile(fullpath):
        ext = os.path.splitext(basename)[1]
        if (len(ext) and ext[1:] in options.allowed_exts) \
                or basename in options.allowed_files:
            dodir(destdir)
            dofile(fullpath, destdir + "/" + basename)
        return True

    if options.recurse and os.path.isdir(fullpath) \
            and basename not in options.disallowed_dirs:
        child_prefix = prefix + "/" + basename if prefix else basename
        for entry in os.listdir(fullpath):
            install(entry, dirname, options, child_prefix)
        return True

    if not options.recurse and os.path.isdir(fullpath):
        skipped_directories.append(fullpath)
        return False

    return False
+
+
class OptionsClass:
    """Mutable bag of dohtml settings, seeded from the ebuild environment."""
    def __init__(self):
        # Package name/version, image root and doc subtree come from the
        # ebuild environment when present.
        self.PF = os.environ.get("PF", "")
        self.D = os.environ.get("D", "")
        self.DOCDESTTREE = os.environ.get("_E_DOCDESTTREE_", "")

        self.allowed_exts = [ 'htm', 'html', 'css', 'js',
            'gif', 'jpeg', 'jpg', 'png' ]
        self.allowed_files = []
        self.disallowed_dirs = [ 'CVS' ]
        self.recurse = False
        self.verbose = False
        self.doc_prefix = ""

def print_help():
    """Print the usage summary, including the built-in defaults."""
    opts = OptionsClass()

    for text in (
            "dohtml [-a .foo,.bar] [-A .foo,.bar] [-f foo,bar] [-x foo,bar]",
            " [-r] [-V] <file> [file ...]",
            "",
            " -a Set the list of allowed to those that are specified.",
            " Default: " + ",".join(opts.allowed_exts),
            " -A Extend the list of allowed file types.",
            " -f Set list of allowed extensionless file names.",
            " -x Set directories to be excluded from recursion.",
            " Default: " + ",".join(opts.disallowed_dirs),
            " -p Set a document prefix for installed files (empty by default).",
            " -r Install files and directories recursively.",
            " -V Be verbose.",
            ""):
        print(text)

def parse_args():
    """Parse sys.argv into an (OptionsClass instance, positional args) pair."""
    options = OptionsClass()
    args = []

    argv = sys.argv
    idx = 1
    while idx < len(argv):
        flag = argv[idx]
        if flag == "-h":
            print_help()
            sys.exit(0)
        elif flag == "-r":
            options.recurse = True
        elif flag == "-V":
            options.verbose = True
        elif flag in ("-A", "-a", "-f", "-x", "-p"):
            # These flags consume the following argv slot as their value.
            idx += 1
            if idx == len(argv):
                print_help()
                sys.exit(0)
            elif flag == "-p":
                options.doc_prefix = argv[idx]
            else:
                values = argv[idx].split(",")
                if flag == "-A":
                    options.allowed_exts.extend(values)
                elif flag == "-a":
                    options.allowed_exts = values
                elif flag == "-f":
                    options.allowed_files = values
                elif flag == "-x":
                    options.disallowed_dirs = values
        else:
            args.append(argv[idx])
        idx += 1

    return (options, args)
+
def main():
    """dohtml entry point: install each argument, warn about skipped
    directories, exit 0 if anything was installed and 1 otherwise."""
    (options, args) = parse_args()

    if options.verbose:
        print("Allowed extensions:", options.allowed_exts)
        print("Document prefix : '" + options.doc_prefix + "'")
        print("Allowed files :", options.allowed_files)

    any_success = False
    for target in args:
        any_success |= install(os.path.basename(target),
            os.path.dirname(target), options)

    # Directories hit without -r are a QA problem worth flagging.
    for skipped in skipped_directories:
        eqawarn(["QA Notice: dohtml on directory " + \
            "'%s' without recursion option" % skipped])

    sys.exit(0 if any_success else 1)

if __name__ == "__main__":
    main()
diff --git a/portage_with_autodep/bin/ebuild b/portage_with_autodep/bin/ebuild
new file mode 100755
index 0000000..f8b6d79
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild
@@ -0,0 +1,346 @@
+#!/usr/bin/python -O
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import signal
+import sys
+# This block ensures that ^C interrupts are handled quietly.
+try:
+
+ def exithandler(signum,frame):
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
+ sys.exit(128 + signum)
+
+ signal.signal(signal.SIGINT, exithandler)
+ signal.signal(signal.SIGTERM, exithandler)
+ # Prevent "[Errno 32] Broken pipe" exceptions when
+ # writing to a pipe.
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+except KeyboardInterrupt:
+ sys.exit(128 + signal.SIGINT)
+
+def debug_signal(signum, frame):
+ import pdb
+ pdb.set_trace()
+signal.signal(signal.SIGUSR1, debug_signal)
+
+import imp
+import optparse
+import os
+
+description = "See the ebuild(1) man page for more info"
+usage = "Usage: ebuild <ebuild file> <command> [command] ..."
+parser = optparse.OptionParser(description=description, usage=usage)
+
+force_help = "When used together with the digest or manifest " + \
+ "command, this option forces regeneration of digests for all " + \
+ "distfiles associated with the current ebuild. Any distfiles " + \
+ "that do not already exist in ${DISTDIR} will be automatically fetched."
+
+parser.add_option("--force", help=force_help, action="store_true", dest="force")
+parser.add_option("--color", help="enable or disable color output",
+ type="choice", choices=("y", "n"))
+parser.add_option("--debug", help="show debug output",
+ action="store_true", dest="debug")
+parser.add_option("--ignore-default-opts",
+ action="store_true",
+ help="do not use the EBUILD_DEFAULT_OPTS environment variable")
+parser.add_option("--skip-manifest", help="skip all manifest checks",
+ action="store_true", dest="skip_manifest")
+
+opts, pargs = parser.parse_args(args=sys.argv[1:])
+
+if len(pargs) < 2:
+ parser.error("missing required args")
+
+if "merge" in pargs:
+ print("Disabling noauto in features... merge disables it. (qmerge doesn't)")
+ os.environ["FEATURES"] = os.environ.get("FEATURES", "") + " -noauto"
+
+os.environ["PORTAGE_CALLER"]="ebuild"
+try:
+ import portage
+except ImportError:
+ from os import path as osp
+ sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+ import portage
+
+portage.dep._internal_warnings = True
+from portage import os
+from portage import _encodings
+from portage import _shell_quote
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.const import VDB_PATH
+from _emerge.Package import Package
+from _emerge.RootConfig import RootConfig
+
+if not opts.ignore_default_opts:
+ default_opts = portage.settings.get("EBUILD_DEFAULT_OPTS", "").split()
+ opts, pargs = parser.parse_args(default_opts + sys.argv[1:])
+
+debug = opts.debug
+force = opts.force
+
+import portage.util, portage.const
+
+# do this _after_ 'import portage' to prevent unnecessary tracing
+if debug and "python-trace" in portage.features:
+ import portage.debug
+ portage.debug.set_trace(True)
+
+if not opts.color == 'y' and \
+ (opts.color == 'n' or \
+ portage.settings.get('NOCOLOR') in ('yes', 'true') or \
+ portage.settings.get('TERM') == 'dumb' or \
+ not sys.stdout.isatty()):
+ portage.output.nocolor()
+ portage.settings.unlock()
+ portage.settings['NOCOLOR'] = 'true'
+ portage.settings.lock()
+
+ebuild = pargs.pop(0)
+
+pf = None
+if ebuild.endswith(".ebuild"):
+ pf = os.path.basename(ebuild)[:-7]
+
+if pf is None:
+ portage.writemsg("'%s' does not end with '.ebuild'.\n" % \
+ (ebuild,), noiselevel=-1)
+ sys.exit(1)
+
+if not os.path.isabs(ebuild):
+ mycwd = os.getcwd()
+	# Try to get the non-canonical path from the PWD environment variable, since
+	# the canonical path returned from os.getcwd() may be unusable in
+	# cases where the directory structure is built from symlinks.
+ pwd = os.environ.get('PWD', '')
+ if sys.hexversion < 0x3000000:
+ pwd = _unicode_decode(pwd, encoding=_encodings['content'],
+ errors='strict')
+ if pwd and pwd != mycwd and \
+ os.path.realpath(pwd) == mycwd:
+ mycwd = portage.normalize_path(pwd)
+ ebuild = os.path.join(mycwd, ebuild)
+ebuild = portage.normalize_path(ebuild)
+# portdbapi uses the canonical path for the base of the portage tree, but
+# subdirectories of the base can be built from symlinks (like crossdev does).
+ebuild_portdir = os.path.realpath(
+ os.path.dirname(os.path.dirname(os.path.dirname(ebuild))))
+ebuild = os.path.join(ebuild_portdir, *ebuild.split(os.path.sep)[-3:])
+vdb_path = os.path.realpath(os.path.join(portage.settings['EROOT'], VDB_PATH))
+
+# Make sure that portdb.findname() returns the correct ebuild.
+if ebuild_portdir != vdb_path and \
+ ebuild_portdir not in portage.portdb.porttrees:
+ if sys.hexversion >= 0x3000000:
+ os.environ["PORTDIR_OVERLAY"] = \
+ os.environ.get("PORTDIR_OVERLAY","") + \
+ " " + _shell_quote(ebuild_portdir)
+ else:
+ os.environ["PORTDIR_OVERLAY"] = \
+ os.environ.get("PORTDIR_OVERLAY","") + \
+ " " + _unicode_encode(_shell_quote(ebuild_portdir),
+ encoding=_encodings['content'], errors='strict')
+
+ print("Appending %s to PORTDIR_OVERLAY..." % ebuild_portdir)
+ imp.reload(portage)
+
+# Constrain eclass resolution to the master(s)
+# that are specified in layout.conf (using an
+# approach similar to repoman's).
+myrepo = None
+if ebuild_portdir != vdb_path:
+ myrepo = portage.portdb.getRepositoryName(ebuild_portdir)
+ repo_info = portage.portdb._repo_info[ebuild_portdir]
+ portage.portdb.porttrees = list(repo_info.eclass_db.porttrees)
+
+if not os.path.exists(ebuild):
+ print("'%s' does not exist." % ebuild)
+ sys.exit(1)
+
+ebuild_split = ebuild.split("/")
+cpv = "%s/%s" % (ebuild_split[-3], pf)
+
+if not portage.catpkgsplit(cpv):
+ print("!!! %s does not follow correct package syntax." % (cpv))
+ sys.exit(1)
+
+if ebuild.startswith(vdb_path):
+ mytree = "vartree"
+ pkg_type = "installed"
+
+ portage_ebuild = portage.db[portage.root][mytree].dbapi.findname(cpv, myrepo=myrepo)
+
+ if os.path.realpath(portage_ebuild) != ebuild:
+ print("!!! Portage seems to think that %s is at %s" % (cpv, portage_ebuild))
+ sys.exit(1)
+
+else:
+ mytree = "porttree"
+ pkg_type = "ebuild"
+
+ portage_ebuild = portage.portdb.findname(cpv, myrepo=myrepo)
+
+ if not portage_ebuild or portage_ebuild != ebuild:
+ print("!!! %s does not seem to have a valid PORTDIR structure." % ebuild)
+ sys.exit(1)
+
+if len(pargs) > 1 and "config" in pargs:
+ print("config must be called on it's own, not combined with any other phase")
+ sys.exit(1)
+
+def discard_digests(myebuild, mysettings, mydbapi):
+ """Discard all distfiles digests for the given ebuild. This is useful when
+ upstream has changed the identity of the distfiles and the user would
+ otherwise have to manually remove the Manifest and files/digest-* files in
+ order to ensure correct results."""
+ try:
+ portage._doebuild_manifest_exempt_depend += 1
+ pkgdir = os.path.dirname(myebuild)
+ fetchlist_dict = portage.FetchlistDict(pkgdir, mysettings, mydbapi)
+ from portage.manifest import Manifest
+ mf = Manifest(pkgdir, mysettings["DISTDIR"],
+ fetchlist_dict=fetchlist_dict, manifest1_compat=False)
+ mf.create(requiredDistfiles=None,
+ assumeDistHashesSometimes=True, assumeDistHashesAlways=True)
+ distfiles = fetchlist_dict[cpv]
+ for myfile in distfiles:
+ try:
+ del mf.fhashdict["DIST"][myfile]
+ except KeyError:
+ pass
+ mf.write()
+ finally:
+ portage._doebuild_manifest_exempt_depend -= 1
+
+portage.settings.validate() # generate warning messages if necessary
+
+build_dir_phases = set(["setup", "unpack", "prepare", "configure", "compile",
+ "test", "install", "package", "rpm", "merge", "qmerge"])
+
+# If the current metadata is invalid then force the ebuild to be
+# sourced again even if $T/environment already exists.
+ebuild_changed = False
+if mytree == "porttree" and build_dir_phases.intersection(pargs):
+ metadata, st, emtime = \
+ portage.portdb._pull_valid_cache(cpv, ebuild, ebuild_portdir)
+ if metadata is None:
+ ebuild_changed = True
+
+tmpsettings = portage.config(clone=portage.settings)
+tmpsettings["PORTAGE_VERBOSE"] = "1"
+tmpsettings.backup_changes("PORTAGE_VERBOSE")
+
+if opts.skip_manifest:
+ tmpsettings["EBUILD_SKIP_MANIFEST"] = "1"
+ tmpsettings.backup_changes("EBUILD_SKIP_MANIFEST")
+
+if opts.skip_manifest or \
+ "digest" in tmpsettings.features or \
+ "digest" in pargs or \
+ "manifest" in pargs:
+ portage._doebuild_manifest_exempt_depend += 1
+
+if "test" in pargs:
+ # This variable is a signal to config.regenerate() to
+ # indicate that the test phase should be enabled regardless
+ # of problems such as masked "test" USE flag.
+ tmpsettings["EBUILD_FORCE_TEST"] = "1"
+ tmpsettings.backup_changes("EBUILD_FORCE_TEST")
+ tmpsettings.features.add("test")
+
+tmpsettings.features.discard("fail-clean")
+
+try:
+ metadata = dict(zip(Package.metadata_keys,
+ portage.db[portage.settings["ROOT"]][mytree].dbapi.aux_get(
+ cpv, Package.metadata_keys, myrepo=myrepo)))
+except KeyError:
+ # aux_get failure, message should have been shown on stderr.
+ sys.exit(1)
+
+root_config = RootConfig(portage.settings,
+ portage.db[portage.settings["ROOT"]], None)
+
+pkg = Package(built=(pkg_type != "ebuild"), cpv=cpv,
+ installed=(pkg_type=="installed"),
+ metadata=metadata, root_config=root_config,
+ type_name=pkg_type)
+
+# Apply package.env and repo-level settings. This allows per-package
+# FEATURES and other variables (possibly PORTAGE_TMPDIR) to be
+# available as soon as possible.
+tmpsettings.setcpv(pkg)
+
+def stale_env_warning():
+ if "clean" not in pargs and \
+ "noauto" not in tmpsettings.features and \
+ build_dir_phases.intersection(pargs):
+ portage.doebuild_environment(ebuild, "setup", portage.root,
+ tmpsettings, debug, 1, portage.portdb)
+ env_filename = os.path.join(tmpsettings["T"], "environment")
+ if os.path.exists(env_filename):
+ msg = ("Existing ${T}/environment for '%s' will be sourced. " + \
+ "Run 'clean' to start with a fresh environment.") % \
+ (tmpsettings["PF"], )
+ from textwrap import wrap
+ msg = wrap(msg, 70)
+ for x in msg:
+ portage.writemsg(">>> %s\n" % x)
+
+ if ebuild_changed:
+ open(os.path.join(tmpsettings['PORTAGE_BUILDDIR'],
+ '.ebuild_changed'), 'w')
+
+from portage.exception import PermissionDenied, \
+ PortagePackageException, UnsupportedAPIException
+
+if 'digest' in tmpsettings.features and \
+ not set(["digest", "manifest"]).intersection(pargs):
+ pargs = ['digest'] + pargs
+
+checked_for_stale_env = False
+
+for arg in pargs:
+ try:
+ if not checked_for_stale_env and arg not in ("digest","manifest"):
+ # This has to go after manifest generation since otherwise
+ # aux_get() might fail due to invalid ebuild digests.
+ stale_env_warning()
+ checked_for_stale_env = True
+
+ if arg in ("digest", "manifest") and force:
+ discard_digests(ebuild, tmpsettings, portage.portdb)
+ a = portage.doebuild(ebuild, arg, portage.root, tmpsettings,
+ debug=debug, tree=mytree,
+ vartree=portage.db[portage.root]['vartree'])
+ except KeyboardInterrupt:
+ print("Interrupted.")
+ a = 1
+ except KeyError:
+ # aux_get error
+ a = 1
+ except UnsupportedAPIException as e:
+ from textwrap import wrap
+ msg = wrap(str(e), 70)
+ del e
+ for x in msg:
+ portage.writemsg("!!! %s\n" % x, noiselevel=-1)
+ a = 1
+ except PortagePackageException as e:
+ portage.writemsg("!!! %s\n" % (e,), noiselevel=-1)
+ a = 1
+ except PermissionDenied as e:
+ portage.writemsg("!!! Permission Denied: %s\n" % (e,), noiselevel=-1)
+ a = 1
+ if a == None:
+ print("Could not run the required binary?")
+ a = 127
+ if a:
+ sys.exit(a)
diff --git a/portage_with_autodep/bin/ebuild-helpers/4/dodoc b/portage_with_autodep/bin/ebuild-helpers/4/dodoc
new file mode 120000
index 0000000..35080ad
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/4/dodoc
@@ -0,0 +1 @@
+../doins \ No newline at end of file
diff --git a/portage_with_autodep/bin/ebuild-helpers/4/dohard b/portage_with_autodep/bin/ebuild-helpers/4/dohard
new file mode 120000
index 0000000..1a6b57a
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/4/dohard
@@ -0,0 +1 @@
+../../banned-helper \ No newline at end of file
diff --git a/portage_with_autodep/bin/ebuild-helpers/4/dosed b/portage_with_autodep/bin/ebuild-helpers/4/dosed
new file mode 120000
index 0000000..1a6b57a
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/4/dosed
@@ -0,0 +1 @@
+../../banned-helper \ No newline at end of file
diff --git a/portage_with_autodep/bin/ebuild-helpers/4/prepalldocs b/portage_with_autodep/bin/ebuild-helpers/4/prepalldocs
new file mode 120000
index 0000000..1a6b57a
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/4/prepalldocs
@@ -0,0 +1 @@
+../../banned-helper \ No newline at end of file
diff --git a/portage_with_autodep/bin/ebuild-helpers/die b/portage_with_autodep/bin/ebuild-helpers/die
new file mode 100755
index 0000000..9869141
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/die
@@ -0,0 +1,7 @@
+#!/bin/bash
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+die "$@"
+exit 1
diff --git a/portage_with_autodep/bin/ebuild-helpers/dobin b/portage_with_autodep/bin/ebuild-helpers/dobin
new file mode 100755
index 0000000..e385455
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/dobin
@@ -0,0 +1,29 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ $# -lt 1 ]] ; then
+ helpers_die "${0##*/}: at least one argument needed"
+ exit 1
+fi
+
+if [[ ! -d ${D}${DESTTREE}/bin ]] ; then
+ install -d "${D}${DESTTREE}/bin" || { helpers_die "${0##*/}: failed to install ${D}${DESTTREE}/bin"; exit 2; }
+fi
+
+ret=0
+
+for x in "$@" ; do
+ if [[ -e ${x} ]] ; then
+ install -m0755 -o ${PORTAGE_INST_UID:-0} -g ${PORTAGE_INST_GID:-0} "${x}" "${D}${DESTTREE}/bin"
+ else
+ echo "!!! ${0##*/}: $x does not exist" 1>&2
+ false
+ fi
+ ((ret|=$?))
+done
+
+[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+exit ${ret}
diff --git a/portage_with_autodep/bin/ebuild-helpers/doconfd b/portage_with_autodep/bin/ebuild-helpers/doconfd
new file mode 100755
index 0000000..e146000
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/doconfd
@@ -0,0 +1,14 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+if [[ $# -lt 1 ]] ; then
+ source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+ helpers_die "${0##*/}: at least one argument needed"
+ exit 1
+fi
+
+exec \
+env \
+INSDESTTREE="/etc/conf.d/" \
+doins "$@"
diff --git a/portage_with_autodep/bin/ebuild-helpers/dodir b/portage_with_autodep/bin/ebuild-helpers/dodir
new file mode 100755
index 0000000..f40bee7
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/dodir
@@ -0,0 +1,10 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+install -d ${DIROPTIONS} "${@/#/${D}/}"
+ret=$?
+[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+exit $ret
diff --git a/portage_with_autodep/bin/ebuild-helpers/dodoc b/portage_with_autodep/bin/ebuild-helpers/dodoc
new file mode 100755
index 0000000..65713db
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/dodoc
@@ -0,0 +1,31 @@
+#!/bin/bash
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [ $# -lt 1 ] ; then
+ helpers_die "${0##*/}: at least one argument needed"
+ exit 1
+fi
+
+dir="${D}usr/share/doc/${PF}/${_E_DOCDESTTREE_}"
+if [ ! -d "${dir}" ] ; then
+ install -d "${dir}"
+fi
+
+ret=0
+for x in "$@" ; do
+ if [ -d "${x}" ] ; then
+ eqawarn "QA Notice: dodoc argument '${x}' is a directory"
+ elif [ -s "${x}" ] ; then
+ install -m0644 "${x}" "${dir}" || { ((ret|=1)); continue; }
+ ecompress --queue "${dir}/${x##*/}"
+ elif [ ! -e "${x}" ] ; then
+ echo "!!! ${0##*/}: $x does not exist" 1>&2
+ ((ret|=1))
+ fi
+done
+
+[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+exit ${ret}
diff --git a/portage_with_autodep/bin/ebuild-helpers/doenvd b/portage_with_autodep/bin/ebuild-helpers/doenvd
new file mode 100755
index 0000000..28ab5d2
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/doenvd
@@ -0,0 +1,14 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+if [[ $# -lt 1 ]] ; then
+ source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+ helpers_die "${0##*/}: at least one argument needed"
+ exit 1
+fi
+
+exec \
+env \
+INSDESTTREE="/etc/env.d/" \
+doins "$@"
diff --git a/portage_with_autodep/bin/ebuild-helpers/doexe b/portage_with_autodep/bin/ebuild-helpers/doexe
new file mode 100755
index 0000000..360800e
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/doexe
@@ -0,0 +1,43 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ $# -lt 1 ]] ; then
+ helpers_die "${0##*/}: at least one argument needed"
+ exit 1
+fi
+
+if [[ ! -d ${D}${_E_EXEDESTTREE_} ]] ; then
+ install -d "${D}${_E_EXEDESTTREE_}"
+fi
+
+TMP=$T/.doexe_tmp
+mkdir "$TMP"
+
+ret=0
+
+for x in "$@" ; do
+ if [ -L "${x}" ] ; then
+ cp "$x" "$TMP"
+ mysrc=$TMP/${x##*/}
+ elif [ -d "${x}" ] ; then
+ vecho "doexe: warning, skipping directory ${x}"
+ continue
+ else
+ mysrc="${x}"
+ fi
+ if [ -e "$mysrc" ] ; then
+ install $EXEOPTIONS "$mysrc" "$D$_E_EXEDESTTREE_"
+ else
+ echo "!!! ${0##*/}: $mysrc does not exist" 1>&2
+ false
+ fi
+ ((ret|=$?))
+done
+
+rm -rf "$TMP"
+
+[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+exit $ret
diff --git a/portage_with_autodep/bin/ebuild-helpers/dohard b/portage_with_autodep/bin/ebuild-helpers/dohard
new file mode 100755
index 0000000..2270487
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/dohard
@@ -0,0 +1,13 @@
+#!/bin/bash
+# Copyright 1999-2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+if [[ $# -ne 2 ]] ; then
+ echo "$0: two arguments needed" 1>&2
+ exit 1
+fi
+
+destdir=${2%/*}
+[[ ! -d ${D}${destdir} ]] && dodir "${destdir}"
+
+exec ln -f "${D}$1" "${D}$2"
diff --git a/portage_with_autodep/bin/ebuild-helpers/dohtml b/portage_with_autodep/bin/ebuild-helpers/dohtml
new file mode 100755
index 0000000..630629a
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/dohtml
@@ -0,0 +1,14 @@
+#!/bin/bash
+# Copyright 2009-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+PORTAGE_BIN_PATH=${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}
+PORTAGE_PYM_PATH=${PORTAGE_PYM_PATH:-/usr/lib/portage/pym}
+PYTHONPATH=$PORTAGE_PYM_PATH${PYTHONPATH:+:}$PYTHONPATH \
+ "${PORTAGE_PYTHON:-/usr/bin/python}" "$PORTAGE_BIN_PATH/dohtml.py" "$@"
+
+ret=$?
+[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+exit $ret
diff --git a/portage_with_autodep/bin/ebuild-helpers/doinfo b/portage_with_autodep/bin/ebuild-helpers/doinfo
new file mode 100755
index 0000000..54fb8da
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/doinfo
@@ -0,0 +1,24 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -z $1 ]] ; then
+ helpers_die "${0##*/}: at least one argument needed"
+ exit 1
+fi
+
+if [[ ! -d ${D}usr/share/info ]] ; then
+ install -d "${D}usr/share/info" || { helpers_die "${0##*/}: failed to install ${D}usr/share/info"; exit 1; }
+fi
+
+install -m0644 "$@" "${D}usr/share/info"
+rval=$?
+if [ $rval -ne 0 ] ; then
+ for x in "$@" ; do
+ [ -e "$x" ] || echo "!!! ${0##*/}: $x does not exist" 1>&2
+ done
+ helpers_die "${0##*/} failed"
+fi
+exit $rval
diff --git a/portage_with_autodep/bin/ebuild-helpers/doinitd b/portage_with_autodep/bin/ebuild-helpers/doinitd
new file mode 100755
index 0000000..b711e19
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/doinitd
@@ -0,0 +1,14 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+if [[ $# -lt 1 ]] ; then
+ source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+ helpers_die "${0##*/}: at least one argument needed"
+ exit 1
+fi
+
+exec \
+env \
+_E_EXEDESTTREE_="/etc/init.d/" \
+doexe "$@"
diff --git a/portage_with_autodep/bin/ebuild-helpers/doins b/portage_with_autodep/bin/ebuild-helpers/doins
new file mode 100755
index 0000000..7dec146
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/doins
@@ -0,0 +1,155 @@
+#!/bin/bash
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ ${0##*/} == dodoc ]] ; then
+ if [ $# -eq 0 ] ; then
+ # default_src_install may call dodoc with no arguments
+ # when DOC is defined but empty, so simply return
+		# successfully in this case.
+ exit 0
+ fi
+ export INSOPTIONS=-m0644
+ export INSDESTTREE=usr/share/doc/${PF}/${_E_DOCDESTTREE_}
+fi
+
+if [ $# -lt 1 ] ; then
+ helpers_die "${0##*/}: at least one argument needed"
+ exit 1
+fi
+
+if [[ "$1" == "-r" ]] ; then
+ DOINSRECUR=y
+ shift
+else
+ DOINSRECUR=n
+fi
+
+if [[ ${INSDESTTREE#${D}} != "${INSDESTTREE}" ]]; then
+ vecho "-------------------------------------------------------" 1>&2
+ vecho "You should not use \${D} with helpers." 1>&2
+ vecho " --> ${INSDESTTREE}" 1>&2
+ vecho "-------------------------------------------------------" 1>&2
+ helpers_die "${0##*/} used with \${D}"
+ exit 1
+fi
+
+case "$EAPI" in
+ 0|1|2|3|3_pre2)
+ PRESERVE_SYMLINKS=n
+ ;;
+ *)
+ PRESERVE_SYMLINKS=y
+ ;;
+esac
+
+export TMP=$T/.doins_tmp
+# Use separate directories to avoid potential name collisions.
+mkdir -p "$TMP"/{1,2}
+
+[[ ! -d ${D}${INSDESTTREE} ]] && dodir "${INSDESTTREE}"
+
+_doins() {
+ local mysrc="$1" mydir="$2" cleanup="" rval
+
+ if [ -L "$mysrc" ] ; then
+ # Our fake $DISTDIR contains symlinks that should
+ # not be reproduced inside $D. In order to ensure
+ # that things like dodoc "$DISTDIR"/foo.pdf work
+ # as expected, we dereference symlinked files that
+ # refer to absolute paths inside
+ # $PORTAGE_ACTUAL_DISTDIR/.
+ if [ $PRESERVE_SYMLINKS = y ] && \
+ ! [[ $(readlink "$mysrc") == "$PORTAGE_ACTUAL_DISTDIR"/* ]] ; then
+ rm -rf "$D$INSDESTTREE/$mydir/${mysrc##*/}" || return $?
+ cp -P "$mysrc" "$D$INSDESTTREE/$mydir/${mysrc##*/}"
+ return $?
+ else
+ cp "$mysrc" "$TMP/2/${mysrc##*/}" || return $?
+ mysrc="$TMP/2/${mysrc##*/}"
+ cleanup=$mysrc
+ fi
+ fi
+
+ install ${INSOPTIONS} "${mysrc}" "${D}${INSDESTTREE}/${mydir}"
+ rval=$?
+ [[ -n ${cleanup} ]] && rm -f "${cleanup}"
+ [ $rval -ne 0 ] && echo "!!! ${0##*/}: $mysrc does not exist" 1>&2
+ return $rval
+}
+
+_xdoins() {
+ local -i failed=0
+ while read -r -d $'\0' x ; do
+ _doins "$x" "${x%/*}"
+ ((failed|=$?))
+ done
+ return $failed
+}
+
+success=0
+failed=0
+
+for x in "$@" ; do
+ if [[ $PRESERVE_SYMLINKS = n && -d $x ]] || \
+ [[ $PRESERVE_SYMLINKS = y && -d $x && ! -L $x ]] ; then
+ if [ "${DOINSRECUR}" == "n" ] ; then
+ if [[ ${0##*/} == dodoc ]] ; then
+ echo "!!! ${0##*/}: $x is a directory" 1>&2
+ ((failed|=1))
+ fi
+ continue
+ fi
+
+ while [ "$x" != "${x%/}" ] ; do
+ x=${x%/}
+ done
+ if [ "$x" = "${x%/*}" ] ; then
+ pushd "$PWD" >/dev/null
+ else
+ pushd "${x%/*}" >/dev/null
+ fi
+ x=${x##*/}
+ x_orig=$x
+ # Follow any symlinks recursively until we've got
+ # a normal directory for 'find' to traverse. The
+ # name of the symlink will be used for the name
+ # of the installed directory, as discussed in
+ # bug #239529.
+ while [ -L "$x" ] ; do
+ pushd "$(readlink "$x")" >/dev/null
+ x=${PWD##*/}
+ pushd "${PWD%/*}" >/dev/null
+ done
+ if [[ $x != $x_orig ]] ; then
+ mv "$x" "$TMP/1/$x_orig"
+ pushd "$TMP/1" >/dev/null
+ fi
+ find "$x_orig" -type d -exec dodir "${INSDESTTREE}/{}" \;
+ find "$x_orig" \( -type f -or -type l \) -print0 | _xdoins
+ if [[ ${PIPESTATUS[1]} -eq 0 ]] ; then
+ # NOTE: Even if only an empty directory is installed here, it
+ # still counts as success, since an empty directory given as
+ # an argument to doins -r should not trigger failure.
+ ((success|=1))
+ else
+ ((failed|=1))
+ fi
+ if [[ $x != $x_orig ]] ; then
+ popd >/dev/null
+ mv "$TMP/1/$x_orig" "$x"
+ fi
+ while popd >/dev/null 2>&1 ; do true ; done
+ else
+ _doins "${x}"
+ if [[ $? -eq 0 ]] ; then
+ ((success|=1))
+ else
+ ((failed|=1))
+ fi
+ fi
+done
+rm -rf "$TMP"
+[[ $failed -ne 0 || $success -eq 0 ]] && { helpers_die "${0##*/} failed"; exit 1; } || exit 0
diff --git a/portage_with_autodep/bin/ebuild-helpers/dolib b/portage_with_autodep/bin/ebuild-helpers/dolib
new file mode 100755
index 0000000..87ade42
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/dolib
@@ -0,0 +1,43 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+# Setup ABI cruft
+LIBDIR_VAR="LIBDIR_${ABI}"
+if [[ -n ${ABI} && -n ${!LIBDIR_VAR} ]] ; then
+ CONF_LIBDIR=${!LIBDIR_VAR}
+fi
+unset LIBDIR_VAR
+# we need this to default to lib so that things don't break
+CONF_LIBDIR=${CONF_LIBDIR:-lib}
+libdir="${D}${DESTTREE}/${CONF_LIBDIR}"
+
+
+if [[ $# -lt 1 ]] ; then
+ helpers_die "${0##*/}: at least one argument needed"
+ exit 1
+fi
+if [[ ! -d ${libdir} ]] ; then
+ install -d "${libdir}" || { helpers_die "${0##*/}: failed to install ${libdir}"; exit 1; }
+fi
+
+ret=0
+
+for x in "$@" ; do
+ if [[ -e ${x} ]] ; then
+ if [[ ! -L ${x} ]] ; then
+ install ${LIBOPTIONS} "${x}" "${libdir}"
+ else
+ ln -s "$(readlink "${x}")" "${libdir}/${x##*/}"
+ fi
+ else
+ echo "!!! ${0##*/}: ${x} does not exist" 1>&2
+ false
+ fi
+ ((ret|=$?))
+done
+
+[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+exit ${ret}
diff --git a/portage_with_autodep/bin/ebuild-helpers/dolib.a b/portage_with_autodep/bin/ebuild-helpers/dolib.a
new file mode 100755
index 0000000..d2279dc
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/dolib.a
@@ -0,0 +1,6 @@
+#!/bin/bash
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+exec env LIBOPTIONS="-m0644" \
+ dolib "$@"
diff --git a/portage_with_autodep/bin/ebuild-helpers/dolib.so b/portage_with_autodep/bin/ebuild-helpers/dolib.so
new file mode 100755
index 0000000..4bdbfab
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/dolib.so
@@ -0,0 +1,6 @@
+#!/bin/bash
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+exec env LIBOPTIONS="-m0755" \
+ dolib "$@"
diff --git a/portage_with_autodep/bin/ebuild-helpers/doman b/portage_with_autodep/bin/ebuild-helpers/doman
new file mode 100755
index 0000000..4561bef
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/doman
@@ -0,0 +1,64 @@
+#!/bin/bash
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ $# -lt 1 ]] ; then
+ helpers_die "${0##*/}: at least one argument needed"
+ exit 1
+fi
+
+i18n=""
+
+ret=0
+
+for x in "$@" ; do
+ if [[ ${x:0:6} == "-i18n=" ]] ; then
+ i18n=${x:6}/
+ continue
+ fi
+ if [[ ${x:0:6} == ".keep_" ]] ; then
+ continue
+ fi
+
+ suffix=${x##*.}
+
+ # These will be automatically decompressed by ecompressdir.
+ if has ${suffix} Z gz bz2 ; then
+ realname=${x%.*}
+ suffix=${realname##*.}
+ fi
+
+ if has "${EAPI:-0}" 2 3 || [[ -z ${i18n} ]] \
+ && ! has "${EAPI:-0}" 0 1 \
+ && [[ $x =~ (.*)\.([a-z][a-z](_[A-Z][A-Z])?)\.(.*) ]]
+ then
+ name=${BASH_REMATCH[1]##*/}.${BASH_REMATCH[4]}
+ mandir=${BASH_REMATCH[2]}/man${suffix:0:1}
+ else
+ name=${x##*/}
+ mandir=${i18n#/}man${suffix:0:1}
+ fi
+
+
+ if [[ ${mandir} == *man[0-9n] ]] ; then
+ if [[ -s ${x} ]] ; then
+ if [[ ! -d ${D}/usr/share/man/${mandir} ]] ; then
+ install -d "${D}/usr/share/man/${mandir}"
+ fi
+
+ install -m0644 "${x}" "${D}/usr/share/man/${mandir}/${name}"
+ ((ret|=$?))
+ elif [[ ! -e ${x} ]] ; then
+ echo "!!! ${0##*/}: $x does not exist" 1>&2
+ ((ret|=1))
+ fi
+ else
+ vecho "doman: '${x}' is probably not a man page; skipping" 1>&2
+ ((ret|=1))
+ fi
+done
+
+[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+exit ${ret}
diff --git a/portage_with_autodep/bin/ebuild-helpers/domo b/portage_with_autodep/bin/ebuild-helpers/domo
new file mode 100755
index 0000000..4737f44
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/domo
@@ -0,0 +1,34 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+mynum=${#}
+if [ ${mynum} -lt 1 ] ; then
+ helpers_die "${0}: at least one argument needed"
+ exit 1
+fi
+if [ ! -d "${D}${DESTTREE}/share/locale" ] ; then
+ install -d "${D}${DESTTREE}/share/locale/"
+fi
+
+ret=0
+
+for x in "$@" ; do
+ if [ -e "${x}" ] ; then
+ mytiny="${x##*/}"
+ mydir="${D}${DESTTREE}/share/locale/${mytiny%.*}/LC_MESSAGES"
+ if [ ! -d "${mydir}" ] ; then
+ install -d "${mydir}"
+ fi
+ install -m0644 "${x}" "${mydir}/${MOPREFIX}.mo"
+ else
+ echo "!!! ${0##*/}: $x does not exist" 1>&2
+ false
+ fi
+ ((ret|=$?))
+done
+
+[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+exit $ret
diff --git a/portage_with_autodep/bin/ebuild-helpers/dosbin b/portage_with_autodep/bin/ebuild-helpers/dosbin
new file mode 100755
index 0000000..87a3091
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/dosbin
@@ -0,0 +1,29 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ $# -lt 1 ]] ; then
+ helpers_die "${0##*/}: at least one argument needed"
+ exit 1
+fi
+
+if [[ ! -d ${D}${DESTTREE}/sbin ]] ; then
+ install -d "${D}${DESTTREE}/sbin" || { helpers_die "${0##*/}: failed to install ${D}${DESTTREE}/sbin"; exit 2; }
+fi
+
+ret=0
+
+for x in "$@" ; do
+ if [[ -e ${x} ]] ; then
+ install -m0755 -o ${PORTAGE_INST_UID:-0} -g ${PORTAGE_INST_GID:-0} "${x}" "${D}${DESTTREE}/sbin"
+ else
+ echo "!!! ${0##*/}: ${x} does not exist" 1>&2
+ false
+ fi
+ ((ret|=$?))
+done
+
+[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+exit ${ret}
diff --git a/portage_with_autodep/bin/ebuild-helpers/dosed b/portage_with_autodep/bin/ebuild-helpers/dosed
new file mode 100755
index 0000000..afc949b
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/dosed
@@ -0,0 +1,35 @@
+#!/bin/bash
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+if [[ $# -lt 1 ]] ; then
+ echo "!!! ${0##*/}: at least one argument needed" >&2
+ exit 1
+fi
+
+ret=0
+file_found=0
+mysed="s:${D}::g"
+
+for x in "$@" ; do
+ y=$D${x#/}
+ if [ -e "${y}" ] ; then
+ if [ -f "${y}" ] ; then
+ file_found=1
+ sed -i -e "${mysed}" "${y}"
+ else
+ echo "${y} is not a regular file!" >&2
+ false
+ fi
+ ((ret|=$?))
+ else
+ mysed="${x}"
+ fi
+done
+
+if [ $file_found = 0 ] ; then
+ echo "!!! ${0##*/}: $y does not exist" 1>&2
+ ((ret|=1))
+fi
+
+exit $ret
diff --git a/portage_with_autodep/bin/ebuild-helpers/dosym b/portage_with_autodep/bin/ebuild-helpers/dosym
new file mode 100755
index 0000000..500dad0
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/dosym
@@ -0,0 +1,18 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ $# -ne 2 ]] ; then
+ helpers_die "${0##*/}: two arguments needed"
+ exit 1
+fi
+
+destdir=${2%/*}
+[[ ! -d ${D}${destdir} ]] && dodir "${destdir}"
+
+ln -snf "$1" "${D}$2"
+ret=$?
+[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+exit $ret
diff --git a/portage_with_autodep/bin/ebuild-helpers/ecompress b/portage_with_autodep/bin/ebuild-helpers/ecompress
new file mode 100755
index 0000000..b61421b
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/ecompress
@@ -0,0 +1,161 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -z $1 ]] ; then
+ helpers_die "${0##*/}: at least one argument needed"
+ exit 1
+fi
+
+# setup compression stuff
+PORTAGE_COMPRESS=${PORTAGE_COMPRESS-bzip2}
+[[ -z ${PORTAGE_COMPRESS} ]] && exit 0
+
+if [[ ${PORTAGE_COMPRESS_FLAGS+set} != "set" ]] ; then
+ case ${PORTAGE_COMPRESS} in
+ bzip2|gzip) PORTAGE_COMPRESS_FLAGS="-9";;
+ esac
+fi
+
+# decompress_args(suffix, binary)
+# - suffix: the compression suffix to work with
+# - binary: the program to execute that'll compress/decompress
+# new_args: global array used to return revised arguments
+decompress_args() {
+ local suffix=$1 binary=$2
+ shift 2
+
+ # Initialize the global new_args array.
+ new_args=()
+ declare -a decompress_args=()
+ local x i=0 decompress_count=0
+ for x in "$@" ; do
+ if [[ ${x%$suffix} = $x ]] ; then
+ new_args[$i]=$x
+ else
+ new_args[$i]=${x%$suffix}
+ decompress_args[$decompress_count]=$x
+ ((decompress_count++))
+ fi
+ ((i++))
+ done
+
+ if [ $decompress_count -gt 0 ] ; then
+ ${binary} "${decompress_args[@]}"
+ if [ $? -ne 0 ] ; then
+ # Apparently decompression failed for one or more files, so
+ # drop those since we don't want to compress them twice.
+ new_args=()
+ local x i=0
+ for x in "$@" ; do
+ if [[ ${x%$suffix} = $x ]] ; then
+ new_args[$i]=$x
+ ((i++))
+ elif [[ -f ${x%$suffix} ]] ; then
+ new_args[$i]=${x%$suffix}
+ ((i++))
+ else
+ # Apparently decompression failed for this one, so drop
+ # it since we don't want to compress it twice.
+ true
+ fi
+ done
+ fi
+ fi
+}
+
+case $1 in
+ --suffix)
+ [[ -n $2 ]] && vecho "${0##*/}: --suffix takes no additional arguments" 1>&2
+
+ if [[ ! -e ${T}/.ecompress.suffix ]] ; then
+ set -e
+ tmpdir="${T}"/.ecompress$$.${RANDOM}
+ mkdir "${tmpdir}"
+ cd "${tmpdir}"
+ # we have to fill the file enough so that there is something
+ # to compress as some programs will refuse to do compression
+ # if it cannot actually compress the file
+ echo {0..1000} > compressme
+ ${PORTAGE_COMPRESS} ${PORTAGE_COMPRESS_FLAGS} compressme > /dev/null
+ # If PORTAGE_COMPRESS_FLAGS contains -k then we need to avoid
+ # having our glob match the uncompressed file here.
+ suffix=$(echo compressme.*)
+ [[ -z $suffix || "$suffix" == "compressme.*" ]] && \
+ suffix=$(echo compressme*)
+ suffix=${suffix#compressme}
+ cd /
+ rm -rf "${tmpdir}"
+ echo "${suffix}" > "${T}/.ecompress.suffix"
+ fi
+ cat "${T}/.ecompress.suffix"
+ ;;
+ --bin)
+ [[ -n $2 ]] && vecho "${0##*/}: --bin takes no additional arguments" 1>&2
+
+ echo "${PORTAGE_COMPRESS} ${PORTAGE_COMPRESS_FLAGS}"
+ ;;
+ --queue)
+ shift
+ ret=0
+ for x in "${@/%/.ecompress.file}" ; do
+ >> "$x"
+ ((ret|=$?))
+ done
+ [[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+ exit $ret
+ ;;
+ --dequeue)
+ [[ -n $2 ]] && vecho "${0##*/}: --dequeue takes no additional arguments" 1>&2
+ find "${D}" -name '*.ecompress.file' -print0 \
+ | sed -e 's:\.ecompress\.file::g' \
+ | ${XARGS} -0 ecompress
+ find "${D}" -name '*.ecompress.file' -print0 | ${XARGS} -0 rm -f
+ ;;
+ --*)
+ helpers_die "${0##*/}: unknown arguments '$*'"
+ exit 1
+ ;;
+ *)
+ # Since dodoc calls ecompress on files that are already compressed,
+ # perform decompression here (similar to ecompressdir behavior).
+ decompress_args ".Z" "gunzip -f" "$@"
+ set -- "${new_args[@]}"
+ decompress_args ".gz" "gunzip -f" "$@"
+ set -- "${new_args[@]}"
+ decompress_args ".bz2" "bunzip2 -f" "$@"
+ set -- "${new_args[@]}"
+
+ mask_ext_re=""
+ set -f
+ for x in $PORTAGE_COMPRESS_EXCLUDE_SUFFIXES ; do
+ mask_ext_re+="|$x"
+ done
+ set +f
+ mask_ext_re="^(${mask_ext_re:1})\$"
+ declare -a filtered_args=()
+ i=0
+ for x in "$@" ; do
+ [[ ${x##*.} =~ $mask_ext_re ]] && continue
+ [[ -s ${x} ]] || continue
+ filtered_args[$i]=$x
+ ((i++))
+ done
+ [ $i -eq 0 ] && exit 0
+ set -- "${filtered_args[@]}"
+
+ # If a compressed version of the file already exists, simply
+ # delete it so that the compressor doesn't whine (bzip2 will
+ # complain and skip, gzip will prompt for input)
+ suffix=$(ecompress --suffix)
+ [[ -n ${suffix} ]] && echo -n "${@/%/${suffix}$'\001'}" | \
+ tr '\001' '\000' | ${XARGS} -0 rm -f
+ # Finally, let's actually do some real work
+ "${PORTAGE_COMPRESS}" ${PORTAGE_COMPRESS_FLAGS} "$@"
+ ret=$?
+ [[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+ exit $ret
+ ;;
+esac
diff --git a/portage_with_autodep/bin/ebuild-helpers/ecompressdir b/portage_with_autodep/bin/ebuild-helpers/ecompressdir
new file mode 100755
index 0000000..7a95120
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/ecompressdir
@@ -0,0 +1,143 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -z $1 ]] ; then
+ helpers_die "${0##*/}: at least one argument needed"
+ exit 1
+fi
+
+case $1 in
+ --ignore)
+ shift
+ for skip in "$@" ; do
+ [[ -d ${D}${skip} || -f ${D}${skip} ]] \
+ && >> "${D}${skip}.ecompress.skip"
+ done
+ exit 0
+ ;;
+ --queue)
+ shift
+ set -- "${@/%/.ecompress.dir}"
+ set -- "${@/#/${D}}"
+ ret=0
+ for x in "$@" ; do
+ >> "$x"
+ ((ret|=$?))
+ done
+ [[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+ exit $ret
+ ;;
+ --dequeue)
+ [[ -n $2 ]] && vecho "${0##*/}: --dequeue takes no additional arguments" 1>&2
+ find "${D}" -name '*.ecompress.dir' -print0 \
+ | sed -e 's:\.ecompress\.dir::g' -e "s:${D}:/:g" \
+ | ${XARGS} -0 ecompressdir
+ find "${D}" -name '*.ecompress.skip' -print0 | ${XARGS} -0 rm -f
+ exit 0
+ ;;
+ --*)
+ helpers_die "${0##*/}: unknown arguments '$*'"
+ exit 1
+ ;;
+esac
+
+# figure out the new suffix
+suffix=$(ecompress --suffix)
+
+# funk_up_dir(action, suffix, binary)
+# - action: compress or decompress
+# - suffix: the compression suffix to work with
+# - binary: the program to execute that'll compress/decompress
+# The directory we act on is implied in the ${dir} variable
+funk_up_dir() {
+ local act=$1 suffix=$2 binary=$3
+
+ local negate=""
+ [[ ${act} == "compress" ]] && negate="!"
+
+ # first we act on all the files
+ find "${dir}" -type f ${negate} -iname '*'${suffix} -print0 | ${XARGS} -0 ${binary}
+ ((ret|=$?))
+
+ find "${dir}" -type l -print0 | \
+ while read -r -d $'\0' brokenlink ; do
+ [[ -e ${brokenlink} ]] && continue
+ olddest=$(readlink "${brokenlink}")
+ [[ ${act} == "compress" ]] \
+ && newdest="${olddest}${suffix}" \
+ || newdest="${olddest%${suffix}}"
+ rm -f "${brokenlink}"
+ [[ ${act} == "compress" ]] \
+ && ln -snf "${newdest}" "${brokenlink}${suffix}" \
+ || ln -snf "${newdest}" "${brokenlink%${suffix}}"
+ ((ret|=$?))
+ done
+}
+
+# _relocate_skip_dirs(srctree, dsttree)
+# Move all files and directories we want to skip running compression
+# on from srctree to dsttree.
+_relocate_skip_dirs() {
+ local srctree="$1" dsttree="$2"
+
+ [[ -d ${srctree} ]] || return 0
+
+ find "${srctree}" -name '*.ecompress.skip' -print0 | \
+ while read -r -d $'\0' src ; do
+ src=${src%.ecompress.skip}
+ dst="${dsttree}${src#${srctree}}"
+ parent=${dst%/*}
+ mkdir -p "${parent}"
+ mv "${src}" "${dst}"
+ mv "${src}.ecompress.skip" "${dst}.ecompress.skip"
+ done
+}
+hide_skip_dirs() { _relocate_skip_dirs "${D}" "${T}"/ecompress-skip/ ; }
+restore_skip_dirs() { _relocate_skip_dirs "${T}"/ecompress-skip/ "${D}" ; }
+
+ret=0
+
+rm -rf "${T}"/ecompress-skip
+
+for dir in "$@" ; do
+ dir=${dir#/}
+ dir="${D}${dir}"
+ if [[ ! -d ${dir} ]] ; then
+ vecho "${0##*/}: /${dir#${D}} does not exist!"
+ continue
+ fi
+ cd "${dir}"
+ actual_dir=${dir}
+ dir=. # use relative path to avoid 'Argument list too long' errors
+
+ # hide all the stuff we want to skip
+ hide_skip_dirs "${dir}"
+
+ # since we've been requested to compress the whole dir,
+ # delete any individual queued requests
+ rm -f "${actual_dir}.ecompress.dir"
+ find "${dir}" -type f -name '*.ecompress.file' -print0 | ${XARGS} -0 rm -f
+
+ # not uncommon for packages to compress doc files themselves
+ funk_up_dir "decompress" ".Z" "gunzip -f"
+ funk_up_dir "decompress" ".gz" "gunzip -f"
+ funk_up_dir "decompress" ".bz2" "bunzip2 -f"
+
+ # forcibly break all hard links as some compressors whine about it
+ find "${dir}" -type f -links +1 -exec env file="{}" sh -c \
+ 'cp -p "${file}" "${file}.ecompress.break" ; mv -f "${file}.ecompress.break" "${file}"' \;
+
+ # now lets do our work
+ [[ -z ${suffix} ]] && continue
+ vecho "${0##*/}: $(ecompress --bin) /${actual_dir#${D}}"
+ funk_up_dir "compress" "${suffix}" "ecompress"
+
+ # finally, restore the skipped stuff
+ restore_skip_dirs
+done
+
+[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+exit ${ret}
diff --git a/portage_with_autodep/bin/ebuild-helpers/eerror b/portage_with_autodep/bin/ebuild-helpers/eerror
new file mode 120000
index 0000000..a403c75
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/eerror
@@ -0,0 +1 @@
+elog \ No newline at end of file
diff --git a/portage_with_autodep/bin/ebuild-helpers/einfo b/portage_with_autodep/bin/ebuild-helpers/einfo
new file mode 120000
index 0000000..a403c75
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/einfo
@@ -0,0 +1 @@
+elog \ No newline at end of file
diff --git a/portage_with_autodep/bin/ebuild-helpers/elog b/portage_with_autodep/bin/ebuild-helpers/elog
new file mode 100755
index 0000000..a2303af
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/elog
@@ -0,0 +1,7 @@
+#!/bin/bash
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+${0##*/} "$@"
diff --git a/portage_with_autodep/bin/ebuild-helpers/emake b/portage_with_autodep/bin/ebuild-helpers/emake
new file mode 100755
index 0000000..d842781
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/emake
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+#
+# emake: Supplies some default parameters to GNU make. At the moment the
+# only parameter supplied is -jN, where N is a number of
+# parallel processes that should be ideal for the running host
+# (e.g. on a single-CPU machine, N=2). The MAKEOPTS variable
+# is set in make.globals. We don't source make.globals
+# here because emake is only called from an ebuild.
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ $PORTAGE_QUIET != 1 ]] ; then
+ (
+ for arg in ${MAKE:-make} $MAKEOPTS $EXTRA_EMAKE "$@" ; do
+ [[ ${arg} == *" "* ]] \
+ && printf "'%s' " "${arg}" \
+ || printf "%s " "${arg}"
+ done
+ printf "\n"
+ ) >&2
+fi
+
+${MAKE:-make} ${MAKEOPTS} ${EXTRA_EMAKE} "$@"
+ret=$?
+[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+exit $ret
diff --git a/portage_with_autodep/bin/ebuild-helpers/eqawarn b/portage_with_autodep/bin/ebuild-helpers/eqawarn
new file mode 120000
index 0000000..a403c75
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/eqawarn
@@ -0,0 +1 @@
+elog \ No newline at end of file
diff --git a/portage_with_autodep/bin/ebuild-helpers/ewarn b/portage_with_autodep/bin/ebuild-helpers/ewarn
new file mode 120000
index 0000000..a403c75
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/ewarn
@@ -0,0 +1 @@
+elog \ No newline at end of file
diff --git a/portage_with_autodep/bin/ebuild-helpers/fowners b/portage_with_autodep/bin/ebuild-helpers/fowners
new file mode 100755
index 0000000..4cc6bfa
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/fowners
@@ -0,0 +1,13 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+# we can't prefix all arguments because
+# chown takes random options
+slash="/"
+chown "${@/#${slash}/${D}${slash}}"
+ret=$?
+[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+exit $ret
diff --git a/portage_with_autodep/bin/ebuild-helpers/fperms b/portage_with_autodep/bin/ebuild-helpers/fperms
new file mode 100755
index 0000000..0260bdc
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/fperms
@@ -0,0 +1,13 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+# we can't prefix all arguments because
+# chmod takes random options
+slash="/"
+chmod "${@/#${slash}/${D}${slash}}"
+ret=$?
+[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+exit $ret
diff --git a/portage_with_autodep/bin/ebuild-helpers/newbin b/portage_with_autodep/bin/ebuild-helpers/newbin
new file mode 100755
index 0000000..30f19b0
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/newbin
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
+ helpers_die "${0##*/}: Need two arguments, old file and new file"
+ exit 1
+fi
+
+if [ ! -e "$1" ] ; then
+ helpers_die "!!! ${0##*/}: $1 does not exist"
+ exit 1
+fi
+
+rm -rf "${T}/${2}" && \
+cp -f "${1}" "${T}/${2}" && \
+exec dobin "${T}/${2}"
diff --git a/portage_with_autodep/bin/ebuild-helpers/newconfd b/portage_with_autodep/bin/ebuild-helpers/newconfd
new file mode 100755
index 0000000..5752cfa
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/newconfd
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
+ helpers_die "${0##*/}: Need two arguments, old file and new file"
+ exit 1
+fi
+
+if [ ! -e "$1" ] ; then
+ helpers_die "!!! ${0##*/}: $1 does not exist"
+ exit 1
+fi
+
+rm -rf "${T}/${2}" && \
+cp -f "${1}" "${T}/${2}" && \
+exec doconfd "${T}/${2}"
diff --git a/portage_with_autodep/bin/ebuild-helpers/newdoc b/portage_with_autodep/bin/ebuild-helpers/newdoc
new file mode 100755
index 0000000..f97ce0d
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/newdoc
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
+ helpers_die "${0##*/}: Need two arguments, old file and new file"
+ exit 1
+fi
+
+if [ ! -e "$1" ] ; then
+ helpers_die "!!! ${0##*/}: $1 does not exist"
+ exit 1
+fi
+
+rm -rf "${T}/${2}" && \
+cp -f "${1}" "${T}/${2}" && \
+exec dodoc "${T}/${2}"
diff --git a/portage_with_autodep/bin/ebuild-helpers/newenvd b/portage_with_autodep/bin/ebuild-helpers/newenvd
new file mode 100755
index 0000000..83c556e
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/newenvd
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
+ helpers_die "${0##*/}: Need two arguments, old file and new file"
+ exit 1
+fi
+
+if [ ! -e "$1" ] ; then
+ helpers_die "!!! ${0##*/}: $1 does not exist"
+ exit 1
+fi
+
+rm -rf "${T}/${2}" && \
+cp -f "${1}" "${T}/${2}" && \
+exec doenvd "${T}/${2}"
diff --git a/portage_with_autodep/bin/ebuild-helpers/newexe b/portage_with_autodep/bin/ebuild-helpers/newexe
new file mode 100755
index 0000000..92dbe9f
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/newexe
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
+ helpers_die "${0##*/}: Need two arguments, old file and new file"
+ exit 1
+fi
+
+if [ ! -e "$1" ] ; then
+ helpers_die "!!! ${0##*/}: $1 does not exist"
+ exit 1
+fi
+
+rm -rf "${T}/${2}" && \
+cp -f "${1}" "${T}/${2}" && \
+exec doexe "${T}/${2}"
diff --git a/portage_with_autodep/bin/ebuild-helpers/newinitd b/portage_with_autodep/bin/ebuild-helpers/newinitd
new file mode 100755
index 0000000..fc6003a
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/newinitd
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
+ helpers_die "${0##*/}: Need two arguments, old file and new file"
+ exit 1
+fi
+
+if [ ! -e "$1" ] ; then
+ helpers_die "!!! ${0##*/}: $1 does not exist"
+ exit 1
+fi
+
+rm -rf "${T}/${2}" && \
+cp -f "${1}" "${T}/${2}" && \
+exec doinitd "${T}/${2}"
diff --git a/portage_with_autodep/bin/ebuild-helpers/newins b/portage_with_autodep/bin/ebuild-helpers/newins
new file mode 100755
index 0000000..065477f
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/newins
@@ -0,0 +1,35 @@
+#!/bin/bash
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
+ helpers_die "${0##*/}: Need two arguments, old file and new file"
+ exit 1
+fi
+
+if [ ! -e "$1" ] ; then
+ helpers_die "!!! ${0##*/}: $1 does not exist"
+ exit 1
+fi
+
+rm -rf "${T}/${2}" || exit $?
+case "$EAPI" in
+ 0|1|2|3|3_pre2)
+ cp "$1" "$T/$2" || exit $?
+ ;;
+ *)
+ cp -P "$1" "$T/$2"
+ ret=$?
+ if [[ $ret -ne 0 ]] ; then
+ helpers_die "${0##*/} failed"
+ exit $ret
+ fi
+ ;;
+esac
+doins "${T}/${2}"
+ret=$?
+rm -rf "${T}/${2}"
+[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+exit $ret
diff --git a/portage_with_autodep/bin/ebuild-helpers/newlib.a b/portage_with_autodep/bin/ebuild-helpers/newlib.a
new file mode 100755
index 0000000..eef4104
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/newlib.a
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
+ helpers_die "${0##*/}: Need two arguments, old file and new file"
+ exit 1
+fi
+
+if [ ! -e "$1" ] ; then
+ helpers_die "!!! ${0##*/}: $1 does not exist"
+ exit 1
+fi
+
+rm -rf "${T}/${2}" && \
+cp -f "${1}" "${T}/${2}" && \
+exec dolib.a "${T}/${2}"
diff --git a/portage_with_autodep/bin/ebuild-helpers/newlib.so b/portage_with_autodep/bin/ebuild-helpers/newlib.so
new file mode 100755
index 0000000..c8696f3
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/newlib.so
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
+ helpers_die "${0##*/}: Need two arguments, old file and new file"
+ exit 1
+fi
+
+if [ ! -e "$1" ] ; then
+ helpers_die "!!! ${0##*/}: $1 does not exist"
+ exit 1
+fi
+
+rm -rf "${T}/${2}" && \
+cp -f "${1}" "${T}/${2}" && \
+exec dolib.so "${T}/${2}"
diff --git a/portage_with_autodep/bin/ebuild-helpers/newman b/portage_with_autodep/bin/ebuild-helpers/newman
new file mode 100755
index 0000000..ffb8a2d
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/newman
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
+ helpers_die "${0##*/}: Need two arguments, old file and new file"
+ exit 1
+fi
+
+if [ ! -e "$1" ] ; then
+ helpers_die "!!! ${0##*/}: $1 does not exist"
+ exit 1
+fi
+
+rm -rf "${T}/${2}" && \
+cp -f "${1}" "${T}/${2}" && \
+exec doman "${T}/${2}"
diff --git a/portage_with_autodep/bin/ebuild-helpers/newsbin b/portage_with_autodep/bin/ebuild-helpers/newsbin
new file mode 100755
index 0000000..82242aa
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/newsbin
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
+ helpers_die "${0##*/}: Need two arguments, old file and new file"
+ exit 1
+fi
+
+if [ ! -e "$1" ] ; then
+ helpers_die "!!! ${0##*/}: $1 does not exist"
+ exit 1
+fi
+
+rm -rf "${T}/${2}" && \
+cp -f "${1}" "${T}/${2}" && \
+exec dosbin "${T}/${2}"
diff --git a/portage_with_autodep/bin/ebuild-helpers/portageq b/portage_with_autodep/bin/ebuild-helpers/portageq
new file mode 100755
index 0000000..ec30b66
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/portageq
@@ -0,0 +1,8 @@
+#!/bin/bash
+# Copyright 2009-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+PORTAGE_BIN_PATH=${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}
+PORTAGE_PYM_PATH=${PORTAGE_PYM_PATH:-/usr/lib/portage/pym}
+PYTHONPATH=$PORTAGE_PYM_PATH${PYTHONPATH:+:}$PYTHONPATH \
+ exec "${PORTAGE_PYTHON:-/usr/bin/python}" "$PORTAGE_BIN_PATH/portageq" "$@"
diff --git a/portage_with_autodep/bin/ebuild-helpers/prepall b/portage_with_autodep/bin/ebuild-helpers/prepall
new file mode 100755
index 0000000..701ecba
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/prepall
@@ -0,0 +1,23 @@
+#!/bin/bash
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if has chflags $FEATURES ; then
+ # Save all the file flags for restoration at the end of prepall.
+ mtree -c -p "${D}" -k flags > "${T}/bsdflags.mtree"
+ # Remove all the file flags so that prepall can do anything necessary.
+ chflags -R noschg,nouchg,nosappnd,nouappnd "${D}"
+ chflags -R nosunlnk,nouunlnk "${D}" 2>/dev/null
+fi
+
+prepallman
+prepallinfo
+
+prepallstrip
+
+if has chflags $FEATURES ; then
+ # Restore all the file flags that were saved at the beginning of prepall.
+ mtree -U -e -p "${D}" -k flags < "${T}/bsdflags.mtree" &> /dev/null
+fi
diff --git a/portage_with_autodep/bin/ebuild-helpers/prepalldocs b/portage_with_autodep/bin/ebuild-helpers/prepalldocs
new file mode 100755
index 0000000..fdc735d
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/prepalldocs
@@ -0,0 +1,15 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -n $1 ]] ; then
+ vecho "${0##*/}: invalid usage; takes no arguments" 1>&2
+fi
+
+cd "${D}"
+[[ -d usr/share/doc ]] || exit 0
+
+ecompressdir --ignore /usr/share/doc/${PF}/html
+ecompressdir --queue /usr/share/doc
diff --git a/portage_with_autodep/bin/ebuild-helpers/prepallinfo b/portage_with_autodep/bin/ebuild-helpers/prepallinfo
new file mode 100755
index 0000000..0d97803
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/prepallinfo
@@ -0,0 +1,9 @@
+#!/bin/bash
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+[[ ! -d ${D}usr/share/info ]] && exit 0
+
+exec prepinfo
diff --git a/portage_with_autodep/bin/ebuild-helpers/prepallman b/portage_with_autodep/bin/ebuild-helpers/prepallman
new file mode 100755
index 0000000..e50de6d
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/prepallman
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+# replaced by controllable compression in EAPI 4
+has "${EAPI}" 0 1 2 3 || exit 0
+
+ret=0
+
+find "${D}" -type d -name man > "${T}"/prepallman.filelist
+while read -r mandir ; do
+ mandir=${mandir#${D}}
+ prepman "${mandir%/man}"
+ ((ret|=$?))
+done < "${T}"/prepallman.filelist
+
+exit ${ret}
diff --git a/portage_with_autodep/bin/ebuild-helpers/prepallstrip b/portage_with_autodep/bin/ebuild-helpers/prepallstrip
new file mode 100755
index 0000000..ec12ce6
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/prepallstrip
@@ -0,0 +1,5 @@
+#!/bin/bash
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+exec prepstrip "${D}"
diff --git a/portage_with_autodep/bin/ebuild-helpers/prepinfo b/portage_with_autodep/bin/ebuild-helpers/prepinfo
new file mode 100755
index 0000000..691fd13
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/prepinfo
@@ -0,0 +1,34 @@
+#!/bin/bash
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -z $1 ]] ; then
+ infodir="/usr/share/info"
+else
+ if [[ -d ${D}$1/share/info ]] ; then
+ infodir="$1/share/info"
+ else
+ infodir="$1/info"
+ fi
+fi
+
+if [[ ! -d ${D}${infodir} ]] ; then
+ if [[ -n $1 ]] ; then
+ vecho "${0##*/}: '${infodir}' does not exist!"
+ exit 1
+ else
+ exit 0
+ fi
+fi
+
+find "${D}${infodir}" -type d -print0 | while read -r -d $'\0' x ; do
+ for f in "${x}"/.keepinfodir*; do
+ [[ -e ${f} ]] && continue 2
+ done
+ rm -f "${x}"/dir{,.info}{,.gz,.bz2}
+done
+
+has "${EAPI}" 0 1 2 3 || exit 0
+exec ecompressdir --queue "${infodir}"
diff --git a/portage_with_autodep/bin/ebuild-helpers/preplib b/portage_with_autodep/bin/ebuild-helpers/preplib
new file mode 100755
index 0000000..76aabe6
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/preplib
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+eqawarn "QA Notice: Deprecated call to 'preplib'"
+
+LIBDIR_VAR="LIBDIR_${ABI}"
+if [ -n "${ABI}" -a -n "${!LIBDIR_VAR}" ]; then
+ CONF_LIBDIR="${!LIBDIR_VAR}"
+fi
+unset LIBDIR_VAR
+
+if [ -z "${CONF_LIBDIR}" ]; then
+	# we need this to default to lib so that things don't break
+ CONF_LIBDIR="lib"
+fi
+
+if [ -z "$1" ] ; then
+ z="${D}usr/${CONF_LIBDIR}"
+else
+ z="${D}$1/${CONF_LIBDIR}"
+fi
+
+if [ -d "${z}" ] ; then
+ ldconfig -n -N "${z}"
+fi
diff --git a/portage_with_autodep/bin/ebuild-helpers/prepman b/portage_with_autodep/bin/ebuild-helpers/prepman
new file mode 100755
index 0000000..c9add8a
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/prepman
@@ -0,0 +1,32 @@
+#!/bin/bash
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -z $1 ]] ; then
+ mandir="${D}usr/share/man"
+else
+ mandir="${D}$1/man"
+fi
+
+if [[ ! -d ${mandir} ]] ; then
+ eqawarn "QA Notice: prepman called with non-existent dir '${mandir#${D}}'"
+ exit 0
+fi
+
+# replaced by controllable compression in EAPI 4
+has "${EAPI}" 0 1 2 3 || exit 0
+
+shopt -s nullglob
+
+really_is_mandir=0
+
+# use some heuristics to test if this is a real mandir
+for subdir in "${mandir}"/man* "${mandir}"/*/man* ; do
+ [[ -d ${subdir} ]] && really_is_mandir=1 && break
+done
+
+[[ ${really_is_mandir} == 1 ]] && exec ecompressdir --queue "${mandir#${D}}"
+
+exit 0
diff --git a/portage_with_autodep/bin/ebuild-helpers/prepstrip b/portage_with_autodep/bin/ebuild-helpers/prepstrip
new file mode 100755
index 0000000..d25259d
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/prepstrip
@@ -0,0 +1,193 @@
+#!/bin/bash
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+banner=false
+SKIP_STRIP=false
+if has nostrip ${FEATURES} || \
+ has strip ${RESTRICT}
+then
+ SKIP_STRIP=true
+ banner=true
+ has installsources ${FEATURES} || exit 0
+fi
+
+STRIP=${STRIP:-${CHOST}-strip}
+type -P -- ${STRIP} > /dev/null || STRIP=strip
+OBJCOPY=${OBJCOPY:-${CHOST}-objcopy}
+type -P -- ${OBJCOPY} > /dev/null || OBJCOPY=objcopy
+
+# We'll leave out -R .note for now until we can check out the relevance
+# of the section when it has the ALLOC flag set on it ...
+export SAFE_STRIP_FLAGS="--strip-unneeded"
+export PORTAGE_STRIP_FLAGS=${PORTAGE_STRIP_FLAGS-${SAFE_STRIP_FLAGS} -R .comment}
+prepstrip_sources_dir=/usr/src/debug/${CATEGORY}/${PF}
+
+if has installsources ${FEATURES} && ! type -P debugedit >/dev/null ; then
+ ewarn "FEATURES=installsources is enabled but the debugedit binary could not"
+ ewarn "be found. This feature will not work unless debugedit is installed!"
+fi
+
+unset ${!INODE_*}
+
+inode_var_name() {
+ if [[ $USERLAND = BSD ]] ; then
+ stat -f 'INODE_%d_%i' "$1"
+ else
+ stat -c 'INODE_%d_%i' "$1"
+ fi
+}
+
+save_elf_sources() {
+ has installsources ${FEATURES} || return 0
+ has installsources ${RESTRICT} && return 0
+ type -P debugedit >/dev/null || return 0
+
+ local x=$1
+ local inode=$(inode_var_name "$x")
+ [[ -n ${!inode} ]] && return 0
+ debugedit -b "${WORKDIR}" -d "${prepstrip_sources_dir}" \
+ -l "${T}"/debug.sources "${x}"
+}
+
+save_elf_debug() {
+ has splitdebug ${FEATURES} || return 0
+
+ local x=$1
+ local y="${D}usr/lib/debug/${x:${#D}}.debug"
+
+	# don't save debug info twice
+ [[ ${x} == *".debug" ]] && return 0
+
+ # this will recompute the build-id, but for now that's ok
+ local buildid="$( type -P debugedit >/dev/null && debugedit -i "${x}" )"
+
+ mkdir -p $(dirname "${y}")
+
+ local inode=$(inode_var_name "$x")
+ if [[ -n ${!inode} ]] ; then
+ ln "${D}usr/lib/debug/${!inode:${#D}}.debug" "$y"
+ else
+ eval $inode=\$x
+ ${OBJCOPY} --only-keep-debug "${x}" "${y}"
+ ${OBJCOPY} --add-gnu-debuglink="${y}" "${x}"
+ [[ -g ${x} ]] && chmod go-r "${y}"
+ [[ -u ${x} ]] && chmod go-r "${y}"
+ chmod a-x,o-w "${y}"
+ fi
+
+ if [[ -n ${buildid} ]] ; then
+ local buildid_dir="${D}usr/lib/debug/.build-id/${buildid:0:2}"
+ local buildid_file="${buildid_dir}/${buildid:2}"
+ mkdir -p "${buildid_dir}"
+ ln -s "../../${x:${#D}}.debug" "${buildid_file}.debug"
+ ln -s "/${x:${#D}}" "${buildid_file}"
+ fi
+}
+
+# The absence of the .symtab section tells us that a binary is stripped.
+# We want to log already stripped binaries, as this may be a QA violation.
+# They prevent us from getting the splitdebug data.
+if ! has binchecks ${RESTRICT} && \
+ ! has strip ${RESTRICT} ; then
+ log=$T/scanelf-already-stripped.log
+ qa_var="QA_PRESTRIPPED_${ARCH/-/_}"
+ [[ -n ${!qa_var} ]] && QA_PRESTRIPPED="${!qa_var}"
+ scanelf -yqRBF '#k%F' -k '!.symtab' "$@" | sed -e "s#^$D##" > "$log"
+ if [[ -n $QA_PRESTRIPPED && -s $log && \
+ ${QA_STRICT_PRESTRIPPED-unset} = unset ]] ; then
+ shopts=$-
+ set -o noglob
+ for x in $QA_PRESTRIPPED ; do
+ sed -e "s#^${x#/}\$##" -i "$log"
+ done
+ set +o noglob
+ set -$shopts
+ fi
+ sed -e "/^\$/d" -e "s#^#/#" -i "$log"
+ if [[ -s $log ]] ; then
+ vecho -e "\n"
+ eqawarn "QA Notice: Pre-stripped files found:"
+ eqawarn "$(<"$log")"
+ else
+ rm -f "$log"
+ fi
+fi
+
+# Now we look for unstripped binaries.
+for x in \
+ $(scanelf -yqRBF '#k%F' -k '.symtab' "$@") \
+ $(find "$@" -type f -name '*.a')
+do
+ if ! ${banner} ; then
+ vecho "strip: ${STRIP} ${PORTAGE_STRIP_FLAGS}"
+ banner=true
+ fi
+
+ f=$(file "${x}") || continue
+ [[ -z ${f} ]] && continue
+
+ if ! ${SKIP_STRIP} ; then
+ # The noglob funk is to support STRIP_MASK="/*/booga" and to keep
+ # the for loop from expanding the globs.
+	# The eval echo is to support STRIP_MASK="/*/{booga,bar}" syntax.
+ set -o noglob
+ strip_this=true
+ for m in $(eval echo ${STRIP_MASK}) ; do
+ [[ /${x#${D}} == ${m} ]] && strip_this=false && break
+ done
+ set +o noglob
+ else
+ strip_this=false
+ fi
+
+ # only split debug info for final linked objects
+	# or kernel modules as debuginfo for intermediary
+ # files (think crt*.o from gcc/glibc) is useless and
+ # actually causes problems. install sources for all
+ # elf types though cause that stuff is good.
+
+ if [[ ${f} == *"current ar archive"* ]] ; then
+ vecho " ${x:${#D}}"
+ if ${strip_this} ; then
+ # hmm, can we split debug/sources for .a ?
+ ${STRIP} -g "${x}"
+ fi
+ elif [[ ${f} == *"SB executable"* || ${f} == *"SB shared object"* ]] ; then
+ vecho " ${x:${#D}}"
+ save_elf_sources "${x}"
+ if ${strip_this} ; then
+ save_elf_debug "${x}"
+ ${STRIP} ${PORTAGE_STRIP_FLAGS} "${x}"
+ fi
+ elif [[ ${f} == *"SB relocatable"* ]] ; then
+ vecho " ${x:${#D}}"
+ save_elf_sources "${x}"
+ if ${strip_this} ; then
+ [[ ${x} == *.ko ]] && save_elf_debug "${x}"
+ ${STRIP} ${SAFE_STRIP_FLAGS} "${x}"
+ fi
+ fi
+done
+
+if [[ -s ${T}/debug.sources ]] && \
+ has installsources ${FEATURES} && \
+ ! has installsources ${RESTRICT} && \
+ type -P debugedit >/dev/null
+then
+ vecho "installsources: rsyncing source files"
+ [[ -d ${D}${prepstrip_sources_dir} ]] || mkdir -p "${D}${prepstrip_sources_dir}"
+ grep -zv '/<[^/>]*>$' "${T}"/debug.sources | \
+ (cd "${WORKDIR}"; LANG=C sort -z -u | \
+ rsync -tL0 --files-from=- "${WORKDIR}/" "${D}${prepstrip_sources_dir}/" )
+
+ # Preserve directory structure.
+ # Needed after running save_elf_sources.
+ # https://bugzilla.redhat.com/show_bug.cgi?id=444310
+ while read -r -d $'\0' emptydir
+ do
+ >> "$emptydir"/.keepdir
+ done < <(find "${D}${prepstrip_sources_dir}/" -type d -empty -print0)
+fi
diff --git a/portage_with_autodep/bin/ebuild-helpers/sed b/portage_with_autodep/bin/ebuild-helpers/sed
new file mode 100755
index 0000000..b21e856
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/sed
@@ -0,0 +1,27 @@
#!/bin/bash
# Copyright 2007 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2

# Wrapper around sed for ebuild-helpers: prefer ${ESED} when set, then a
# g-prefixed variant (gsed), otherwise the first real binary on PATH that
# is not this wrapper itself. Exits 1 when nothing suitable is found.
scriptpath=${BASH_SOURCE[0]}
scriptname=${scriptpath##*/}

if [[ sed == ${scriptname} ]] && [[ -n ${ESED} ]]; then
	# ESED may deliberately contain extra arguments; leave it unquoted.
	exec ${ESED} "$@"
elif type -P g${scriptname} > /dev/null ; then
	exec g${scriptname} "$@"
else
	old_IFS="${IFS}"
	IFS=":"

	for path in $PATH; do
		# Skip this wrapper so we don't exec ourselves recursively.
		[[ ${path}/${scriptname} == ${scriptpath} ]] && continue
		if [[ -x ${path}/${scriptname} ]]; then
			# Fix: removed an unreachable 'exit 0' that followed this
			# exec — a failed exec terminates a non-interactive shell,
			# and reporting success there would have been wrong anyway.
			exec ${path}/${scriptname} "$@"
		fi
	done

	IFS="${old_IFS}"
fi

exit 1
diff --git a/portage_with_autodep/bin/ebuild-ipc b/portage_with_autodep/bin/ebuild-ipc
new file mode 100755
index 0000000..43e4a02
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-ipc
@@ -0,0 +1,8 @@
#!/bin/bash
# Copyright 2010 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2

# Thin launcher: resolve portage's bin/pym locations (overridable from the
# environment) and exec the Python implementation with all arguments
# passed through unchanged.
PORTAGE_BIN_PATH=${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}
PORTAGE_PYM_PATH=${PORTAGE_PYM_PATH:-/usr/lib/portage/pym}
# Prepend portage's python modules to PYTHONPATH for the exec'd process only.
PYTHONPATH=$PORTAGE_PYM_PATH${PYTHONPATH:+:}$PYTHONPATH \
	exec "${PORTAGE_PYTHON:-/usr/bin/python}" "$PORTAGE_BIN_PATH/ebuild-ipc.py" "$@"
diff --git a/portage_with_autodep/bin/ebuild-ipc.py b/portage_with_autodep/bin/ebuild-ipc.py
new file mode 100755
index 0000000..68ad985
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-ipc.py
@@ -0,0 +1,276 @@
+#!/usr/bin/python
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+#
+# This is a helper which ebuild processes can use
+# to communicate with portage's main python process.
+
+import errno
+import logging
+import os
+import pickle
+import select
+import signal
+import sys
+import time
+
def debug_signal(signum, frame):
	"""SIGUSR1 handler: drop into pdb for interactive debugging."""
	import pdb
	pdb.set_trace()
# Install the handler at import time so a running helper can be inspected.
signal.signal(signal.SIGUSR1, debug_signal)
+
# Avoid sandbox violations after python upgrade.
pym_path = os.path.join(os.path.dirname(
	os.path.dirname(os.path.realpath(__file__))), "pym")
if os.environ.get("SANDBOX_ON") == "1":
	# Grant the sandbox write access to the pym directory (e.g. for
	# .pyc regeneration) before importing portage below.
	sandbox_write = os.environ.get("SANDBOX_WRITE", "").split(":")
	if pym_path not in sandbox_write:
		sandbox_write.append(pym_path)
		os.environ["SANDBOX_WRITE"] = \
			":".join(filter(None, sandbox_write))

import portage
portage._disable_legacy_globals()
+
class EbuildIpc(object):
	"""Client side of the ebuild <-> portage IPC channel.

	Exchanges pickled request/reply tuples with portage's main python
	process over a pair of FIFOs inside PORTAGE_BUILDDIR. Blocking FIFO
	I/O is delegated to forked children while the parent supervises with
	timeouts (see _communicate/_wait).
	"""

	# Timeout for each individual communication attempt (we retry
	# as long as the daemon process appears to be alive).
	_COMMUNICATE_RETRY_TIMEOUT_SECONDS = 15
	_BUFSIZE = 4096

	def __init__(self):
		# All IPC rendezvous files live in the build directory.
		self.fifo_dir = os.environ['PORTAGE_BUILDDIR']
		self.ipc_in_fifo = os.path.join(self.fifo_dir, '.ipc_in')
		self.ipc_out_fifo = os.path.join(self.fifo_dir, '.ipc_out')
		self.ipc_lock_file = os.path.join(self.fifo_dir, '.ipc_lock')

	def _daemon_is_alive(self):
		"""Return True if the daemon still holds the builddir lock."""
		try:
			# A non-blocking lock attempt raises TryAgain exactly when
			# another process (the daemon) currently holds the lock.
			builddir_lock = portage.locks.lockfile(self.fifo_dir,
				wantnewlockfile=True, flags=os.O_NONBLOCK)
		except portage.exception.TryAgain:
			return True
		else:
			portage.locks.unlockfile(builddir_lock)
			return False

	def communicate(self, args):
		"""Send *args* to the daemon and return the reply's exit code.

		Holds .ipc_lock for the whole exchange so that concurrent
		helpers cannot interleave their request/reply pairs.
		"""

		# Make locks quiet since unintended locking messages displayed on
		# stdout could corrupt the intended output of this program.
		portage.locks._quiet = True
		lock_obj = portage.locks.lockfile(self.ipc_lock_file, unlinkfile=True)

		try:
			return self._communicate(args)
		finally:
			portage.locks.unlockfile(lock_obj)

	def _timeout_retry_msg(self, start_time, when):
		# Log a retry notice including elapsed wall-clock time.
		time_elapsed = time.time() - start_time
		portage.util.writemsg_level(
			portage.localization._(
			'ebuild-ipc timed out %s after %d seconds,' + \
			' retrying...\n') % (when, time_elapsed),
			level=logging.ERROR, noiselevel=-1)

	def _no_daemon_msg(self):
		# Log that no daemon process could be detected.
		portage.util.writemsg_level(
			portage.localization._(
			'ebuild-ipc: daemon process not detected\n'),
			level=logging.ERROR, noiselevel=-1)

	def _wait(self, pid, pr, msg):
		"""
		Wait on pid and return an appropriate exit code. This
		may return unsuccessfully due to timeout if the daemon
		process does not appear to be alive.
		"""

		start_time = time.time()

		while True:
			try:
				# The child's exit closes its end of the pipe, which
				# wakes this select; the timeout bounds each wait.
				events = select.select([pr], [], [],
					self._COMMUNICATE_RETRY_TIMEOUT_SECONDS)
			except select.error as e:
				portage.util.writemsg_level(
					"ebuild-ipc: %s: %s\n" % \
					(portage.localization._('during select'), e),
					level=logging.ERROR, noiselevel=-1)
				continue

			if events[0]:
				break

			if self._daemon_is_alive():
				# Daemon is alive but slow: report and keep waiting.
				self._timeout_retry_msg(start_time, msg)
			else:
				# Daemon is gone: reap the stuck child and give up.
				self._no_daemon_msg()
				try:
					os.kill(pid, signal.SIGKILL)
					os.waitpid(pid, 0)
				except OSError as e:
					portage.util.writemsg_level(
						"ebuild-ipc: %s\n" % (e,),
						level=logging.ERROR, noiselevel=-1)
				return 2

		try:
			wait_retval = os.waitpid(pid, 0)
		except OSError as e:
			portage.util.writemsg_level(
				"ebuild-ipc: %s: %s\n" % (msg, e),
				level=logging.ERROR, noiselevel=-1)
			return 2

		# A child that did not exit normally (e.g. killed by a signal)
		# counts as a subprocess failure.
		if not os.WIFEXITED(wait_retval[1]):
			portage.util.writemsg_level(
				"ebuild-ipc: %s: %s\n" % (msg,
				portage.localization._('subprocess failure: %s') % \
				wait_retval[1]),
				level=logging.ERROR, noiselevel=-1)
			return 2

		return os.WEXITSTATUS(wait_retval[1])

	def _receive_reply(self, input_fd):
		"""Read one pickled reply from *input_fd* and return its exit code.

		Writes the reply's stdout/stderr payloads through portage's
		message helpers; returns 2 on read or unpickling failure.
		"""

		# Timeouts are handled by the parent process, so just
		# block until input is available. For maximum portability,
		# use a single atomic read.
		buf = None
		while True:
			try:
				events = select.select([input_fd], [], [])
			except select.error as e:
				portage.util.writemsg_level(
					"ebuild-ipc: %s: %s\n" % \
					(portage.localization._('during select for read'), e),
					level=logging.ERROR, noiselevel=-1)
				continue

			if events[0]:
				# For maximum portability, use os.read() here since
				# array.fromfile() and file.read() are both known to
				# erroneously return an empty string from this
				# non-blocking fifo stream on FreeBSD (bug #337465).
				try:
					buf = os.read(input_fd, self._BUFSIZE)
				except OSError as e:
					if e.errno != errno.EAGAIN:
						portage.util.writemsg_level(
							"ebuild-ipc: %s: %s\n" % \
							(portage.localization._('read error'), e),
							level=logging.ERROR, noiselevel=-1)
						break
					# Assume that another event will be generated
					# if there's any relevant data.
					continue

				# Only one (atomic) read should be necessary.
				if buf:
					break

		retval = 2

		if not buf:

			portage.util.writemsg_level(
				"ebuild-ipc: %s\n" % \
				(portage.localization._('read failed'),),
				level=logging.ERROR, noiselevel=-1)

		else:

			try:
				reply = pickle.loads(buf)
			except SystemExit:
				raise
			except Exception as e:
				# The pickle module can raise practically
				# any exception when given corrupt data.
				portage.util.writemsg_level(
					"ebuild-ipc: %s\n" % (e,),
					level=logging.ERROR, noiselevel=-1)

			else:

				# Reply layout: (stdout_text, stderr_text, exit_code).
				(out, err, retval) = reply

				if out:
					portage.util.writemsg_stdout(out, noiselevel=-1)

				if err:
					portage.util.writemsg(err, noiselevel=-1)

		return retval

	def _communicate(self, args):
		"""Perform one request/reply round trip (lock already held)."""

		if not self._daemon_is_alive():
			self._no_daemon_msg()
			return 2

		# Open the input fifo before the output fifo, in order to make it
		# possible for the daemon to send a reply without blocking. This
		# improves performance, and also makes it possible for the daemon
		# to do a non-blocking write without a race condition.
		input_fd = os.open(self.ipc_out_fifo,
			os.O_RDONLY|os.O_NONBLOCK)

		# Use forks so that the child process can handle blocking IO
		# un-interrupted, while the parent handles all timeout
		# considerations. This helps to avoid possible race conditions
		# from interference between timeouts and blocking IO operations.
		pr, pw = os.pipe()
		pid = os.fork()

		if pid == 0:
			os.close(pr)

			# File streams are in unbuffered mode since we do atomic
			# read and write of whole pickles.
			output_file = open(self.ipc_in_fifo, 'wb', 0)
			output_file.write(pickle.dumps(args))
			output_file.close()
			os._exit(os.EX_OK)

		os.close(pw)

		msg = portage.localization._('during write')
		retval = self._wait(pid, pr, msg)
		os.close(pr)

		if retval != os.EX_OK:
			portage.util.writemsg_level(
				"ebuild-ipc: %s: %s\n" % (msg,
				portage.localization._('subprocess failure: %s') % \
				retval), level=logging.ERROR, noiselevel=-1)
			return retval

		if not self._daemon_is_alive():
			self._no_daemon_msg()
			return 2

		# Second fork: the child blocks reading the reply while the
		# parent again supervises with timeouts via _wait().
		pr, pw = os.pipe()
		pid = os.fork()

		if pid == 0:
			os.close(pr)
			retval = self._receive_reply(input_fd)
			os._exit(retval)

		os.close(pw)
		retval = self._wait(pid, pr, portage.localization._('during read'))
		os.close(pr)
		os.close(input_fd)
		return retval
+
def ebuild_ipc_main(args):
	"""Entry point: forward *args* to the IPC daemon, return its exit code."""
	return EbuildIpc().communicate(args)

if __name__ == '__main__':
	sys.exit(ebuild_ipc_main(sys.argv[1:]))
diff --git a/portage_with_autodep/bin/ebuild.sh b/portage_with_autodep/bin/ebuild.sh
new file mode 100755
index 0000000..d68e54b
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild.sh
@@ -0,0 +1,2424 @@
+#!/bin/bash
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
PORTAGE_BIN_PATH="${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"
PORTAGE_PYM_PATH="${PORTAGE_PYM_PATH:-/usr/lib/portage/pym}"

if [[ $PORTAGE_SANDBOX_COMPAT_LEVEL -lt 22 ]] ; then
	# Ensure that /dev/std* streams have appropriate sandbox permission for
	# bug #288863. This can be removed after sandbox is fixed and portage
	# depends on the fixed version (sandbox-2.2 has the fix but it is
	# currently unstable).
	export SANDBOX_WRITE="${SANDBOX_WRITE:+${SANDBOX_WRITE}:}/dev/stdout:/dev/stderr"
	export SANDBOX_READ="${SANDBOX_READ:+${SANDBOX_READ}:}/dev/stdin"
fi

# Don't use sandbox's BASH_ENV for new shells because it does
# 'source /etc/profile' which can interfere with the build
# environment by modifying our PATH.
unset BASH_ENV

# Strip stray leading/trailing colons so the PATH assembled below
# contains no empty components.
ROOTPATH=${ROOTPATH##:}
ROOTPATH=${ROOTPATH%%:}
PREROOTPATH=${PREROOTPATH##:}
PREROOTPATH=${PREROOTPATH%%:}
# ebuild-helpers come first so they shadow the regular system tools.
PATH=$PORTAGE_BIN_PATH/ebuild-helpers:$PREROOTPATH${PREROOTPATH:+:}/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin${ROOTPATH:+:}$ROOTPATH
export PATH

# This is just a temporary workaround for portage-9999 users since
# earlier portage versions do not detect a version change in this case
# (9999 to 9999) and therefore they try execute an incompatible version of
# ebuild.sh during the upgrade.
export PORTAGE_BZIP2_COMMAND=${PORTAGE_BZIP2_COMMAND:-bzip2}
+
+# These two functions wrap sourcing and calling respectively. At present they
+# perform a qa check to make sure eclasses and ebuilds and profiles don't mess
+# with shell opts (shopts). Ebuilds/eclasses changing shopts should reset them
+# when they are done.
+
# Source a file while verifying that it leaves the global shell options
# and IFS untouched, emitting QA warnings when it does not.
qa_source() {
	local saved_shopts=$(shopt) saved_ifs="$IFS"
	local rc
	source "$@"
	rc=$?
	set +e
	[[ $saved_shopts != $(shopt) ]] &&
		eqawarn "QA Notice: Global shell options changed and were not restored while sourcing '$*'"
	[[ "$IFS" != "$saved_ifs" ]] &&
		eqawarn "QA Notice: Global IFS changed and was not restored while sourcing '$*'"
	return $rc
}
+
# Invoke a command while verifying that it leaves the global shell
# options and IFS untouched, emitting QA warnings when it does not.
qa_call() {
	local saved_shopts=$(shopt) saved_ifs="$IFS"
	local rc
	"$@"
	rc=$?
	set +e
	[[ $saved_shopts != $(shopt) ]] &&
		eqawarn "QA Notice: Global shell options changed and were not restored while calling '$*'"
	[[ "$IFS" != "$saved_ifs" ]] &&
		eqawarn "QA Notice: Global IFS changed and was not restored while calling '$*'"
	return $rc
}
+
# Capture the phase arguments for later dispatch, then clear the
# positional parameters so sourced ebuild code cannot see them.
EBUILD_SH_ARGS="$*"

shift $#

# Prevent aliases from causing portage to act inappropriately.
# Make sure it's before everything so we don't mess aliases that follow.
unalias -a

# Unset some variables that break things.
unset GZIP BZIP BZIP2 CDPATH GREP_OPTIONS GREP_COLOR GLOBIGNORE

source "${PORTAGE_BIN_PATH}/isolated-functions.sh" &>/dev/null

# Re-export so child processes inherit quiet mode when it is set.
[[ $PORTAGE_QUIET != "" ]] && export PORTAGE_QUIET
+
# sandbox support functions; defined prior to profile.bashrc srcing, since the profile might need to add a default exception (/usr/lib64/conftest fex)
_sb_append_var() {
	# Append a colon-delimited path list to SANDBOX_<KIND>, where <KIND>
	# ($1) is READ/WRITE/DENY/PREDICT. Exactly one list argument is
	# accepted; anything else dies with a usage message.
	local _v=$1 ; shift
	local var="SANDBOX_${_v}"
	[[ -z $1 || -n $2 ]] && die "Usage: add$(echo ${_v} | \
		LC_ALL=C tr [:upper:] [:lower:]) <colon-delimited list of paths>"
	export ${var}="${!var:+${!var}:}$1"
}
# bash-4 version:
# local var="SANDBOX_${1^^}"
# addread() { _sb_append_var ${0#add} "$@" ; }
# Public helpers used by ebuilds/profiles to extend sandbox exceptions.
addread() { _sb_append_var READ "$@" ; }
addwrite() { _sb_append_var WRITE "$@" ; }
adddeny() { _sb_append_var DENY "$@" ; }
addpredict() { _sb_append_var PREDICT "$@" ; }
+
# Default exceptions: the build tree is writable, everything is readable.
addwrite "${PORTAGE_TMPDIR}"
addread "/:${PORTAGE_TMPDIR}"
[[ -n ${PORTAGE_GPG_DIR} ]] && addpredict "${PORTAGE_GPG_DIR}"

# Avoid sandbox violations in temporary directories.
if [[ -w $T ]] ; then
	export TEMP=$T
	export TMP=$T
	export TMPDIR=$T
elif [[ $SANDBOX_ON = 1 ]] ; then
	# T is not writable; at least allow writes to any pre-existing
	# temporary locations.
	for x in TEMP TMP TMPDIR ; do
		[[ -n ${!x} ]] && addwrite "${!x}"
	done
	unset x
fi

# the sandbox is disabled by default except when overridden in the relevant stages
export SANDBOX_ON=0
+
# chown/chgrp variants that act on symlinks themselves (-h) rather than
# on their targets.
lchown() {
	chown -h "$@"
}

lchgrp() {
	chgrp -h "$@"
}

esyslog() {
	# Custom version of esyslog() to take care of the "Red Star" bug.
	# MUST follow functions.sh to override the "" parameter problem.
	return 0
}
+
# Deprecated alias for use(); warns outside the prerm/postrm phases.
useq() {
	has $EBUILD_PHASE prerm postrm || eqawarn \
		"QA Notice: The 'useq' function is deprecated (replaced by 'use')"
	use ${1}
}
+
usev() {
	# Like use(), but additionally echo the flag name (sans any leading
	# '!') when the test succeeds.
	if ! use ${1} ; then
		return 1
	fi
	echo "${1#!}"
	return 0
}
+
# Test whether USE flag $1 is enabled; a leading '!' inverts the result.
# Returns 0 when the (possibly inverted) test succeeds, 1 otherwise.
use() {
	local u=$1
	local found=0

	# if we got something like '!flag', then invert the return value
	if [[ ${u:0:1} == "!" ]] ; then
		u=${u:1}
		found=1
	fi

	if [[ $EBUILD_PHASE = depend ]] ; then
		# TODO: Add a registration interface for eclasses to register
		# any number of phase hooks, so that global scope eclass
		# initialization can by migrated to phase hooks in new EAPIs.
		# Example: add_phase_hook before pkg_setup $ECLASS_pre_pkg_setup
		#if [[ -n $EAPI ]] && ! has "$EAPI" 0 1 2 3 ; then
		#	die "use() called during invalid phase: $EBUILD_PHASE"
		#fi
		true

	# Make sure we have this USE flag in IUSE
	elif [[ -n $PORTAGE_IUSE && -n $EBUILD_PHASE ]] ; then
		# PORTAGE_IUSE is a regex matching all valid IUSE flags.
		[[ $u =~ $PORTAGE_IUSE ]] || \
			eqawarn "QA Notice: USE Flag '${u}' not" \
				"in IUSE for ${CATEGORY}/${PF}"
	fi

	if has ${u} ${USE} ; then
		return ${found}
	else
		return $((!found))
	fi
}
+
# Return true if given package is installed. Otherwise return false.
# Takes single depend-type atoms.
has_version() {
	if [ "${EBUILD_PHASE}" == "depend" ]; then
		die "portageq calls (has_version calls portageq) are not allowed in the global scope"
	fi

	# Prefer the fast IPC daemon when available; otherwise spawn portageq.
	if [[ -n $PORTAGE_IPC_DAEMON ]] ; then
		"$PORTAGE_BIN_PATH"/ebuild-ipc has_version "$ROOT" "$1"
	else
		PYTHONPATH=${PORTAGE_PYM_PATH}${PYTHONPATH:+:}${PYTHONPATH} \
		"${PORTAGE_PYTHON:-/usr/bin/python}" "${PORTAGE_BIN_PATH}/portageq" has_version "${ROOT}" "$1"
	fi
	local retval=$?
	# 0/1 are valid yes/no answers; anything else indicates breakage.
	case "${retval}" in
		0|1)
			return ${retval}
			;;
		*)
			die "unexpected portageq exit code: ${retval}"
			;;
	esac
}
+
# Generic portageq passthrough; forbidden in global scope because it
# spawns a python process that depends on a configured environment.
portageq() {
	if [ "${EBUILD_PHASE}" == "depend" ]; then
		die "portageq calls are not allowed in the global scope"
	fi

	PYTHONPATH=${PORTAGE_PYM_PATH}${PYTHONPATH:+:}${PYTHONPATH} \
	"${PORTAGE_PYTHON:-/usr/bin/python}" "${PORTAGE_BIN_PATH}/portageq" "$@"
}
+
+
+# ----------------------------------------------------------------------------
+# ----------------------------------------------------------------------------
+# ----------------------------------------------------------------------------
+
+
# Returns the best/most-current match.
# Takes single depend-type atoms.
best_version() {
	if [ "${EBUILD_PHASE}" == "depend" ]; then
		die "portageq calls (best_version calls portageq) are not allowed in the global scope"
	fi

	# Prefer the fast IPC daemon when available; otherwise spawn portageq.
	if [[ -n $PORTAGE_IPC_DAEMON ]] ; then
		"$PORTAGE_BIN_PATH"/ebuild-ipc best_version "$ROOT" "$1"
	else
		PYTHONPATH=${PORTAGE_PYM_PATH}${PYTHONPATH:+:}${PYTHONPATH} \
		"${PORTAGE_PYTHON:-/usr/bin/python}" "${PORTAGE_BIN_PATH}/portageq" 'best_version' "${ROOT}" "$1"
	fi
	local retval=$?
	# 0/1 are valid answers; anything else indicates breakage.
	case "${retval}" in
		0|1)
			return ${retval}
			;;
		*)
			die "unexpected portageq exit code: ${retval}"
			;;
	esac
}
+
use_with() {
	# Emit --with-<flag>[=value] or --without-<flag> depending on USE.
	if [ -z "$1" ]; then
		echo "!!! use_with() called without a parameter." >&2
		echo "!!! use_with <USEFLAG> [<flagname> [value]]" >&2
		return 1
	fi

	# EAPI >= 4 honors an empty third argument; older EAPIs treat it as
	# unset.
	local suffix
	if has "${EAPI:-0}" 0 1 2 3 ; then
		suffix=${3:+=$3}
	else
		suffix=${3+=$3}
	fi
	local uword=${2:-$1}

	if use $1; then
		echo "--with-${uword}${suffix}"
	else
		echo "--without-${uword}"
	fi
	return 0
}
+
use_enable() {
	# Emit --enable-<flag>[=value] or --disable-<flag> depending on USE.
	if [ -z "$1" ]; then
		echo "!!! use_enable() called without a parameter." >&2
		echo "!!! use_enable <USEFLAG> [<flagname> [value]]" >&2
		return 1
	fi

	# EAPI >= 4 honors an empty third argument; older EAPIs treat it as
	# unset.
	local suffix
	if has "${EAPI:-0}" 0 1 2 3 ; then
		suffix=${3:+=$3}
	else
		suffix=${3+=$3}
	fi
	local uword=${2:-$1}

	if use $1; then
		echo "--enable-${uword}${suffix}"
	else
		echo "--disable-${uword}"
	fi
	return 0
}
+
register_die_hook() {
	# Append each named function to EBUILD_DEATH_HOOKS, skipping
	# functions that are already registered.
	local fn
	for fn in $* ; do
		if ! has $fn $EBUILD_DEATH_HOOKS ; then
			export EBUILD_DEATH_HOOKS="$EBUILD_DEATH_HOOKS $fn"
		fi
	done
}
+
register_success_hook() {
	# Append each named function to EBUILD_SUCCESS_HOOKS, skipping
	# functions that are already registered.
	local fn
	for fn in $* ; do
		if ! has $fn $EBUILD_SUCCESS_HOOKS ; then
			export EBUILD_SUCCESS_HOOKS="$EBUILD_SUCCESS_HOOKS $fn"
		fi
	done
}
+
# Ensure that $PWD is sane whenever possible, to protect against
# exploitation of insecure search path for python -c in ebuilds.
# See bug #239560.
if ! has "$EBUILD_PHASE" clean cleanrm depend help ; then
	cd "$PORTAGE_BUILDDIR" || \
		die "PORTAGE_BUILDDIR does not exist: '$PORTAGE_BUILDDIR'"
fi

#if no perms are specified, dirs/files will have decent defaults
#(not secretive, but not stupid)
umask 022
# Defaults consumed by the into/insinto/do* helpers defined below.
export DESTTREE=/usr
export INSDESTTREE=""
export _E_EXEDESTTREE_=""
export _E_DOCDESTTREE_=""
export INSOPTIONS="-m0644"
export EXEOPTIONS="-m0755"
export LIBOPTIONS="-m0644"
export DIROPTIONS="-m0755"
export MOPREFIX=${PN}
# Default path sets for EAPI 4 controllable documentation compression.
declare -a PORTAGE_DOCOMPRESS=( /usr/share/{doc,info,man} )
declare -a PORTAGE_DOCOMPRESS_SKIP=( /usr/share/doc/${PF}/html )
+
# adds ".keep" files so that dirs aren't auto-cleaned
keepdir() {
	dodir "$@"
	local x
	if [ "$1" == "-R" ] || [ "$1" == "-r" ]; then
		shift
		# NOTE(review): find searches the arguments as given (relative to
		# $PWD) while the printed .keep path is ${D}-prefixed — confirm
		# callers invoke -R with paths resolvable from the current dir.
		find "$@" -type d -printf "${D}%p/.keep_${CATEGORY}_${PN}-${SLOT}\n" \
			| tr "\n" "\0" | \
			while read -r -d $'\0' ; do
				>> "$REPLY" || \
					die "Failed to recursively create .keep files"
			done
	else
		for x in "$@"; do
			# '>>' creates the file without truncating an existing one.
			>> "${D}${x}/.keep_${CATEGORY}_${PN}-${SLOT}" || \
				die "Failed to create .keep in ${D}${x}"
		done
	fi
}
+
# Unpack each listed archive into the current directory, selecting the
# extraction tool from the file extension. Dies on the first failure.
unpack() {
	local srcdir
	local x
	local y
	local myfail
	local eapi=${EAPI:-0}
	[ -z "$*" ] && die "Nothing passed to the 'unpack' command"

	for x in "$@"; do
		vecho ">>> Unpacking ${x} to ${PWD}"
		# y is the next-to-last extension component, used below to
		# recognize .tar.* double extensions.
		y=${x%.*}
		y=${y##*.}

		if [[ ${x} == "./"* ]] ; then
			srcdir=""
		elif [[ ${x} == ${DISTDIR%/}/* ]] ; then
			die "Arguments to unpack() cannot begin with \${DISTDIR}."
		elif [[ ${x} == "/"* ]] ; then
			die "Arguments to unpack() cannot be absolute"
		else
			srcdir="${DISTDIR}/"
		fi
		[[ ! -s ${srcdir}${x} ]] && die "${x} does not exist"

		_unpack_tar() {
			# $1 is the decompressor; pipe through tar for .tar.*,
			# otherwise just decompress into the current directory.
			if [ "${y}" == "tar" ]; then
				$1 -c -- "$srcdir$x" | tar xof -
				assert_sigpipe_ok "$myfail"
			else
				local cwd_dest=${x##*/}
				cwd_dest=${cwd_dest%.*}
				$1 -c -- "${srcdir}${x}" > "${cwd_dest}" || die "$myfail"
			fi
		}

		myfail="failure unpacking ${x}"
		case "${x##*.}" in
			tar)
				tar xof "$srcdir$x" || die "$myfail"
				;;
			tgz)
				tar xozf "$srcdir$x" || die "$myfail"
				;;
			tbz|tbz2)
				${PORTAGE_BUNZIP2_COMMAND:-${PORTAGE_BZIP2_COMMAND} -d} -c -- "$srcdir$x" | tar xof -
				assert_sigpipe_ok "$myfail"
				;;
			ZIP|zip|jar)
				# unzip will interactively prompt under some error conditions,
				# as reported in bug #336285
				( while true ; do echo n || break ; done ) | \
					unzip -qo "${srcdir}${x}" || die "$myfail"
				;;
			gz|Z|z)
				_unpack_tar "gzip -d"
				;;
			bz2|bz)
				_unpack_tar "${PORTAGE_BUNZIP2_COMMAND:-${PORTAGE_BZIP2_COMMAND} -d}"
				;;
			7Z|7z)
				local my_output
				my_output="$(7z x -y "${srcdir}${x}")"
				if [ $? -ne 0 ]; then
					echo "${my_output}" >&2
					die "$myfail"
				fi
				;;
			RAR|rar)
				unrar x -idq -o+ "${srcdir}${x}" || die "$myfail"
				;;
			LHa|LHA|lha|lzh)
				lha xfq "${srcdir}${x}" || die "$myfail"
				;;
			a)
				ar x "${srcdir}${x}" || die "$myfail"
				;;
			deb)
				# Unpacking .deb archives can not always be done with
				# `ar`. For instance on AIX this doesn't work out. If
				# we have `deb2targz` installed, prefer it over `ar` for
				# that reason. We just make sure on AIX `deb2targz` is
				# installed.
				if type -P deb2targz > /dev/null; then
					y=${x##*/}
					local created_symlink=0
					if [ ! "$srcdir$x" -ef "$y" ] ; then
						# deb2targz always extracts into the same directory as
						# the source file, so create a symlink in the current
						# working directory if necessary.
						ln -sf "$srcdir$x" "$y" || die "$myfail"
						created_symlink=1
					fi
					deb2targz "$y" || die "$myfail"
					if [ $created_symlink = 1 ] ; then
						# Clean up the symlink so the ebuild
						# doesn't inadvertently install it.
						rm -f "$y"
					fi
					mv -f "${y%.deb}".tar.gz data.tar.gz || die "$myfail"
				else
					ar x "$srcdir$x" || die "$myfail"
				fi
				;;
			lzma)
				_unpack_tar "lzma -d"
				;;
			xz)
				# xz support was added in EAPI 3.
				if has $eapi 0 1 2 ; then
					vecho "unpack ${x}: file format not recognized. Ignoring."
				else
					_unpack_tar "xz -d"
				fi
				;;
			*)
				vecho "unpack ${x}: file format not recognized. Ignoring."
				;;
		esac
	done
	# Do not chmod '.' since it's probably ${WORKDIR} and PORTAGE_WORKDIR_MODE
	# should be preserved.
	find . -mindepth 1 -maxdepth 1 ! -type l -print0 | \
		${XARGS} -0 chmod -fR a+rX,u+w,g-w,o-w
}
+
strip_duplicate_slashes() {
	# Collapse every run of consecutive '/' characters in $1 to a single
	# slash and print the result; prints nothing for empty input.
	local cleaned=$1
	[[ -z ${cleaned} ]] && return 0
	while [[ ${cleaned} == *//* ]] ; do
		cleaned=${cleaned//\/\///}
	done
	echo ${cleaned}
}
+
# Glob-aware membership test: print and succeed on the first remaining
# argument that matches pattern $1; fail silently when none match.
hasg() {
	local needle=$1 item
	shift
	for item in "$@" ; do
		if [[ ${item} == ${needle} ]] ; then
			echo "${item}"
			return 0
		fi
	done
	return 1
}
# Quiet variant: same test, output suppressed.
hasgq() { hasg "$@" >/dev/null ; }
# Run the package's ./configure with portage's standard argument set
# (prefix, host/target triplets, directory layout, profile libdir,
# EXTRA_ECONF). Dies on any failure.
econf() {
	local x

	# QA: warn when econf is invoked from the wrong phase for this EAPI.
	local phase_func=$(_ebuild_arg_to_phase "$EAPI" "$EBUILD_PHASE")
	if [[ -n $phase_func ]] ; then
		if has "$EAPI" 0 1 ; then
			[[ $phase_func != src_compile ]] && \
				eqawarn "QA Notice: econf called in" \
					"$phase_func instead of src_compile"
		else
			[[ $phase_func != src_configure ]] && \
				eqawarn "QA Notice: econf called in" \
					"$phase_func instead of src_configure"
		fi
	fi

	: ${ECONF_SOURCE:=.}
	if [ -x "${ECONF_SOURCE}/configure" ]; then
		if [[ -n $CONFIG_SHELL && \
			"$(head -n1 "$ECONF_SOURCE/configure")" =~ ^'#!'[[:space:]]*/bin/sh([[:space:]]|$) ]] ; then
			# Fix: die message previously read "Substition".
			sed -e "1s:^#![[:space:]]*/bin/sh:#!$CONFIG_SHELL:" -i "$ECONF_SOURCE/configure" || \
				die "Substitution of shebang in '$ECONF_SOURCE/configure' failed"
		fi
		if [ -e /usr/share/gnuconfig/ ]; then
			# Refresh bundled config.guess/config.sub with the system copies.
			find "${WORKDIR}" -type f '(' \
				-name config.guess -o -name config.sub ')' -print0 | \
				while read -r -d $'\0' x ; do
					vecho " * econf: updating ${x/${WORKDIR}\/} with /usr/share/gnuconfig/${x##*/}"
					cp -f /usr/share/gnuconfig/"${x##*/}" "${x}"
				done
		fi

		# EAPI=4 adds --disable-dependency-tracking to econf
		if ! has "$EAPI" 0 1 2 3 3_pre2 && \
			"${ECONF_SOURCE}/configure" --help 2>/dev/null | \
			grep -q disable-dependency-tracking ; then
			set -- --disable-dependency-tracking "$@"
		fi

		# if the profile defines a location to install libs to aside from default, pass it on.
		# if the ebuild passes in --libdir, they're responsible for the conf_libdir fun.
		local CONF_LIBDIR LIBDIR_VAR="LIBDIR_${ABI}"
		if [[ -n ${ABI} && -n ${!LIBDIR_VAR} ]] ; then
			CONF_LIBDIR=${!LIBDIR_VAR}
		fi
		if [[ -n ${CONF_LIBDIR} ]] && ! hasgq --libdir=\* "$@" ; then
			export CONF_PREFIX=$(hasg --exec-prefix=\* "$@")
			[[ -z ${CONF_PREFIX} ]] && CONF_PREFIX=$(hasg --prefix=\* "$@")
			: ${CONF_PREFIX:=/usr}
			CONF_PREFIX=${CONF_PREFIX#*=}
			[[ ${CONF_PREFIX} != /* ]] && CONF_PREFIX="/${CONF_PREFIX}"
			[[ ${CONF_LIBDIR} != /* ]] && CONF_LIBDIR="/${CONF_LIBDIR}"
			set -- --libdir="$(strip_duplicate_slashes ${CONF_PREFIX}${CONF_LIBDIR})" "$@"
		fi

		# Standard arguments come first so ebuild-supplied ones override.
		set -- \
			--prefix=/usr \
			${CBUILD:+--build=${CBUILD}} \
			--host=${CHOST} \
			${CTARGET:+--target=${CTARGET}} \
			--mandir=/usr/share/man \
			--infodir=/usr/share/info \
			--datadir=/usr/share \
			--sysconfdir=/etc \
			--localstatedir=/var/lib \
			"$@" \
			${EXTRA_ECONF}
		vecho "${ECONF_SOURCE}/configure" "$@"

		if ! "${ECONF_SOURCE}/configure" "$@" ; then

			if [ -s config.log ]; then
				echo
				echo "!!! Please attach the following file when seeking support:"
				echo "!!! ${PWD}/config.log"
			fi
			die "econf failed"
		fi
	elif [ -f "${ECONF_SOURCE}/configure" ]; then
		die "configure is not executable"
	else
		die "no configure script found"
	fi
}
+
# Fallback installer for build systems that ignore DESTDIR: run
# `make install` with each standard directory variable prefixed by ${D}.
einstall() {
	# CONF_PREFIX is only set if they didn't pass in libdir above.
	local LOCAL_EXTRA_EINSTALL="${EXTRA_EINSTALL}"
	LIBDIR_VAR="LIBDIR_${ABI}"
	if [ -n "${ABI}" -a -n "${!LIBDIR_VAR}" ]; then
		CONF_LIBDIR="${!LIBDIR_VAR}"
	fi
	unset LIBDIR_VAR
	if [ -n "${CONF_LIBDIR}" ] && [ "${CONF_PREFIX:+set}" = set ]; then
		EI_DESTLIBDIR="${D}/${CONF_PREFIX}/${CONF_LIBDIR}"
		EI_DESTLIBDIR="$(strip_duplicate_slashes ${EI_DESTLIBDIR})"
		LOCAL_EXTRA_EINSTALL="libdir=${EI_DESTLIBDIR} ${LOCAL_EXTRA_EINSTALL}"
		unset EI_DESTLIBDIR
	fi

	if [ -f ./[mM]akefile -o -f ./GNUmakefile ] ; then
		if [ "${PORTAGE_DEBUG}" == "1" ]; then
			# Dry run first so the expanded command appears in the log.
			${MAKE:-make} -n prefix="${D}usr" \
				datadir="${D}usr/share" \
				infodir="${D}usr/share/info" \
				localstatedir="${D}var/lib" \
				mandir="${D}usr/share/man" \
				sysconfdir="${D}etc" \
				${LOCAL_EXTRA_EINSTALL} \
				${MAKEOPTS} ${EXTRA_EMAKE} -j1 \
				"$@" install
		fi
		${MAKE:-make} prefix="${D}usr" \
			datadir="${D}usr/share" \
			infodir="${D}usr/share/info" \
			localstatedir="${D}var/lib" \
			mandir="${D}usr/share/man" \
			sysconfdir="${D}etc" \
			${LOCAL_EXTRA_EINSTALL} \
			${MAKEOPTS} ${EXTRA_EMAKE} -j1 \
			"$@" install || die "einstall failed"
	else
		die "no Makefile found"
	fi
}
+
_eapi0_pkg_nofetch() {
	# Default pkg_nofetch: list the distfiles the user must fetch by hand.
	[ -z "${SRC_URI}" ] && return

	elog "The following are listed in SRC_URI for ${PN}:"
	local entry
	for entry in $(echo ${SRC_URI}); do
		elog " ${entry}"
	done
}
+
_eapi0_src_unpack() {
	# Default src_unpack for EAPI 0: unpack every distfile listed in A.
	# NOTE(review): when A is empty the '[[ -n ]] &&' makes this return 1;
	# callers appear to ignore the status — confirm before changing.
	[[ -n ${A} ]] && unpack ${A}
}
+
_eapi0_src_compile() {
	# EAPI 0 default src_compile: run econf when a configure script is
	# present, then fall through to the shared emake logic.
	[ -x ./configure ] && econf
	_eapi2_src_compile
}
+
_eapi0_src_test() {
	# Since we don't want emake's automatic die
	# support (EAPI 4 and later), and we also don't
	# want the warning messages that it produces if
	# we call it in 'nonfatal' mode, we use emake_cmd
	# to emulate the desired parts of emake behavior.
	local emake_cmd="${MAKE:-make} ${MAKEOPTS} ${EXTRA_EMAKE}"
	# Probe each target with -n first so a missing target is not fatal.
	if $emake_cmd -j1 check -n &> /dev/null; then
		vecho ">>> Test phase [check]: ${CATEGORY}/${PF}"
		if ! $emake_cmd -j1 check; then
			# Fatal only when the 'test' FEATURE is enabled.
			has test $FEATURES && die "Make check failed. See above for details."
			has test $FEATURES || eerror "Make check failed. See above for details."
		fi
	elif $emake_cmd -j1 test -n &> /dev/null; then
		vecho ">>> Test phase [test]: ${CATEGORY}/${PF}"
		if ! $emake_cmd -j1 test; then
			has test $FEATURES && die "Make test failed. See above for details."
			has test $FEATURES || eerror "Make test failed. See above for details."
		fi
	else
		vecho ">>> Test phase [none]: ${CATEGORY}/${PF}"
	fi
}
+
_eapi1_src_compile() {
	# EAPI 1 default src_compile: configure then compile, reusing the
	# EAPI 2 implementations.
	_eapi2_src_configure
	_eapi2_src_compile
}
+
_eapi2_src_configure() {
	# EAPI 2+ default src_configure; no-op (status 0) when there is no
	# executable configure script.
	if [[ -x ${ECONF_SOURCE:-.}/configure ]] ; then
		econf
	fi
}
+
_eapi2_src_compile() {
	# EAPI 2+ default src_compile: run emake whenever a makefile exists.
	if [ -f Makefile ] || [ -f GNUmakefile ] || [ -f makefile ]; then
		emake || die "emake failed"
	fi
}
+
_eapi4_src_install() {
	# EAPI 4 default src_install: make install into ${D}, then install
	# documentation from DOCS (or a default candidate list when unset).
	if [[ -f Makefile || -f GNUmakefile || -f makefile ]] ; then
		emake DESTDIR="${D}" install
	fi

	if ! declare -p DOCS &>/dev/null ; then
		local d
		for d in README* ChangeLog AUTHORS NEWS TODO CHANGES \
			THANKS BUGS FAQ CREDITS CHANGELOG ; do
			[[ -s "${d}" ]] && dodoc "${d}"
		done
	elif [[ $(declare -p DOCS) == "declare -a "* ]] ; then
		# DOCS was declared as an array.
		dodoc "${DOCS[@]}"
	else
		dodoc ${DOCS}
	fi
}
+
# Run the named phase/hook function through qa_call, if it is defined.
ebuild_phase() {
	declare -F "$1" >/dev/null && qa_call $1
}
+
ebuild_phase_with_hooks() {
	# Run pre_<phase>, <phase> and post_<phase> in that order.
	local hook phase_name=${1}
	for hook in pre_${phase_name} ${phase_name} post_${phase_name} ; do
		ebuild_phase ${hook}
	done
}
+
dyn_pretend() {
	# Run the pkg_pretend phase once, guarded by a stamp file.
	if [[ -e $PORTAGE_BUILDDIR/.pretended ]] ; then
		vecho ">>> It appears that '$PF' is already pretended; skipping."
		vecho ">>> Remove '$PORTAGE_BUILDDIR/.pretended' to force pretend."
		return 0
	fi
	ebuild_phase pre_pkg_pretend
	ebuild_phase pkg_pretend
	>> "$PORTAGE_BUILDDIR/.pretended" || \
		die "Failed to create $PORTAGE_BUILDDIR/.pretended"
	ebuild_phase post_pkg_pretend
}
+
dyn_setup() {
	# Run the pkg_setup phase once, guarded by a stamp file.
	if [[ -e $PORTAGE_BUILDDIR/.setuped ]] ; then
		vecho ">>> It appears that '$PF' is already setup; skipping."
		vecho ">>> Remove '$PORTAGE_BUILDDIR/.setuped' to force setup."
		return 0
	fi
	ebuild_phase pre_pkg_setup
	ebuild_phase pkg_setup
	>> "$PORTAGE_BUILDDIR/.setuped" || \
		die "Failed to create $PORTAGE_BUILDDIR/.setuped"
	ebuild_phase post_pkg_setup
}
+
dyn_unpack() {
	# Run src_unpack, recreating WORKDIR when any distfile is newer than
	# it or when the .unpacked stamp is missing.
	local newstuff="no"
	if [ -e "${WORKDIR}" ]; then
		local x
		local checkme
		for x in $A ; do
			vecho ">>> Checking ${x}'s mtime..."
			if [ "${PORTAGE_ACTUAL_DISTDIR:-${DISTDIR}}/${x}" -nt "${WORKDIR}" ]; then
				vecho ">>> ${x} has been updated; recreating WORKDIR..."
				newstuff="yes"
				break
			fi
		done
		if [ ! -f "${PORTAGE_BUILDDIR}/.unpacked" ] ; then
			vecho ">>> Not marked as unpacked; recreating WORKDIR..."
			newstuff="yes"
		fi
	fi
	if [ "${newstuff}" == "yes" ]; then
		# We don't necessarily have privileges to do a full dyn_clean here.
		rm -rf "${PORTAGE_BUILDDIR}"/{.setuped,.unpacked,.prepared,.configured,.compiled,.tested,.installed,.packaged,build-info}
		if ! has keepwork $FEATURES ; then
			rm -rf "${WORKDIR}"
		fi
		if [ -d "${T}" ] && \
			! has keeptemp $FEATURES ; then
			rm -rf "${T}" && mkdir "${T}"
		fi
	fi
	if [ -e "${WORKDIR}" ]; then
		if [ "$newstuff" == "no" ]; then
			vecho ">>> WORKDIR is up-to-date, keeping..."
			return 0
		fi
	fi

	if [ ! -d "${WORKDIR}" ]; then
		install -m${PORTAGE_WORKDIR_MODE:-0700} -d "${WORKDIR}" || die "Failed to create dir '${WORKDIR}'"
	fi
	cd "${WORKDIR}" || die "Directory change failed: \`cd '${WORKDIR}'\`"
	ebuild_phase pre_src_unpack
	vecho ">>> Unpacking source..."
	ebuild_phase src_unpack
	>> "$PORTAGE_BUILDDIR/.unpacked" || \
		die "Failed to create $PORTAGE_BUILDDIR/.unpacked"
	vecho ">>> Source unpacked in ${WORKDIR}"
	ebuild_phase post_src_unpack
}
+
dyn_clean() {
	# Remove build-time state from PORTAGE_BUILDDIR, honoring the
	# keepwork/keeptemp FEATURES. Always returns 0 when it runs through.
	if [ -z "${PORTAGE_BUILDDIR}" ]; then
		echo "Aborting clean phase because PORTAGE_BUILDDIR is unset!"
		return 1
	elif [ ! -d "${PORTAGE_BUILDDIR}" ] ; then
		return 0
	fi
	if has chflags $FEATURES ; then
		# Clear BSD file flags that would otherwise block removal.
		chflags -R noschg,nouchg,nosappnd,nouappnd "${PORTAGE_BUILDDIR}"
		chflags -R nosunlnk,nouunlnk "${PORTAGE_BUILDDIR}" 2>/dev/null
	fi

	rm -rf "${PORTAGE_BUILDDIR}/image" "${PORTAGE_BUILDDIR}/homedir"
	rm -f "${PORTAGE_BUILDDIR}/.installed"

	if [[ $EMERGE_FROM = binary ]] || \
		! has keeptemp $FEATURES && ! has keepwork $FEATURES ; then
		rm -rf "${T}"
	fi

	if [[ $EMERGE_FROM = binary ]] || ! has keepwork $FEATURES; then
		rm -f "$PORTAGE_BUILDDIR"/.{ebuild_changed,logid,pretended,setuped,unpacked,prepared} \
			"$PORTAGE_BUILDDIR"/.{configured,compiled,tested,packaged} \
			"$PORTAGE_BUILDDIR"/.die_hooks \
			"$PORTAGE_BUILDDIR"/.ipc_{in,out,lock} \
			"$PORTAGE_BUILDDIR"/.exit_status

		rm -rf "${PORTAGE_BUILDDIR}/build-info"
		rm -rf "${WORKDIR}"
	fi

	if [ -f "${PORTAGE_BUILDDIR}/.unpacked" ]; then
		find "${PORTAGE_BUILDDIR}" -type d ! -regex "^${WORKDIR}" | sort -r | tr "\n" "\0" | $XARGS -0 rmdir &>/dev/null
	fi

	# do not bind this to doebuild defined DISTDIR; don't trust doebuild, and if mistakes are made it'll
	# result in it wiping the users distfiles directory (bad).
	rm -rf "${PORTAGE_BUILDDIR}/distdir"

	# Some kernels, such as Solaris, return EINVAL when an attempt
	# is made to remove the current working directory.
	cd "$PORTAGE_BUILDDIR"/../..
	rmdir "$PORTAGE_BUILDDIR" 2>/dev/null

	true
}
+
+# into()/insinto()/exeinto()/docinto(): set the destination directory used by
+# the dobin/doins/doexe/dodoc helper families, creating it under ${D} when
+# missing.  "/" maps to the empty string so "${D}${DESTTREE}" stays valid.
+into() {
+	if [ "$1" == "/" ]; then
+		export DESTTREE=""
+	else
+		export DESTTREE=$1
+		if [ ! -d "${D}${DESTTREE}" ]; then
+			install -d "${D}${DESTTREE}"
+			local ret=$?
+			if [[ $ret -ne 0 ]] ; then
+				helpers_die "${FUNCNAME[0]} failed"
+				return $ret
+			fi
+		fi
+	fi
+}
+
+# Destination for doins/newins.
+insinto() {
+	if [ "$1" == "/" ]; then
+		export INSDESTTREE=""
+	else
+		export INSDESTTREE=$1
+		if [ ! -d "${D}${INSDESTTREE}" ]; then
+			install -d "${D}${INSDESTTREE}"
+			local ret=$?
+			if [[ $ret -ne 0 ]] ; then
+				helpers_die "${FUNCNAME[0]} failed"
+				return $ret
+			fi
+		fi
+	fi
+}
+
+# Destination for doexe/newexe.
+exeinto() {
+	if [ "$1" == "/" ]; then
+		export _E_EXEDESTTREE_=""
+	else
+		export _E_EXEDESTTREE_="$1"
+		if [ ! -d "${D}${_E_EXEDESTTREE_}" ]; then
+			install -d "${D}${_E_EXEDESTTREE_}"
+			local ret=$?
+			if [[ $ret -ne 0 ]] ; then
+				helpers_die "${FUNCNAME[0]} failed"
+				return $ret
+			fi
+		fi
+	fi
+}
+
+# Destination for dodoc, relative to /usr/share/doc/${PF}.
+docinto() {
+	if [ "$1" == "/" ]; then
+		export _E_DOCDESTTREE_=""
+	else
+		export _E_DOCDESTTREE_="$1"
+		if [ ! -d "${D}usr/share/doc/${PF}/${_E_DOCDESTTREE_}" ]; then
+			install -d "${D}usr/share/doc/${PF}/${_E_DOCDESTTREE_}"
+			local ret=$?
+			if [[ $ret -ne 0 ]] ; then
+				helpers_die "${FUNCNAME[0]} failed"
+				return $ret
+			fi
+		fi
+	fi
+}
+
+# insopts()/diropts()/exeopts()/libopts(): record the option strings later
+# passed to `install` by the doins/dodir/doexe/dolib helpers.  Stripping is
+# handled by portage itself, so '-s' is rejected here.
+insopts() {
+	export INSOPTIONS="$@"
+
+	# `install` should never be called with '-s' ...
+	has -s ${INSOPTIONS} && die "Never call insopts() with -s"
+}
+
+# Options for dodir's `install -d`.
+diropts() {
+	export DIROPTIONS="$@"
+}
+
+exeopts() {
+	export EXEOPTIONS="$@"
+
+	# `install` should never be called with '-s' ...
+	has -s ${EXEOPTIONS} && die "Never call exeopts() with -s"
+}
+
+libopts() {
+	export LIBOPTIONS="$@"
+
+	# `install` should never be called with '-s' ...
+	has -s ${LIBOPTIONS} && die "Never call libopts() with -s"
+}
+
+# docompress(): EAPI 4+ helper.  Add paths to the controlled-compression
+# include list (PORTAGE_DOCOMPRESS) or, with -x, to the exclusion list
+# (PORTAGE_DOCOMPRESS_SKIP).  Paths are normalized (duplicate slashes
+# collapsed, trailing slash stripped, leading slash enforced) and
+# duplicates are silently skipped via `continue 2`.
+docompress() {
+	has "${EAPI}" 0 1 2 3 && die "'docompress' not supported in this EAPI"
+
+	local f g
+	if [[ $1 = "-x" ]]; then
+		shift
+		for f; do
+			f=$(strip_duplicate_slashes "${f}"); f=${f%/}
+			[[ ${f:0:1} = / ]] || f="/${f}"
+			for g in "${PORTAGE_DOCOMPRESS_SKIP[@]}"; do
+				[[ ${f} = "${g}" ]] && continue 2
+			done
+			PORTAGE_DOCOMPRESS_SKIP[${#PORTAGE_DOCOMPRESS_SKIP[@]}]=${f}
+		done
+	else
+		for f; do
+			f=$(strip_duplicate_slashes "${f}"); f=${f%/}
+			[[ ${f:0:1} = / ]] || f="/${f}"
+			for g in "${PORTAGE_DOCOMPRESS[@]}"; do
+				[[ ${f} = "${g}" ]] && continue 2
+			done
+			PORTAGE_DOCOMPRESS[${#PORTAGE_DOCOMPRESS[@]}]=${f}
+		done
+	fi
+}
+
+# abort_handler(phase, [fail], [cleanup-cmd]): print an "aborted" or
+# "failed" banner for the given phase, eval the optional cleanup command
+# in $3, then clear the SIGINT/SIGQUIT traps.  The abort_* wrappers below
+# are installed as signal handlers by the dyn_* phase functions; each one
+# removes its phase's stamp file so the phase reruns next time, then exits.
+abort_handler() {
+	local msg
+	if [ "$2" != "fail" ]; then
+		msg="${EBUILD}: ${1} aborted; exiting."
+	else
+		msg="${EBUILD}: ${1} failed; exiting."
+	fi
+	echo
+	echo "$msg"
+	echo
+	eval ${3}
+	#unset signal handler
+	trap - SIGINT SIGQUIT
+}
+
+abort_prepare() {
+	abort_handler src_prepare $1
+	rm -f "$PORTAGE_BUILDDIR/.prepared"
+	exit 1
+}
+
+abort_configure() {
+	abort_handler src_configure $1
+	rm -f "$PORTAGE_BUILDDIR/.configured"
+	exit 1
+}
+
+abort_compile() {
+	abort_handler "src_compile" $1
+	rm -f "${PORTAGE_BUILDDIR}/.compiled"
+	exit 1
+}
+
+abort_test() {
+	abort_handler "dyn_test" $1
+	rm -f "${PORTAGE_BUILDDIR}/.tested"
+	exit 1
+}
+
+abort_install() {
+	abort_handler "src_install" $1
+	# Discard the partially-populated image directory.
+	rm -rf "${PORTAGE_BUILDDIR}/image"
+	exit 1
+}
+
+# has_phase_defined_up_to(phase): return 0 if the ebuild defines any src_*
+# phase at or before the given phase in the canonical ordering
+# (unpack prepare configure compile install), per DEFINED_PHASES.
+# Used to decide whether a missing ${S} is acceptable.
+has_phase_defined_up_to() {
+	local phase
+	for phase in unpack prepare configure compile install; do
+		has ${phase} ${DEFINED_PHASES} && return 0
+		[[ ${phase} == $1 ]] && return 1
+	done
+	# We shouldn't actually get here
+	return 1
+}
+
+# dyn_prepare(): run the src_prepare phase (with pre/post hooks), guarded by
+# the .prepared stamp file so it is skipped on reruns.  cd's into ${S} when
+# present, falling back to ${WORKDIR} for old EAPIs or sourceless ebuilds.
+dyn_prepare() {
+
+	if [[ -e $PORTAGE_BUILDDIR/.prepared ]] ; then
+		vecho ">>> It appears that '$PF' is already prepared; skipping."
+		vecho ">>> Remove '$PORTAGE_BUILDDIR/.prepared' to force prepare."
+		return 0
+	fi
+
+	if [[ -d $S ]] ; then
+		cd "${S}"
+	elif has $EAPI 0 1 2 3 3_pre2 ; then
+		# Old EAPIs silently tolerate a missing ${S}.
+		cd "${WORKDIR}"
+	elif [[ -z ${A} ]] && ! has_phase_defined_up_to prepare; then
+		# No sources and no phases defined up to here: nothing needs ${S}.
+		cd "${WORKDIR}"
+	else
+		die "The source directory '${S}' doesn't exist"
+	fi
+
+	trap abort_prepare SIGINT SIGQUIT
+
+	ebuild_phase pre_src_prepare
+	vecho ">>> Preparing source in $PWD ..."
+	ebuild_phase src_prepare
+	# Create the stamp file marking this phase complete.
+	>> "$PORTAGE_BUILDDIR/.prepared" || \
+		die "Failed to create $PORTAGE_BUILDDIR/.prepared"
+	vecho ">>> Source prepared."
+	ebuild_phase post_src_prepare
+
+	trap - SIGINT SIGQUIT
+}
+
+# dyn_configure(): run the src_configure phase (with pre/post hooks),
+# guarded by the .configured stamp file.  Same ${S}/${WORKDIR} fallback
+# rules as dyn_prepare().
+dyn_configure() {
+
+	if [[ -e $PORTAGE_BUILDDIR/.configured ]] ; then
+		vecho ">>> It appears that '$PF' is already configured; skipping."
+		vecho ">>> Remove '$PORTAGE_BUILDDIR/.configured' to force configuration."
+		return 0
+	fi
+
+	if [[ -d $S ]] ; then
+		cd "${S}"
+	elif has $EAPI 0 1 2 3 3_pre2 ; then
+		cd "${WORKDIR}"
+	elif [[ -z ${A} ]] && ! has_phase_defined_up_to configure; then
+		cd "${WORKDIR}"
+	else
+		die "The source directory '${S}' doesn't exist"
+	fi
+
+	trap abort_configure SIGINT SIGQUIT
+
+	ebuild_phase pre_src_configure
+
+	vecho ">>> Configuring source in $PWD ..."
+	ebuild_phase src_configure
+	>> "$PORTAGE_BUILDDIR/.configured" || \
+		die "Failed to create $PORTAGE_BUILDDIR/.configured"
+	vecho ">>> Source configured."
+
+	ebuild_phase post_src_configure
+
+	trap - SIGINT SIGQUIT
+}
+
+# dyn_compile(): run the src_compile phase (with pre/post hooks), guarded by
+# the .compiled stamp file.  Also starts a distcc-pump include server when
+# both distcc and distcc-pump FEATURES are enabled.
+dyn_compile() {
+
+	if [[ -e $PORTAGE_BUILDDIR/.compiled ]] ; then
+		vecho ">>> It appears that '${PF}' is already compiled; skipping."
+		vecho ">>> Remove '$PORTAGE_BUILDDIR/.compiled' to force compilation."
+		return 0
+	fi
+
+	if [[ -d $S ]] ; then
+		cd "${S}"
+	elif has $EAPI 0 1 2 3 3_pre2 ; then
+		cd "${WORKDIR}"
+	elif [[ -z ${A} ]] && ! has_phase_defined_up_to compile; then
+		cd "${WORKDIR}"
+	else
+		die "The source directory '${S}' doesn't exist"
+	fi
+
+	trap abort_compile SIGINT SIGQUIT
+
+	if has distcc $FEATURES && has distcc-pump $FEATURES ; then
+		# Start the pump include server only if one isn't already usable.
+		if [[ -z $INCLUDE_SERVER_PORT ]] || [[ ! -w $INCLUDE_SERVER_PORT ]] ; then
+			eval $(pump --startup)
+			trap "pump --shutdown" EXIT
+		fi
+	fi
+
+	ebuild_phase pre_src_compile
+
+	vecho ">>> Compiling source in $PWD ..."
+	ebuild_phase src_compile
+	>> "$PORTAGE_BUILDDIR/.compiled" || \
+		die "Failed to create $PORTAGE_BUILDDIR/.compiled"
+	vecho ">>> Source compiled."
+
+	ebuild_phase post_src_compile
+
+	trap - SIGINT SIGQUIT
+}
+
+# dyn_test(): run the src_test phase when FEATURES=test is enabled (or
+# EBUILD_FORCE_TEST=1) and RESTRICT does not forbid it.  Sandbox predict
+# is temporarily opened to "/" for the duration of the tests.
+dyn_test() {
+
+	if [[ -e $PORTAGE_BUILDDIR/.tested ]] ; then
+		vecho ">>> It appears that ${PN} has already been tested; skipping."
+		vecho ">>> Remove '${PORTAGE_BUILDDIR}/.tested' to force test."
+		return
+	fi
+
+	if [ "${EBUILD_FORCE_TEST}" == "1" ] ; then
+		# If USE came from ${T}/environment then it might not have USE=test
+		# like it's supposed to here.
+		! has test ${USE} && export USE="${USE} test"
+	fi
+
+	trap "abort_test" SIGINT SIGQUIT
+	if [ -d "${S}" ]; then
+		cd "${S}"
+	else
+		cd "${WORKDIR}"
+	fi
+
+	if ! has test $FEATURES && [ "${EBUILD_FORCE_TEST}" != "1" ]; then
+		vecho ">>> Test phase [not enabled]: ${CATEGORY}/${PF}"
+	elif has test $RESTRICT; then
+		einfo "Skipping make test/check due to ebuild restriction."
+		vecho ">>> Test phase [explicitly disabled]: ${CATEGORY}/${PF}"
+	else
+		# Save and restore SANDBOX_PREDICT around the test run.
+		local save_sp=${SANDBOX_PREDICT}
+		addpredict /
+		ebuild_phase pre_src_test
+		ebuild_phase src_test
+		>> "$PORTAGE_BUILDDIR/.tested" || \
+			die "Failed to create $PORTAGE_BUILDDIR/.tested"
+		ebuild_phase post_src_test
+		SANDBOX_PREDICT=${save_sp}
+	fi
+
+	trap - SIGINT SIGQUIT
+}
+
+# dyn_install(): run the src_install phase into a fresh ${PORTAGE_BUILDDIR}/image,
+# then write the package's build-info metadata files (CATEGORY, SLOT, USE,
+# EAPI, saved environment.bz2, a copy of the ebuild, ...).
+dyn_install() {
+	[ -z "$PORTAGE_BUILDDIR" ] && die "${FUNCNAME}: PORTAGE_BUILDDIR is unset"
+	if has noauto $FEATURES ; then
+		rm -f "${PORTAGE_BUILDDIR}/.installed"
+	elif [[ -e $PORTAGE_BUILDDIR/.installed ]] ; then
+		vecho ">>> It appears that '${PF}' is already installed; skipping."
+		vecho ">>> Remove '${PORTAGE_BUILDDIR}/.installed' to force install."
+		return 0
+	fi
+	trap "abort_install" SIGINT SIGQUIT
+	ebuild_phase pre_src_install
+	# Always start from an empty image directory.
+	rm -rf "${PORTAGE_BUILDDIR}/image"
+	mkdir "${PORTAGE_BUILDDIR}/image"
+	if [[ -d $S ]] ; then
+		cd "${S}"
+	elif has $EAPI 0 1 2 3 3_pre2 ; then
+		cd "${WORKDIR}"
+	elif [[ -z ${A} ]] && ! has_phase_defined_up_to install; then
+		cd "${WORKDIR}"
+	else
+		die "The source directory '${S}' doesn't exist"
+	fi
+
+	vecho
+	vecho ">>> Install ${PF} into ${D} category ${CATEGORY}"
+	#our custom version of libtool uses $S and $D to fix
+	#invalid paths in .la files
+	export S D
+
+	# Reset exeinto(), docinto(), insinto(), and into() state variables
+	# in case the user is running the install phase multiple times
+	# consecutively via the ebuild command.
+	export DESTTREE=/usr
+	export INSDESTTREE=""
+	export _E_EXEDESTTREE_=""
+	export _E_DOCDESTTREE_=""
+
+	ebuild_phase src_install
+	>> "$PORTAGE_BUILDDIR/.installed" || \
+		die "Failed to create $PORTAGE_BUILDDIR/.installed"
+	vecho ">>> Completed installing ${PF} into ${D}"
+	vecho
+	ebuild_phase post_src_install
+
+	# Record package metadata into build-info; glob expansion is disabled
+	# (set -f) so variable values are written verbatim.
+	cd "${PORTAGE_BUILDDIR}"/build-info
+	set -f
+	local f x
+	IFS=$' \t\n\r'
+	for f in CATEGORY DEFINED_PHASES FEATURES INHERITED IUSE REQUIRED_USE \
+		PF PKGUSE SLOT KEYWORDS HOMEPAGE DESCRIPTION ; do
+		# echo -n normalizes internal whitespace; skip empty values.
+		x=$(echo -n ${!f})
+		[[ -n $x ]] && echo "$x" > $f
+	done
+	if [[ $CATEGORY != virtual ]] ; then
+		for f in ASFLAGS CBUILD CC CFLAGS CHOST CTARGET CXX \
+			CXXFLAGS EXTRA_ECONF EXTRA_EINSTALL EXTRA_MAKE \
+			LDFLAGS LIBCFLAGS LIBCXXFLAGS ; do
+			x=$(echo -n ${!f})
+			[[ -n $x ]] && echo "$x" > $f
+		done
+	fi
+	echo "${USE}" > USE
+	echo "${EAPI:-0}" > EAPI
+	set +f
+
+	# local variables can leak into the saved environment.
+	unset f
+
+	save_ebuild_env --exclude-init-phases | filter_readonly_variables \
+		--filter-path --filter-sandbox --allow-extra-vars > environment
+	assert "save_ebuild_env failed"
+
+	${PORTAGE_BZIP2_COMMAND} -f9 environment
+
+	cp "${EBUILD}" "${PF}.ebuild"
+	[ -n "${PORTAGE_REPO_NAME}" ] && echo "${PORTAGE_REPO_NAME}" > repository
+	# Mark the build as unstripped/debug when stripping is disabled.
+	if has nostrip ${FEATURES} ${RESTRICT} || has strip ${RESTRICT}
+	then
+		>> DEBUGBUILD
+	fi
+	trap - SIGINT SIGQUIT
+}
+
+# dyn_preinst(): run the pkg_preinst phase with its surrounding hooks.
+# Requires ${D} to be set; returns 1 otherwise.
+dyn_preinst() {
+	if [ -z "${D}" ]; then
+		eerror "${FUNCNAME}: D is unset"
+		return 1
+	fi
+	ebuild_phase_with_hooks pkg_preinst
+}
+
+# dyn_help(): print ebuild(1) usage information plus the settings that
+# would be used for this particular ebuild (package, flags, merge root).
+dyn_help() {
+	echo
+	echo "Portage"
+	echo "Copyright 1999-2010 Gentoo Foundation"
+	echo
+	echo "How to use the ebuild command:"
+	echo
+	echo "The first argument to ebuild should be an existing .ebuild file."
+	echo
+	echo "One or more of the following options can then be specified.  If more"
+	echo "than one option is specified, each will be executed in order."
+	echo
+	echo "  help        : show this help screen"
+	echo "  pretend     : execute package specific pretend actions"
+	echo "  setup       : execute package specific setup actions"
+	echo "  fetch       : download source archive(s) and patches"
+	echo "  digest      : create a manifest file for the package"
+	echo "  manifest    : create a manifest file for the package"
+	echo "  unpack      : unpack sources (auto-dependencies if needed)"
+	echo "  prepare     : prepare sources (auto-dependencies if needed)"
+	echo "  configure   : configure sources (auto-fetch/unpack if needed)"
+	echo "  compile     : compile sources (auto-fetch/unpack/configure if needed)"
+	echo "  test        : test package (auto-fetch/unpack/configure/compile if needed)"
+	echo "  preinst     : execute pre-install instructions"
+	echo "  postinst    : execute post-install instructions"
+	echo "  install     : install the package to the temporary install directory"
+	echo "  qmerge      : merge image into live filesystem, recording files in db"
+	echo "  merge       : do fetch, unpack, compile, install and qmerge"
+	echo "  prerm       : execute pre-removal instructions"
+	echo "  postrm      : execute post-removal instructions"
+	echo "  unmerge     : remove package from live filesystem"
+	echo "  config      : execute package specific configuration actions"
+	echo "  package     : create a tarball package in ${PKGDIR}/All"
+	echo "  rpm         : build a RedHat RPM package"
+	echo "  clean       : clean up all source and temporary files"
+	echo
+	echo "The following settings will be used for the ebuild process:"
+	echo
+	echo "  package     : ${PF}"
+	echo "  slot        : ${SLOT}"
+	echo "  category    : ${CATEGORY}"
+	echo "  description : ${DESCRIPTION}"
+	echo "  system      : ${CHOST}"
+	echo "  c flags     : ${CFLAGS}"
+	echo "  c++ flags   : ${CXXFLAGS}"
+	echo "  make flags  : ${MAKEOPTS}"
+	echo -n "  build mode  : "
+	# Same strip/nostrip test used by dyn_install when writing DEBUGBUILD.
+	if has nostrip ${FEATURES} ${RESTRICT} || has strip ${RESTRICT} ;
+	then
+		echo "debug (large)"
+	else
+		echo "production (stripped)"
+	fi
+	echo "  merge to    : ${ROOT}"
+	echo
+	if [ -n "$USE" ]; then
+		echo "Additionally, support for the following optional features will be enabled:"
+		echo
+		echo "  ${USE}"
+	fi
+	echo
+}
+
+# debug-print() gets called from many places with verbose status information useful
+# for tracking down problems. The output is in $T/eclass-debug.log.
+# You can set ECLASS_DEBUG_OUTPUT to redirect the output somewhere else as well.
+# The special "on" setting echoes the information, mixing it with the rest of the
+# emerge output.
+# You can override the setting by exporting a new one from the console, or you can
+# set a new default in make.*. Here the default is "" or unset.
+
+# in the future might use e* from /etc/init.d/functions.sh if i feel like it
+debug-print() {
+	# if $T isn't defined, we're in dep calculation mode and
+	# shouldn't do anything
+	[[ $EBUILD_PHASE = depend || ! -d ${T} || ${#} -eq 0 ]] && return 0
+
+	if [[ ${ECLASS_DEBUG_OUTPUT} == on ]]; then
+		printf 'debug: %s\n' "${@}" >&2
+	elif [[ -n ${ECLASS_DEBUG_OUTPUT} ]]; then
+		printf 'debug: %s\n' "${@}" >> "${ECLASS_DEBUG_OUTPUT}"
+	fi
+
+	if [[ -w $T ]] ; then
+		# default target
+		printf '%s\n' "${@}" >> "${T}/eclass-debug.log"
+		# let the portage user own/write to this file
+		chgrp portage "${T}/eclass-debug.log" &>/dev/null
+		chmod g+w "${T}/eclass-debug.log" &>/dev/null
+	fi
+}
+
+# The following 2 functions are debug-print() wrappers
+
+debug-print-function() {
+	debug-print "${1}: entering function, parameters: ${*:2}"
+}
+
+debug-print-section() {
+	debug-print "now in section ${*}"
+}
+
+# Sources all eclasses in parameters
+#
+# inherit() sources each named eclass (from ECLASSDIR, or an overlay when
+# present), while saving/restoring the ebuild's own IUSE/REQUIRED_USE/
+# DEPEND/RDEPEND/PDEPEND so that eclass-provided values accumulate in the
+# E_* variables instead of clobbering the ebuild's.  ECLASS_DEPTH tracks
+# nesting for recursive inherits.
+declare -ix ECLASS_DEPTH=0
+inherit() {
+	ECLASS_DEPTH=$(($ECLASS_DEPTH + 1))
+	if [[ ${ECLASS_DEPTH} > 1 ]]; then
+		debug-print "*** Multiple Inheritence (Level: ${ECLASS_DEPTH})"
+	fi
+
+	# QA check: EXPORT_FUNCTIONS must come after inherit for compatibility.
+	if [[ -n $ECLASS && -n ${!__export_funcs_var} ]] ; then
+		echo "QA Notice: EXPORT_FUNCTIONS is called before inherit in" \
+			"$ECLASS.eclass. For compatibility with <=portage-2.1.6.7," \
+			"only call EXPORT_FUNCTIONS after inherit(s)." \
+			| fmt -w 75 | while read -r ; do eqawarn "$REPLY" ; done
+	fi
+
+	local location
+	local olocation
+	local x
+
+	# These variables must be restored before returning.
+	local PECLASS=$ECLASS
+	local prev_export_funcs_var=$__export_funcs_var
+
+	local B_IUSE
+	local B_REQUIRED_USE
+	local B_DEPEND
+	local B_RDEPEND
+	local B_PDEPEND
+	while [ "$1" ]; do
+		location="${ECLASSDIR}/${1}.eclass"
+		olocation=""
+
+		export ECLASS="$1"
+		__export_funcs_var=__export_functions_$ECLASS_DEPTH
+		unset $__export_funcs_var
+
+		if [ "${EBUILD_PHASE}" != "depend" ] && \
+			[[ ${EBUILD_PHASE} != *rm ]] && \
+			[[ ${EMERGE_FROM} != "binary" ]] ; then
+			# This is disabled in the *rm phases because they frequently give
+			# false alarms due to INHERITED in /var/db/pkg being outdated
+			# in comparison to the eclasses from the portage tree.
+			if ! has $ECLASS $INHERITED $__INHERITED_QA_CACHE ; then
+				eqawarn "QA Notice: ECLASS '$ECLASS' inherited illegally in $CATEGORY/$PF $EBUILD_PHASE"
+			fi
+		fi
+
+		# any future resolution code goes here
+		if [ -n "$PORTDIR_OVERLAY" ]; then
+			# The last overlay providing the eclass wins.
+			local overlay
+			for overlay in ${PORTDIR_OVERLAY}; do
+				olocation="${overlay}/eclass/${1}.eclass"
+				if [ -e "$olocation" ]; then
+					location="${olocation}"
+					debug-print "  eclass exists: ${location}"
+				fi
+			done
+		fi
+		debug-print "inherit: $1 -> $location"
+		[ ! -e "$location" ] && die "${1}.eclass could not be found by inherit()"
+
+		if [ "${location}" == "${olocation}" ] && \
+			! has "${location}" ${EBUILD_OVERLAY_ECLASSES} ; then
+				EBUILD_OVERLAY_ECLASSES="${EBUILD_OVERLAY_ECLASSES} ${location}"
+		fi
+
+		#We need to back up the value of DEPEND and RDEPEND to B_DEPEND and B_RDEPEND
+		#(if set).. and then restore them after the inherit call.
+
+		#turn off glob expansion
+		set -f
+
+		# Retain the old data and restore it later.
+		unset B_IUSE B_REQUIRED_USE B_DEPEND B_RDEPEND B_PDEPEND
+		[ "${IUSE+set}"         = set ] && B_IUSE="${IUSE}"
+		[ "${REQUIRED_USE+set}" = set ] && B_REQUIRED_USE="${REQUIRED_USE}"
+		[ "${DEPEND+set}"       = set ] && B_DEPEND="${DEPEND}"
+		[ "${RDEPEND+set}"      = set ] && B_RDEPEND="${RDEPEND}"
+		[ "${PDEPEND+set}"      = set ] && B_PDEPEND="${PDEPEND}"
+		unset IUSE REQUIRED_USE DEPEND RDEPEND PDEPEND
+		#turn on glob expansion
+		set +f
+
+		qa_source "$location" || die "died sourcing $location in inherit()"
+
+		#turn off glob expansion
+		set -f
+
+		# If each var has a value, append it to the global variable E_* to
+		# be applied after everything is finished. New incremental behavior.
+		[ "${IUSE+set}"         = set ] && export E_IUSE="${E_IUSE} ${IUSE}"
+		[ "${REQUIRED_USE+set}" = set ] && export E_REQUIRED_USE="${E_REQUIRED_USE} ${REQUIRED_USE}"
+		[ "${DEPEND+set}"       = set ] && export E_DEPEND="${E_DEPEND} ${DEPEND}"
+		[ "${RDEPEND+set}"      = set ] && export E_RDEPEND="${E_RDEPEND} ${RDEPEND}"
+		[ "${PDEPEND+set}"      = set ] && export E_PDEPEND="${E_PDEPEND} ${PDEPEND}"
+
+		# Restore the ebuild's own values (or unset when it had none).
+		[ "${B_IUSE+set}"     = set ] && IUSE="${B_IUSE}"
+		[ "${B_IUSE+set}"     = set ] || unset IUSE
+
+		[ "${B_REQUIRED_USE+set}"     = set ] && REQUIRED_USE="${B_REQUIRED_USE}"
+		[ "${B_REQUIRED_USE+set}"     = set ] || unset REQUIRED_USE
+
+		[ "${B_DEPEND+set}"   = set ] && DEPEND="${B_DEPEND}"
+		[ "${B_DEPEND+set}"   = set ] || unset DEPEND
+
+		[ "${B_RDEPEND+set}"  = set ] && RDEPEND="${B_RDEPEND}"
+		[ "${B_RDEPEND+set}"  = set ] || unset RDEPEND
+
+		[ "${B_PDEPEND+set}"  = set ] && PDEPEND="${B_PDEPEND}"
+		[ "${B_PDEPEND+set}"  = set ] || unset PDEPEND
+
+		#turn on glob expansion
+		set +f
+
+		# Materialize the stubs requested via EXPORT_FUNCTIONS.
+		if [[ -n ${!__export_funcs_var} ]] ; then
+			for x in ${!__export_funcs_var} ; do
+				debug-print "EXPORT_FUNCTIONS: $x -> ${ECLASS}_$x"
+				declare -F "${ECLASS}_$x" >/dev/null || \
+					die "EXPORT_FUNCTIONS: ${ECLASS}_$x is not defined"
+				eval "$x() { ${ECLASS}_$x \"\$@\" ; }" > /dev/null
+			done
+		fi
+		unset $__export_funcs_var
+
+		has $1 $INHERITED || export INHERITED="$INHERITED $1"
+
+		shift
+	done
+	((--ECLASS_DEPTH)) # Returns 1 when ECLASS_DEPTH reaches 0.
+	if (( ECLASS_DEPTH > 0 )) ; then
+		export ECLASS=$PECLASS
+		__export_funcs_var=$prev_export_funcs_var
+	else
+		unset ECLASS __export_funcs_var
+	fi
+	return 0
+}
+
+# Exports stub functions that call the eclass's functions, thereby making them default.
+# For example, if ECLASS="base" and you call "EXPORT_FUNCTIONS src_unpack", the following
+# code will be eval'd:
+# src_unpack() { base_src_unpack; }
+#
+# The names are only recorded here (in the per-depth __export_funcs_var
+# variable); inherit() performs the actual eval after sourcing the eclass.
+EXPORT_FUNCTIONS() {
+	if [ -z "$ECLASS" ]; then
+		die "EXPORT_FUNCTIONS without a defined ECLASS"
+	fi
+	eval $__export_funcs_var+=\" $*\"
+}
+
+# this is a function for removing any directory matching a passed in pattern from
+# PATH
+#
+# Each argument is treated as a substring pattern: any PATH component that
+# contains it (${p/${1}} differs from ${p}) is dropped.
+remove_path_entry() {
+	save_IFS
+	IFS=":"
+	stripped_path="${PATH}"
+	while [ -n "$1" ]; do
+		cur_path=""
+		for p in ${stripped_path}; do
+			if [ "${p/${1}}" == "${p}" ]; then
+				# Component does not match the pattern; keep it.
+				cur_path="${cur_path}:${p}"
+			fi
+		done
+		# Strip the leading ":" introduced by the concatenation above.
+		stripped_path="${cur_path#:*}"
+		shift
+	done
+	restore_IFS
+	PATH="${stripped_path}"
+}
+
+# @FUNCTION: _ebuild_arg_to_phase
+# @DESCRIPTION:
+# Translate a known ebuild(1) argument into the precise
+# name of its corresponding ebuild phase.  Echoes the phase function name
+# and returns 0, or returns 1 when the argument has no phase in the given
+# EAPI (e.g. "pretend" before EAPI 4, "prepare"/"configure" before EAPI 2).
+_ebuild_arg_to_phase() {
+	[ $# -ne 2 ] && die "expected exactly 2 args, got $#: $*"
+	local eapi=$1
+	local arg=$2
+	local phase_func=""
+
+	case "$arg" in
+		pretend)
+			! has $eapi 0 1 2 3 3_pre2 && \
+				phase_func=pkg_pretend
+			;;
+		setup)
+			phase_func=pkg_setup
+			;;
+		nofetch)
+			phase_func=pkg_nofetch
+			;;
+		unpack)
+			phase_func=src_unpack
+			;;
+		prepare)
+			! has $eapi 0 1 && \
+				phase_func=src_prepare
+			;;
+		configure)
+			! has $eapi 0 1 && \
+				phase_func=src_configure
+			;;
+		compile)
+			phase_func=src_compile
+			;;
+		test)
+			phase_func=src_test
+			;;
+		install)
+			phase_func=src_install
+			;;
+		preinst)
+			phase_func=pkg_preinst
+			;;
+		postinst)
+			phase_func=pkg_postinst
+			;;
+		prerm)
+			phase_func=pkg_prerm
+			;;
+		postrm)
+			phase_func=pkg_postrm
+			;;
+	esac
+
+	[[ -z $phase_func ]] && return 1
+	echo "$phase_func"
+	return 0
+}
+
+_ebuild_phase_funcs() {
+ [ $# -ne 2 ] && die "expected exactly 2 args, got $#: $*"
+ local eapi=$1
+ local phase_func=$2
+ local default_phases="pkg_nofetch src_unpack src_prepare src_configure
+ src_compile src_install src_test"
+ local x y default_func=""
+
+ for x in pkg_nofetch src_unpack src_test ; do
+ declare -F $x >/dev/null || \
+ eval "$x() { _eapi0_$x \"\$@\" ; }"
+ done
+
+ case $eapi in
+
+ 0|1)
+
+ if ! declare -F src_compile >/dev/null ; then
+ case $eapi in
+ 0)
+ src_compile() { _eapi0_src_compile "$@" ; }
+ ;;
+ *)
+ src_compile() { _eapi1_src_compile "$@" ; }
+ ;;
+ esac
+ fi
+
+ for x in $default_phases ; do
+ eval "default_$x() {
+ die \"default_$x() is not supported with EAPI='$eapi' during phase $phase_func\"
+ }"
+ done
+
+ eval "default() {
+ die \"default() is not supported with EAPI='$eapi' during phase $phase_func\"
+ }"
+
+ ;;
+
+ *)
+
+ declare -F src_configure >/dev/null || \
+ src_configure() { _eapi2_src_configure "$@" ; }
+
+ declare -F src_compile >/dev/null || \
+ src_compile() { _eapi2_src_compile "$@" ; }
+
+ has $eapi 2 3 3_pre2 || declare -F src_install >/dev/null || \
+ src_install() { _eapi4_src_install "$@" ; }
+
+ if has $phase_func $default_phases ; then
+
+ _eapi2_pkg_nofetch () { _eapi0_pkg_nofetch "$@" ; }
+ _eapi2_src_unpack () { _eapi0_src_unpack "$@" ; }
+ _eapi2_src_prepare () { true ; }
+ _eapi2_src_test () { _eapi0_src_test "$@" ; }
+ _eapi2_src_install () { die "$FUNCNAME is not supported" ; }
+
+ for x in $default_phases ; do
+ eval "default_$x() { _eapi2_$x \"\$@\" ; }"
+ done
+
+ eval "default() { _eapi2_$phase_func \"\$@\" ; }"
+
+ case $eapi in
+ 2|3)
+ ;;
+ *)
+ eval "default_src_install() { _eapi4_src_install \"\$@\" ; }"
+ [[ $phase_func = src_install ]] && \
+ eval "default() { _eapi4_$phase_func \"\$@\" ; }"
+ ;;
+ esac
+
+ else
+
+ for x in $default_phases ; do
+ eval "default_$x() {
+ die \"default_$x() is not supported in phase $default_func\"
+ }"
+ done
+
+ eval "default() {
+ die \"default() is not supported with EAPI='$eapi' during phase $phase_func\"
+ }"
+
+ fi
+
+ ;;
+ esac
+}
+
+# Set given variables unless these variable have been already set (e.g. during emerge
+# invocation) to values different than values set in make.conf.
+#
+# Each argument must be VARIABLE=VALUE; the current value is compared
+# against `portageq envvar` output (with the variable removed from the
+# environment) to decide whether the user overrode it.
+set_unless_changed() {
+	if [[ $# -lt 1 ]]; then
+		die "${FUNCNAME}() requires at least 1 argument: VARIABLE=VALUE"
+	fi
+
+	local argument value variable
+	for argument in "$@"; do
+		if [[ ${argument} != *=* ]]; then
+			die "${FUNCNAME}(): Argument '${argument}' has incorrect syntax"
+		fi
+		variable="${argument%%=*}"
+		value="${argument#*=}"
+		if eval "[[ \${${variable}} == \$(env -u ${variable} portageq envvar ${variable}) ]]"; then
+			eval "${variable}=\"\${value}\""
+		fi
+	done
+}
+
+# Unset given variables unless these variable have been set (e.g. during emerge
+# invocation) to values different than values set in make.conf.
+unset_unless_changed() {
+	if [[ $# -lt 1 ]]; then
+		die "${FUNCNAME}() requires at least 1 argument: VARIABLE"
+	fi
+
+	local variable
+	for variable in "$@"; do
+		if eval "[[ \${${variable}} == \$(env -u ${variable} portageq envvar ${variable}) ]]"; then
+			unset ${variable}
+		fi
+	done
+}
+
+# Guard so the bashrc stack is only sourced once per process.
+PORTAGE_BASHRCS_SOURCED=0
+
+# @FUNCTION: source_all_bashrcs
+# @DESCRIPTION:
+# Source relevant bashrc files and perform other miscellaneous
+# environment initialization when appropriate.
+#
+# If EAPI is set then define functions provided by the current EAPI:
+#
+#	* default_* aliases for the current EAPI phase functions
+#	* A "default" function which is an alias for the default phase
+#	  function for the current phase.
+#
+source_all_bashrcs() {
+	[[ $PORTAGE_BASHRCS_SOURCED = 1 ]] && return 0
+	PORTAGE_BASHRCS_SOURCED=1
+	local x
+
+	# CC/CXX are preserved across the bashrcs and restored below.
+	local OCC="${CC}" OCXX="${CXX}"
+
+	if [[ $EBUILD_PHASE != depend ]] ; then
+		# source the existing profile.bashrcs.
+		save_IFS
+		IFS=$'\n'
+		local path_array=($PROFILE_PATHS)
+		restore_IFS
+		for x in "${path_array[@]}" ; do
+			[ -f "$x/profile.bashrc" ] && qa_source "$x/profile.bashrc"
+		done
+	fi
+
+	# We assume if people are changing shopts in their bashrc they do so at their
+	# own peril.  This is the ONLY non-portage bit of code that can change shopts
+	# without a QA violation.
+	for x in "${PORTAGE_BASHRC}" "${PM_EBUILD_HOOK_DIR}"/${CATEGORY}/{${PN},${PN}:${SLOT},${P},${PF}}; do
+		if [ -r "${x}" ]; then
+			# If $- contains x, then tracing has already been enabled
+			# elsewhere for some reason. We preserve its state so as not
+			# to interfere.
+			if [ "$PORTAGE_DEBUG" != "1" ] || [ "${-/x/}" != "$-" ]; then
+				source "${x}"
+			else
+				set -x
+				source "${x}"
+				set +x
+			fi
+		fi
+	done
+
+	[ ! -z "${OCC}" ] && export CC="${OCC}"
+	[ ! -z "${OCXX}" ] && export CXX="${OCXX}"
+}
+
+# Hardcoded bash lists are needed for backward compatibility with
+# <portage-2.1.4 since they assume that a newly installed version
+# of ebuild.sh will work for pkg_postinst, pkg_prerm, and pkg_postrm
+# when portage is upgrading itself.
+
+# Metadata variables that must not be modified by the ebuild environment.
+PORTAGE_READONLY_METADATA="DEFINED_PHASES DEPEND DESCRIPTION
+	EAPI HOMEPAGE INHERITED IUSE REQUIRED_USE KEYWORDS LICENSE
+	PDEPEND PROVIDE RDEPEND RESTRICT SLOT SRC_URI"
+
+# Variables set by portage itself; filter_readonly_variables() strips
+# these when saving/loading the environment.
+PORTAGE_READONLY_VARS="D EBUILD EBUILD_PHASE \
+	EBUILD_SH_ARGS ECLASSDIR EMERGE_FROM FILESDIR MERGE_TYPE \
+	PM_EBUILD_HOOK_DIR \
+	PORTAGE_ACTUAL_DISTDIR PORTAGE_ARCHLIST PORTAGE_BASHRC \
+	PORTAGE_BINPKG_FILE PORTAGE_BINPKG_TAR_OPTS PORTAGE_BINPKG_TMPFILE \
+	PORTAGE_BIN_PATH PORTAGE_BUILDDIR PORTAGE_BUNZIP2_COMMAND \
+	PORTAGE_BZIP2_COMMAND PORTAGE_COLORMAP PORTAGE_CONFIGROOT \
+	PORTAGE_DEBUG PORTAGE_DEPCACHEDIR PORTAGE_EBUILD_EXIT_FILE \
+	PORTAGE_GID PORTAGE_GRPNAME PORTAGE_INST_GID PORTAGE_INST_UID \
+	PORTAGE_IPC_DAEMON PORTAGE_IUSE PORTAGE_LOG_FILE \
+	PORTAGE_MUTABLE_FILTERED_VARS PORTAGE_PYM_PATH PORTAGE_PYTHON \
+	PORTAGE_READONLY_METADATA PORTAGE_READONLY_VARS \
+	PORTAGE_REPO_NAME PORTAGE_RESTRICT PORTAGE_SANDBOX_COMPAT_LEVEL \
+	PORTAGE_SAVED_READONLY_VARS PORTAGE_SIGPIPE_STATUS \
+	PORTAGE_TMPDIR PORTAGE_UPDATE_ENV PORTAGE_USERNAME \
+	PORTAGE_VERBOSE PORTAGE_WORKDIR_MODE PORTDIR PORTDIR_OVERLAY \
+	PROFILE_PATHS REPLACING_VERSIONS REPLACED_BY_VERSION T WORKDIR"
+
+# Readonly vars whose saved values are restored when loading environment.bz2.
+PORTAGE_SAVED_READONLY_VARS="A CATEGORY P PF PN PR PV PVR"
+
+# Variables that portage sets but doesn't mark readonly.
+# In order to prevent changed values from causing unexpected
+# interference, they are filtered out of the environment when
+# it is saved or loaded (any mutations do not persist).
+PORTAGE_MUTABLE_FILTERED_VARS="AA HOSTNAME"
+
+# @FUNCTION: filter_readonly_variables
+# @DESCRIPTION: [--filter-sandbox] [--allow-extra-vars]
+# Read an environment from stdin and echo to stdout while filtering variables
+# with names that are known to cause interference:
+#
+#	* some specific variables for which bash does not allow assignment
+#	* some specific variables that affect portage or sandbox behavior
+#	* variable names that begin with a digit or that contain any
+#	  non-alphanumeric characters that are not supported by bash
+#
+# --filter-sandbox causes all SANDBOX_* variables to be filtered, which
+# is only desired in certain cases, such as during preprocessing or when
+# saving environment.bz2 for a binary or installed package.
+#
+# --filter-features causes the special FEATURES variable to be filtered.
+# Generally, we want it to persist between phases since the user might
+# want to modify it via bashrc to enable things like splitdebug and
+# installsources for specific packages. They should be able to modify it
+# in pre_pkg_setup() and have it persist all the way through the install
+# phase. However, if FEATURES exist inside environment.bz2 then they
+# should be overridden by current settings.
+#
+# --filter-locale causes locale related variables such as LANG and LC_*
+# variables to be filtered. These variables should persist between phases,
+# in case they are modified by the ebuild. However, the current user
+# settings should be used when loading the environment from a binary or
+# installed package.
+#
+# --filter-path causes the PATH variable to be filtered. This variable
+# should persist between phases, in case it is modified by the ebuild.
+# However, old settings should be overridden when loading the
+# environment from a binary or installed package.
+#
+# --allow-extra-vars causes some extra vars to be allowed through, such
+# as ${PORTAGE_SAVED_READONLY_VARS} and ${PORTAGE_MUTABLE_FILTERED_VARS}.
+#
+# In bash-3.2_p20+ an attempt to assign BASH_*, FUNCNAME, GROUPS or any
+# readonly variable causes the shell to exit while executing the "source"
+# builtin command. To avoid this problem, this function filters those
+# variables out and discards them. See bug #190128.
+filter_readonly_variables() {
+	local x filtered_vars
+	local readonly_bash_vars="BASHOPTS BASHPID DIRSTACK EUID
+		FUNCNAME GROUPS PIPESTATUS PPID SHELLOPTS UID"
+	local bash_misc_vars="BASH BASH_.* COMP_WORDBREAKS HISTCMD
+		HISTFILE HOSTNAME HOSTTYPE IFS LINENO MACHTYPE OLDPWD
+		OPTERR OPTIND OSTYPE POSIXLY_CORRECT PS4 PWD RANDOM
+		SECONDS SHELL SHLVL"
+	local filtered_sandbox_vars="SANDBOX_ACTIVE SANDBOX_BASHRC
+		SANDBOX_DEBUG_LOG SANDBOX_DISABLED SANDBOX_LIB
+		SANDBOX_LOG SANDBOX_ON"
+	local misc_garbage_vars="_portage_filter_opts"
+	filtered_vars="$readonly_bash_vars $bash_misc_vars
+		$PORTAGE_READONLY_VARS $misc_garbage_vars"
+
+	# Don't filter/interfere with prefix variables unless they are
+	# supported by the current EAPI.
+	case "${EAPI:-0}" in
+		0|1|2)
+			;;
+		*)
+			filtered_vars+=" ED EPREFIX EROOT"
+			;;
+	esac
+
+	if has --filter-sandbox $* ; then
+		filtered_vars="${filtered_vars} SANDBOX_.*"
+	else
+		filtered_vars="${filtered_vars} ${filtered_sandbox_vars}"
+	fi
+	if has --filter-features $* ; then
+		filtered_vars="${filtered_vars} FEATURES PORTAGE_FEATURES"
+	fi
+	if has --filter-path $* ; then
+		filtered_vars+=" PATH"
+	fi
+	if has --filter-locale $* ; then
+		filtered_vars+=" LANG LC_ALL LC_COLLATE
+			LC_CTYPE LC_MESSAGES LC_MONETARY
+			LC_NUMERIC LC_PAPER LC_TIME"
+	fi
+	if ! has --allow-extra-vars $* ; then
+		filtered_vars="
+			${filtered_vars}
+			${PORTAGE_SAVED_READONLY_VARS}
+			${PORTAGE_MUTABLE_FILTERED_VARS}
+		"
+	fi
+
+	# The actual filtering is delegated to the python helper.
+	"${PORTAGE_PYTHON:-/usr/bin/python}" "${PORTAGE_BIN_PATH}"/filter-bash-environment.py "${filtered_vars}" || die "filter-bash-environment.py failed"
+}
+
+# @FUNCTION: preprocess_ebuild_env
+# @DESCRIPTION:
+# Filter any readonly variables from ${T}/environment, source it, and then
+# save it via save_ebuild_env(). This process should be sufficient to prevent
+# any stale variables or functions from an arbitrary environment from
+# interfering with the current environment. This is useful when an existing
+# environment needs to be loaded from a binary or installed package.
+preprocess_ebuild_env() {
+	local _portage_filter_opts="--filter-features --filter-locale --filter-path --filter-sandbox"
+
+	# If environment.raw is present, this is a signal from the python side,
+	# indicating that the environment may contain stale FEATURES and
+	# SANDBOX_{DENY,PREDICT,READ,WRITE} variables that should be filtered out.
+	# Otherwise, we don't need to filter the environment.
+	[ -f "${T}/environment.raw" ] || return 0
+
+	filter_readonly_variables $_portage_filter_opts < "${T}"/environment \
+		>> "$T/environment.filtered" || return $?
+	unset _portage_filter_opts
+	mv "${T}"/environment.filtered "${T}"/environment || return $?
+	# environment.success acts as the subshell's success flag.
+	rm -f "${T}/environment.success" || return $?
+	# WARNING: Code inside this subshell should avoid making assumptions
+	# about variables or functions after source "${T}"/environment has been
+	# called. Any variables that need to be relied upon should already be
+	# filtered out above.
+	(
+		export SANDBOX_ON=1
+		source "${T}/environment" || exit $?
+		# We have to temporarily disable sandbox since the
+		# SANDBOX_{DENY,READ,PREDICT,WRITE} values we've just loaded
+		# may be unusable (triggering in spurious sandbox violations)
+		# until we've merged them with our current values.
+		export SANDBOX_ON=0
+
+		# It's remotely possible that save_ebuild_env() has been overridden
+		# by the above source command. To protect ourselves, we override it
+		# here with our own version. ${PORTAGE_BIN_PATH} is safe to use here
+		# because it's already filtered above.
+		source "${PORTAGE_BIN_PATH}/isolated-functions.sh" || exit $?
+
+		# Rely on save_ebuild_env() to filter out any remaining variables
+		# and functions that could interfere with the current environment.
+		save_ebuild_env || exit $?
+		>> "$T/environment.success" || exit $?
+	) > "${T}/environment.filtered"
+	local retval
+	if [ -e "${T}/environment.success" ] ; then
+		filter_readonly_variables --filter-features < \
+			"${T}/environment.filtered" > "${T}/environment"
+		retval=$?
+	else
+		retval=1
+	fi
+	rm -f "${T}"/environment.{filtered,raw,success}
+	return ${retval}
+}
+
+# === === === === === === === === === === === === === === === === === ===
+# === === === === === functions end, main part begins === === === === ===
+# === === === === === functions end, main part begins === === === === ===
+# === === === === === functions end, main part begins === === === === ===
+# === === === === === === === === === === === === === === === === === ===
+
+export SANDBOX_ON="1"
+export S=${WORKDIR}/${P}
+
+unset E_IUSE E_REQUIRED_USE E_DEPEND E_RDEPEND E_PDEPEND
+
+# Turn off extended glob matching so that g++ doesn't get incorrectly matched.
+shopt -u extglob
+
+# Pick the set of commands to intercept for QA warnings, depending on phase.
+if [[ ${EBUILD_PHASE} == depend ]] ; then
+	QA_INTERCEPTORS="awk bash cc egrep equery fgrep g++
+		gawk gcc grep javac java-config nawk perl
+		pkg-config python python-config sed"
+elif [[ ${EBUILD_PHASE} == clean* ]] ; then
+	unset QA_INTERCEPTORS
+else
+	QA_INTERCEPTORS="autoconf automake aclocal libtoolize"
+fi
+# level the QA interceptors if we're in depend
+if [[ -n ${QA_INTERCEPTORS} ]] ; then
+	for BIN in ${QA_INTERCEPTORS}; do
+		BIN_PATH=$(type -Pf ${BIN})
+		if [ "$?" != "0" ]; then
+			BODY="echo \"*** missing command: ${BIN}\" >&2; return 127"
+		else
+			BODY="${BIN_PATH} \"\$@\"; return \$?"
+		fi
+		# Each interceptor is a generated wrapper function that emits an
+		# eqawarn before delegating to the real binary (or failing with 127).
+		if [[ ${EBUILD_PHASE} == depend ]] ; then
+			FUNC_SRC="${BIN}() {
+				if [ \$ECLASS_DEPTH -gt 0 ]; then
+					eqawarn \"QA Notice: '${BIN}' called in global scope: eclass \${ECLASS}\"
+				else
+					eqawarn \"QA Notice: '${BIN}' called in global scope: \${CATEGORY}/\${PF}\"
+				fi
+			${BODY}
+			}"
+		elif has ${BIN} autoconf automake aclocal libtoolize ; then
+			FUNC_SRC="${BIN}() {
+				if ! has \${FUNCNAME[1]} eautoreconf eaclocal _elibtoolize \\
+					eautoheader eautoconf eautomake autotools_run_tool \\
+					autotools_check_macro autotools_get_subdirs \\
+					autotools_get_auxdir ; then
+					eqawarn \"QA Notice: '${BIN}' called by \${FUNCNAME[1]}: \${CATEGORY}/\${PF}\"
+					eqawarn \"Use autotools.eclass instead of calling '${BIN}' directly.\"
+				fi
+			${BODY}
+			}"
+		else
+			FUNC_SRC="${BIN}() {
+				eqawarn \"QA Notice: '${BIN}' called by \${FUNCNAME[1]}: \${CATEGORY}/\${PF}\"
+			${BODY}
+			}"
+		fi
+		eval "$FUNC_SRC" || echo "error creating QA interceptor ${BIN}" >&2
+	done
+	unset BIN_PATH BIN BODY FUNC_SRC
+fi
+
+# Subshell/helper die support (must export for the die helper).
+export EBUILD_MASTER_PID=$BASHPID
+trap 'exit 1' SIGTERM
+
+if ! has "$EBUILD_PHASE" clean cleanrm depend && \
+	[ -f "${T}"/environment ] ; then
+	# The environment may have been extracted from environment.bz2 or
+	# may have come from another version of ebuild.sh or something.
+	# In any case, preprocess it to prevent any potential interference.
+	preprocess_ebuild_env || \
+		die "error processing environment"
+	# Colon separated SANDBOX_* variables need to be cumulative.
+	for x in SANDBOX_DENY SANDBOX_READ SANDBOX_PREDICT SANDBOX_WRITE ; do
+		export PORTAGE_${x}=${!x}
+	done
+	PORTAGE_SANDBOX_ON=${SANDBOX_ON}
+	export SANDBOX_ON=1
+	source "${T}"/environment || \
+		die "error sourcing environment"
+	# We have to temporarily disable sandbox since the
+	# SANDBOX_{DENY,READ,PREDICT,WRITE} values we've just loaded
+	# may be unusable (triggering in spurious sandbox violations)
+	# until we've merged them with our current values.
+	export SANDBOX_ON=0
+	for x in SANDBOX_DENY SANDBOX_PREDICT SANDBOX_READ SANDBOX_WRITE ; do
+		y="PORTAGE_${x}"
+		if [ -z "${!x}" ] ; then
+			export ${x}=${!y}
+		elif [ -n "${!y}" ] && [ "${!y}" != "${!x}" ] ; then
+			# filter out dupes
+			export ${x}=$(printf "${!y}:${!x}" | tr ":" "\0" | \
+				sort -z -u | tr "\0" ":")
+		fi
+		# Strip any trailing colon left over from the merge above.
+		export ${x}=${!x%:}
+		unset PORTAGE_${x}
+	done
+	unset x y
+	export SANDBOX_ON=${PORTAGE_SANDBOX_ON}
+	unset PORTAGE_SANDBOX_ON
+	[[ -n $EAPI ]] || EAPI=0
+fi
+
+# Source the ebuild (and its eclasses) when metadata needs to be
+# (re)generated: in the depend phase, when no saved environment exists,
+# when the ebuild changed, or under FEATURES=noauto.
+if ! has "$EBUILD_PHASE" clean cleanrm ; then
+	if [[ $EBUILD_PHASE = depend || ! -f $T/environment || \
+		-f $PORTAGE_BUILDDIR/.ebuild_changed ]] || \
+		has noauto $FEATURES ; then
+		# The bashrcs get an opportunity here to set aliases that will be expanded
+		# during sourcing of ebuilds and eclasses.
+		source_all_bashrcs
+
+		# When EBUILD_PHASE != depend, INHERITED comes pre-initialized
+		# from cache. In order to make INHERITED content independent of
+		# EBUILD_PHASE during inherit() calls, we unset INHERITED after
+		# we make a backup copy for QA checks.
+		__INHERITED_QA_CACHE=$INHERITED
+
+		# *DEPEND and IUSE will be set during the sourcing of the ebuild.
+		# In order to ensure correct interaction between ebuilds and
+		# eclasses, they need to be unset before this process of
+		# interaction begins.
+		unset DEPEND RDEPEND PDEPEND INHERITED IUSE REQUIRED_USE \
+			ECLASS E_IUSE E_REQUIRED_USE E_DEPEND E_RDEPEND E_PDEPEND
+
+		if [[ $PORTAGE_DEBUG != 1 || ${-/x/} != $- ]] ; then
+			source "$EBUILD" || die "error sourcing ebuild"
+		else
+			set -x
+			source "$EBUILD" || die "error sourcing ebuild"
+			set +x
+		fi
+
+		if [[ "${EBUILD_PHASE}" != "depend" ]] ; then
+			RESTRICT=${PORTAGE_RESTRICT}
+			[[ -e $PORTAGE_BUILDDIR/.ebuild_changed ]] && \
+				rm "$PORTAGE_BUILDDIR/.ebuild_changed"
+		fi
+
+		[[ -n $EAPI ]] || EAPI=0
+
+		# In these old EAPIs, an unset RDEPEND implicitly defaults to DEPEND.
+		if has "$EAPI" 0 1 2 3 3_pre2 ; then
+			export RDEPEND=${RDEPEND-${DEPEND}}
+			debug-print "RDEPEND: not set... Setting to: ${DEPEND}"
+		fi
+
+		# add in dependency info from eclasses
+		IUSE="${IUSE} ${E_IUSE}"
+		DEPEND="${DEPEND} ${E_DEPEND}"
+		RDEPEND="${RDEPEND} ${E_RDEPEND}"
+		PDEPEND="${PDEPEND} ${E_PDEPEND}"
+		REQUIRED_USE="${REQUIRED_USE} ${E_REQUIRED_USE}"
+
+		unset ECLASS E_IUSE E_REQUIRED_USE E_DEPEND E_RDEPEND E_PDEPEND \
+			__INHERITED_QA_CACHE
+
+		# alphabetically ordered by $EBUILD_PHASE value
+		case "$EAPI" in
+			0|1)
+				_valid_phases="src_compile pkg_config pkg_info src_install
+					pkg_nofetch pkg_postinst pkg_postrm pkg_preinst pkg_prerm
+					pkg_setup src_test src_unpack"
+				;;
+			2|3|3_pre2)
+				_valid_phases="src_compile pkg_config src_configure pkg_info
+					src_install pkg_nofetch pkg_postinst pkg_postrm pkg_preinst
+					src_prepare pkg_prerm pkg_setup src_test src_unpack"
+				;;
+			*)
+				_valid_phases="src_compile pkg_config src_configure pkg_info
+					src_install pkg_nofetch pkg_postinst pkg_postrm pkg_preinst
+					src_prepare pkg_prerm pkg_pretend pkg_setup src_test src_unpack"
+				;;
+		esac
+
+		# DEFINED_PHASES: space-separated list of phase functions the ebuild
+		# defines, with the pkg_/src_ prefixes stripped; '-' means none.
+		DEFINED_PHASES=
+		for _f in $_valid_phases ; do
+			if declare -F $_f >/dev/null ; then
+				_f=${_f#pkg_}
+				DEFINED_PHASES+=" ${_f#src_}"
+			fi
+		done
+		[[ -n $DEFINED_PHASES ]] || DEFINED_PHASES=-
+
+		unset _f _valid_phases
+
+		if [[ $EBUILD_PHASE != depend ]] ; then
+
+			case "$EAPI" in
+				0|1|2|3)
+					_ebuild_helpers_path="$PORTAGE_BIN_PATH/ebuild-helpers"
+					;;
+				*)
+					# For EAPI 4+, the ebuild-helpers/4 directory comes first
+					# so its helpers override the common ones via PATH order.
+					_ebuild_helpers_path="$PORTAGE_BIN_PATH/ebuild-helpers/4:$PORTAGE_BIN_PATH/ebuild-helpers"
+					;;
+			esac
+
+			PATH=$_ebuild_helpers_path:$PREROOTPATH${PREROOTPATH:+:}/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin${ROOTPATH:+:}$ROOTPATH
+			unset _ebuild_helpers_path
+
+			# Use default ABI libdir in accordance with bug #355283.
+			x=LIBDIR_${DEFAULT_ABI}
+			[[ -n $DEFAULT_ABI && -n ${!x} ]] && x=${!x} || x=lib
+
+			if has distcc $FEATURES ; then
+				PATH="/usr/$x/distcc/bin:$PATH"
+				[[ -n $DISTCC_LOG ]] && addwrite "${DISTCC_LOG%/*}"
+			fi
+
+			if has ccache $FEATURES ; then
+				PATH="/usr/$x/ccache/bin:$PATH"
+
+				if [[ -n $CCACHE_DIR ]] ; then
+					addread "$CCACHE_DIR"
+					addwrite "$CCACHE_DIR"
+				fi
+
+				[[ -n $CCACHE_SIZE ]] && ccache -M $CCACHE_SIZE &> /dev/null
+			fi
+
+			unset x
+
+			if [[ -n $QA_PREBUILT ]] ; then
+
+				# these ones support fnmatch patterns
+				QA_EXECSTACK+=" $QA_PREBUILT"
+				QA_TEXTRELS+=" $QA_PREBUILT"
+				QA_WX_LOAD+=" $QA_PREBUILT"
+
+				# these ones support regular expressions, so translate
+				# fnmatch patterns to regular expressions
+				for x in QA_DT_HASH QA_DT_NEEDED QA_PRESTRIPPED QA_SONAME ; do
+					if [[ $(declare -p $x 2>/dev/null) = declare\ -a* ]] ; then
+						eval "$x=(\"\${$x[@]}\" ${QA_PREBUILT//\*/.*})"
+					else
+						eval "$x+=\" ${QA_PREBUILT//\*/.*}\""
+					fi
+				done
+
+				unset x
+			fi
+
+			# This needs to be exported since prepstrip is a separate shell script.
+			[[ -n $QA_PRESTRIPPED ]] && export QA_PRESTRIPPED
+			eval "[[ -n \$QA_PRESTRIPPED_${ARCH/-/_} ]] && \
+				export QA_PRESTRIPPED_${ARCH/-/_}"
+		fi
+	fi
+fi
+
+# unset USE_EXPAND variables that contain only the special "*" token
+for x in ${USE_EXPAND} ; do
+	[ "${!x}" == "*" ] && unset ${x}
+done
+unset x
+
+# nostrip/RESTRICT=strip implies a debug build: keep symbols.
+if has nostrip ${FEATURES} ${RESTRICT} || has strip ${RESTRICT}
+then
+	export DEBUGBUILD=1
+fi
+
+#a reasonable default for $S
+[[ -z ${S} ]] && export S=${WORKDIR}/${P}
+
+# Note: readonly variables interfere with preprocess_ebuild_env(), so
+# declare them only after it has already run.
+if [ "${EBUILD_PHASE}" != "depend" ] ; then
+	declare -r $PORTAGE_READONLY_METADATA $PORTAGE_READONLY_VARS
+	case "$EAPI" in
+		0|1|2)
+			;;
+		*)
+			# EAPI 3+ offset-prefix variables are immutable too.
+			declare -r ED EPREFIX EROOT
+			;;
+	esac
+fi
+
+# ebuild_main: dispatch on ${EBUILD_SH_ARGS} and execute the requested
+# phase (via dyn_* / ebuild_phase_with_hooks) or emit metadata for depend.
+ebuild_main() {
+
+	# Subshell/helper die support (must export for the die helper).
+	# Since this function is typically executed in a subshell,
+	# setup EBUILD_MASTER_PID to refer to the current $BASHPID,
+	# which seems to give the best results when further
+	# nested subshells call die.
+	export EBUILD_MASTER_PID=$BASHPID
+	trap 'exit 1' SIGTERM
+
+	if [[ $EBUILD_PHASE != depend ]] ; then
+		# Force configure scripts that automatically detect ccache to
+		# respect FEATURES="-ccache".
+		has ccache $FEATURES || export CCACHE_DISABLE=1
+
+		local phase_func=$(_ebuild_arg_to_phase "$EAPI" "$EBUILD_PHASE")
+		[[ -n $phase_func ]] && _ebuild_phase_funcs "$EAPI" "$phase_func"
+		unset phase_func
+	fi
+
+	source_all_bashrcs
+
+	case ${EBUILD_SH_ARGS} in
+	nofetch)
+		ebuild_phase_with_hooks pkg_nofetch
+		;;
+	prerm|postrm|postinst|config|info)
+		# config/info are optional phases; warn when the ebuild lacks them.
+		if has "$EBUILD_SH_ARGS" config info && \
+			! declare -F "pkg_$EBUILD_SH_ARGS" >/dev/null ; then
+			ewarn "pkg_${EBUILD_SH_ARGS}() is not defined: '${EBUILD##*/}'"
+		fi
+		export SANDBOX_ON="0"
+		if [ "${PORTAGE_DEBUG}" != "1" ] || [ "${-/x/}" != "$-" ]; then
+			ebuild_phase_with_hooks pkg_${EBUILD_SH_ARGS}
+		else
+			set -x
+			ebuild_phase_with_hooks pkg_${EBUILD_SH_ARGS}
+			set +x
+		fi
+		if [[ $EBUILD_PHASE == postinst ]] && [[ -n $PORTAGE_UPDATE_ENV ]]; then
+			# Update environment.bz2 in case installation phases
+			# need to pass some variables to uninstallation phases.
+			save_ebuild_env --exclude-init-phases | \
+				filter_readonly_variables --filter-path \
+				--filter-sandbox --allow-extra-vars \
+				| ${PORTAGE_BZIP2_COMMAND} -c -f9 > "$PORTAGE_UPDATE_ENV"
+			assert "save_ebuild_env failed"
+		fi
+		;;
+	unpack|prepare|configure|compile|test|clean|install)
+		if [[ ${SANDBOX_DISABLED:-0} = 0 ]] ; then
+			export SANDBOX_ON="1"
+		else
+			export SANDBOX_ON="0"
+		fi
+
+		case "$EBUILD_SH_ARGS" in
+		configure|compile)
+
+			# Export user build flags only when they are actually set,
+			# so unset stays distinguishable from empty.
+			local x
+			for x in ASFLAGS CCACHE_DIR CCACHE_SIZE \
+				CFLAGS CXXFLAGS LDFLAGS LIBCFLAGS LIBCXXFLAGS ; do
+				[[ ${!x+set} = set ]] && export $x
+			done
+			unset x
+
+			has distcc $FEATURES && [[ -n $DISTCC_DIR ]] && \
+				[[ ${SANDBOX_WRITE/$DISTCC_DIR} = $SANDBOX_WRITE ]] && \
+				addwrite "$DISTCC_DIR"
+
+			x=LIBDIR_$ABI
+			[ -z "$PKG_CONFIG_PATH" -a -n "$ABI" -a -n "${!x}" ] && \
+				export PKG_CONFIG_PATH=/usr/${!x}/pkgconfig
+
+			if has noauto $FEATURES && \
+				[[ ! -f $PORTAGE_BUILDDIR/.unpacked ]] ; then
+				echo
+				echo "!!! We apparently haven't unpacked..." \
+					"This is probably not what you"
+				echo "!!! want to be doing... You are using" \
+					"FEATURES=noauto so I'll assume"
+				echo "!!! that you know what you are doing..." \
+					"You have 5 seconds to abort..."
+				echo
+
+				# 8 x 0.25s + the 3s below = the advertised 5 second
+				# grace period; short sleeps keep ^C responsive.
+				local x
+				for x in 1 2 3 4 5 6 7 8; do
+					LC_ALL=C sleep 0.25
+				done
+
+				sleep 3
+			fi
+
+			cd "$PORTAGE_BUILDDIR"
+			if [ ! -d build-info ] ; then
+				mkdir build-info
+				cp "$EBUILD" "build-info/$PF.ebuild"
+			fi
+
+			#our custom version of libtool uses $S and $D to fix
+			#invalid paths in .la files
+			export S D
+
+			;;
+		esac
+
+		if [ "${PORTAGE_DEBUG}" != "1" ] || [ "${-/x/}" != "$-" ]; then
+			dyn_${EBUILD_SH_ARGS}
+		else
+			set -x
+			dyn_${EBUILD_SH_ARGS}
+			set +x
+		fi
+		export SANDBOX_ON="0"
+		;;
+	help|pretend|setup|preinst)
+		#pkg_setup needs to be out of the sandbox for tmp file creation;
+		#for example, awking and piping a file in /tmp requires a temp file to be created
+		#in /etc. If pkg_setup is in the sandbox, both our lilo and apache ebuilds break.
+		export SANDBOX_ON="0"
+		if [ "${PORTAGE_DEBUG}" != "1" ] || [ "${-/x/}" != "$-" ]; then
+			dyn_${EBUILD_SH_ARGS}
+		else
+			set -x
+			dyn_${EBUILD_SH_ARGS}
+			set +x
+		fi
+		;;
+	depend)
+		export SANDBOX_ON="0"
+		# Disable pathname expansion while emitting raw metadata values.
+		set -f
+
+		if [ -n "${dbkey}" ] ; then
+			if [ ! -d "${dbkey%/*}" ]; then
+				install -d -g ${PORTAGE_GID} -m2775 "${dbkey%/*}"
+			fi
+			# Make it group writable. 666&~002==664
+			umask 002
+		fi
+
+		auxdbkeys="DEPEND RDEPEND SLOT SRC_URI RESTRICT HOMEPAGE LICENSE
+			DESCRIPTION KEYWORDS INHERITED IUSE REQUIRED_USE PDEPEND PROVIDE EAPI
+			PROPERTIES DEFINED_PHASES UNUSED_05 UNUSED_04
+			UNUSED_03 UNUSED_02 UNUSED_01"
+
+		#the extra $(echo) commands remove newlines
+		[ -n "${EAPI}" ] || EAPI=0
+
+		if [ -n "${dbkey}" ] ; then
+			> "${dbkey}"
+			for f in ${auxdbkeys} ; do
+				echo $(echo ${!f}) >> "${dbkey}" || exit $?
+			done
+		else
+			# No dbkey file: stream metadata to the pipe on fd 9 instead.
+			for f in ${auxdbkeys} ; do
+				echo $(echo ${!f}) 1>&9 || exit $?
+			done
+			exec 9>&-
+		fi
+		set +f
+		;;
+	_internal_test)
+		;;
+	*)
+		export SANDBOX_ON="1"
+		echo "Unrecognized EBUILD_SH_ARGS: '${EBUILD_SH_ARGS}'"
+		echo
+		dyn_help
+		exit 1
+		;;
+	esac
+}
+
+# Remove any stale sandbox log so it cannot trigger false violation
+# reports later; sandbox is temporarily disabled around the rm so the
+# removal itself is not logged as a violation.
+if [[ -s $SANDBOX_LOG ]] ; then
+	# We use SANDBOX_LOG to check for sandbox violations,
+	# so we ensure that there can't be a stale log to
+	# interfere with our logic.
+	x=
+	# BUGFIX: this previously read '[[ -n SANDBOX_ON ]]', which tests the
+	# literal string "SANDBOX_ON" and is therefore always true — so an
+	# unset/empty SANDBOX_ON was clobbered to 0 and never restored.
+	# Test the variable's value instead.
+	if [[ -n $SANDBOX_ON ]] ; then
+		x=$SANDBOX_ON
+		export SANDBOX_ON=0
+	fi
+
+	rm -f "$SANDBOX_LOG" || \
+		die "failed to remove stale sandbox log: '$SANDBOX_LOG'"
+
+	# Restore the saved SANDBOX_ON value, if there was one.
+	if [[ -n $x ]] ; then
+		export SANDBOX_ON=$x
+	fi
+	unset x
+fi
+
+# Top-level dispatch: run depend inline, other phases in a subshell that
+# also persists the environment and reports status to the IPC daemon.
+if [[ $EBUILD_PHASE = depend ]] ; then
+	ebuild_main
+elif [[ -n $EBUILD_SH_ARGS ]] ; then
+	(
+		# Don't allow subprocesses to inherit the pipe which
+		# emerge uses to monitor ebuild.sh.
+		exec 9>&-
+
+		ebuild_main
+
+		# Save the env only for relevant phases.
+		if ! has "$EBUILD_SH_ARGS" clean help info nofetch ; then
+			umask 002
+			save_ebuild_env | filter_readonly_variables \
+				--filter-features > "$T/environment"
+			assert "save_ebuild_env failed"
+			# Best effort; failures (e.g. not running as root) are ignored.
+			chown portage:portage "$T/environment" &>/dev/null
+			chmod g+w "$T/environment" &>/dev/null
+		fi
+		[[ -n $PORTAGE_EBUILD_EXIT_FILE ]] && > "$PORTAGE_EBUILD_EXIT_FILE"
+		if [[ -n $PORTAGE_IPC_DAEMON ]] ; then
+			# A non-empty sandbox log means a violation occurred; pass
+			# the resulting test status to the IPC daemon's exit command.
+			[[ ! -s $SANDBOX_LOG ]]
+			"$PORTAGE_BIN_PATH"/ebuild-ipc exit $?
+		fi
+		exit 0
+	)
+	exit $?
+fi
+
+# Do not exit when ebuild.sh is sourced by other scripts.
+true
diff --git a/portage_with_autodep/bin/egencache b/portage_with_autodep/bin/egencache
new file mode 100755
index 0000000..1b4265d
--- /dev/null
+++ b/portage_with_autodep/bin/egencache
@@ -0,0 +1,851 @@
+#!/usr/bin/python
+# Copyright 2009-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import signal
+import sys
+# This block ensures that ^C interrupts are handled quietly.
+try:
+
+	def exithandler(signum,frame):
+		# Ignore further signals while shutting down, then exit with
+		# the conventional 128+signum status.
+		signal.signal(signal.SIGINT, signal.SIG_IGN)
+		signal.signal(signal.SIGTERM, signal.SIG_IGN)
+		sys.exit(128 + signum)
+
+	signal.signal(signal.SIGINT, exithandler)
+	signal.signal(signal.SIGTERM, exithandler)
+
+except KeyboardInterrupt:
+	# A ^C delivered before the handler was installed lands here.
+	sys.exit(128 + signal.SIGINT)
+
+import io
+import logging
+import optparse
+import subprocess
+import time
+import textwrap
+import re
+
+try:
+ import portage
+except ImportError:
+ from os import path as osp
+ sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+ import portage
+
+from portage import os, _encodings, _unicode_encode, _unicode_decode
+from _emerge.MetadataRegen import MetadataRegen
+from portage.cache.cache_errors import CacheError, StatCollision
+from portage.manifest import guessManifestFileType
+from portage.util import cmp_sort_key, writemsg_level
+from portage import cpv_getkey
+from portage.dep import Atom, isjustname
+from portage.versions import pkgcmp, pkgsplit, vercmp
+
+try:
+ from xml.etree import ElementTree
+except ImportError:
+ pass
+else:
+ try:
+ from xml.parsers.expat import ExpatError
+ except ImportError:
+ pass
+ else:
+ from repoman.utilities import parse_metadata_use
+
+from repoman.utilities import FindVCS
+
+# Python 3 removed the 'long' builtin; alias it so later code (mtime
+# handling in GenCache) works on either major version.
+if sys.hexversion >= 0x3000000:
+	long = int
+
+def parse_args(args):
+	"""Parse egencache command-line arguments.
+
+	Returns a (parser, options, args) tuple. Numeric options are
+	validated and normalized in place (jobs -> int or None,
+	load_average -> float or None); invalid values abort via
+	parser.error().
+	"""
+	usage = "egencache [options] <action> ... [atom] ..."
+	parser = optparse.OptionParser(usage=usage)
+
+	actions = optparse.OptionGroup(parser, 'Actions')
+	actions.add_option("--update",
+		action="store_true",
+		help="update metadata/cache/ (generate as necessary)")
+	actions.add_option("--update-use-local-desc",
+		action="store_true",
+		help="update the use.local.desc file from metadata.xml")
+	actions.add_option("--update-changelogs",
+		action="store_true",
+		help="update the ChangeLog files from SCM logs")
+	parser.add_option_group(actions)
+
+	common = optparse.OptionGroup(parser, 'Common options')
+	common.add_option("--repo",
+		action="store",
+		help="name of repo to operate on (default repo is located at $PORTDIR)")
+	common.add_option("--config-root",
+		help="location of portage config files",
+		dest="portage_configroot")
+	common.add_option("--portdir",
+		help="override the portage tree location",
+		dest="portdir")
+	common.add_option("--tolerant",
+		action="store_true",
+		help="exit successfully if only minor errors occurred")
+	common.add_option("--ignore-default-opts",
+		action="store_true",
+		help="do not use the EGENCACHE_DEFAULT_OPTS environment variable")
+	parser.add_option_group(common)
+
+	update = optparse.OptionGroup(parser, '--update options')
+	update.add_option("--cache-dir",
+		help="location of the metadata cache",
+		dest="cache_dir")
+	update.add_option("--jobs",
+		action="store",
+		help="max ebuild processes to spawn")
+	update.add_option("--load-average",
+		action="store",
+		help="max load allowed when spawning multiple jobs",
+		dest="load_average")
+	update.add_option("--rsync",
+		action="store_true",
+		help="enable rsync stat collision workaround " + \
+			"for bug 139134 (use with --update)")
+	parser.add_option_group(update)
+
+	uld = optparse.OptionGroup(parser, '--update-use-local-desc options')
+	uld.add_option("--preserve-comments",
+		action="store_true",
+		help="preserve the comments from the existing use.local.desc file")
+	uld.add_option("--use-local-desc-output",
+		help="output file for use.local.desc data (or '-' for stdout)",
+		dest="uld_output")
+	parser.add_option_group(uld)
+
+	options, args = parser.parse_args(args)
+
+	# Normalize --jobs: unparsable values map to -1 and fall through
+	# to the same error as non-positive integers.
+	if options.jobs:
+		jobs = None
+		try:
+			jobs = int(options.jobs)
+		except ValueError:
+			jobs = -1
+
+		if jobs < 1:
+			parser.error("Invalid: --jobs='%s'" % \
+				(options.jobs,))
+
+		options.jobs = jobs
+
+	else:
+		options.jobs = None
+
+	# Normalize --load-average: must parse as a positive float.
+	if options.load_average:
+		try:
+			load_average = float(options.load_average)
+		except ValueError:
+			load_average = 0.0
+
+		if load_average <= 0.0:
+			parser.error("Invalid: --load-average='%s'" % \
+				(options.load_average,))
+
+		options.load_average = load_average
+
+	else:
+		options.load_average = None
+
+	options.config_root = options.portage_configroot
+	if options.config_root is not None and \
+		not os.path.isdir(options.config_root):
+		parser.error("Not a directory: --config-root='%s'" % \
+			(options.config_root,))
+
+	if options.cache_dir is not None and not os.path.isdir(options.cache_dir):
+		parser.error("Not a directory: --cache-dir='%s'" % \
+			(options.cache_dir,))
+
+	# Atoms may only name a category/package; anything more specific
+	# (version, slot, etc.) is rejected by isjustname().
+	for atom in args:
+		try:
+			atom = portage.dep.Atom(atom)
+		except portage.exception.InvalidAtom:
+			parser.error('Invalid atom: %s' % (atom,))
+
+		if not isjustname(atom):
+			parser.error('Atom is too specific: %s' % (atom,))
+
+	if options.update_use_local_desc:
+		# ElementTree/ExpatError are only bound at import time when the
+		# interpreter was built with XML support (USE=xml).
+		try:
+			ElementTree
+			ExpatError
+		except NameError:
+			parser.error('--update-use-local-desc requires python with USE=xml!')
+
+	if options.uld_output == '-' and options.preserve_comments:
+		parser.error('--preserve-comments can not be used when outputting to stdout')
+
+	return parser, options, args
+
+class GenCache(object):
+	"""Regenerate metadata/cache entries for a portdb via MetadataRegen,
+	optionally restricted to a set of category/package names, then prune
+	cache entries whose packages no longer exist. Errors accumulate in
+	self.returncode (bitwise OR) rather than aborting."""
+	def __init__(self, portdb, cp_iter=None, max_jobs=None, max_load=None,
+		rsync=False):
+		self._portdb = portdb
+		# We can globally cleanse stale cache only if we
+		# iterate over every single cp.
+		self._global_cleanse = cp_iter is None
+		if cp_iter is not None:
+			self._cp_set = set(cp_iter)
+			cp_iter = iter(self._cp_set)
+			self._cp_missing = self._cp_set.copy()
+		else:
+			self._cp_set = None
+			self._cp_missing = set()
+		self._regen = MetadataRegen(portdb, cp_iter=cp_iter,
+			consumer=self._metadata_callback,
+			max_jobs=max_jobs, max_load=max_load)
+		self.returncode = os.EX_OK
+		metadbmodule = portdb.settings.load_best_module("portdbapi.metadbmodule")
+		self._trg_cache = metadbmodule(portdb.porttrees[0],
+			"metadata/cache", portage.auxdbkeys[:])
+		if rsync:
+			# Ask the cache backend to raise StatCollision so the
+			# rsync mtime workaround in _metadata_callback can run.
+			self._trg_cache.raise_stat_collision = True
+		try:
+			self._trg_cache.ec = \
+				portdb._repo_info[portdb.porttrees[0]].eclass_db
+		except AttributeError:
+			# Older portdb without _repo_info; backend works without ec.
+			pass
+		self._existing_nodes = set()
+
+	def _metadata_callback(self, cpv, ebuild_path, repo_path, metadata):
+		# Consumer invoked by MetadataRegen for each processed cpv.
+		self._existing_nodes.add(cpv)
+		self._cp_missing.discard(cpv_getkey(cpv))
+		if metadata is not None:
+			# EAPI 0 is the implicit default and is stored as absent.
+			if metadata.get('EAPI') == '0':
+				del metadata['EAPI']
+			try:
+				try:
+					self._trg_cache[cpv] = metadata
+				except StatCollision as sc:
+					# If the content of a cache entry changes and neither the
+					# file mtime nor size changes, it will prevent rsync from
+					# detecting changes. Cache backends may raise this
+					# exception from _setitem() if they detect this type of stat
+					# collision. These exceptions are handled by bumping the
+					# mtime on the ebuild (and the corresponding cache entry).
+					# See bug #139134.
+					max_mtime = sc.mtime
+					for ec, (loc, ec_mtime) in metadata['_eclasses_'].items():
+						if max_mtime < ec_mtime:
+							max_mtime = ec_mtime
+					if max_mtime == sc.mtime:
+						max_mtime += 1
+					max_mtime = long(max_mtime)
+					try:
+						os.utime(ebuild_path, (max_mtime, max_mtime))
+					except OSError as e:
+						self.returncode |= 1
+						writemsg_level(
+							"%s writing target: %s\n" % (cpv, e),
+							level=logging.ERROR, noiselevel=-1)
+					else:
+						metadata['_mtime_'] = max_mtime
+						self._trg_cache[cpv] = metadata
+						self._portdb.auxdb[repo_path][cpv] = metadata
+
+			except CacheError as ce:
+				self.returncode |= 1
+				writemsg_level(
+					"%s writing target: %s\n" % (cpv, ce),
+					level=logging.ERROR, noiselevel=-1)
+
+	def run(self):
+		"""Run the regeneration, then prune stale cache entries and
+		commit the target cache if it does not autocommit."""
+
+		received_signal = []
+
+		def sighandler(signum, frame):
+			signal.signal(signal.SIGINT, signal.SIG_IGN)
+			signal.signal(signal.SIGTERM, signal.SIG_IGN)
+			self._regen.terminate()
+			received_signal.append(128 + signum)
+
+		earlier_sigint_handler = signal.signal(signal.SIGINT, sighandler)
+		earlier_sigterm_handler = signal.signal(signal.SIGTERM, sighandler)
+
+		try:
+			self._regen.run()
+		finally:
+			# Restore previous handlers
+			if earlier_sigint_handler is not None:
+				signal.signal(signal.SIGINT, earlier_sigint_handler)
+			else:
+				signal.signal(signal.SIGINT, signal.SIG_DFL)
+			if earlier_sigterm_handler is not None:
+				signal.signal(signal.SIGTERM, earlier_sigterm_handler)
+			else:
+				signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+		if received_signal:
+			sys.exit(received_signal[0])
+
+		self.returncode |= self._regen.returncode
+		cp_missing = self._cp_missing
+
+		# Collect candidate stale entries: every cache cpv (global mode)
+		# or only those belonging to the requested cp set.
+		trg_cache = self._trg_cache
+		dead_nodes = set()
+		if self._global_cleanse:
+			try:
+				for cpv in trg_cache:
+					cp = cpv_getkey(cpv)
+					if cp is None:
+						self.returncode |= 1
+						writemsg_level(
+							"Unable to parse cp for '%s'\n" % (cpv,),
+							level=logging.ERROR, noiselevel=-1)
+					else:
+						dead_nodes.add(cpv)
+			except CacheError as ce:
+				self.returncode |= 1
+				writemsg_level(
+					"Error listing cache entries for " + \
+					"'%s/metadata/cache': %s, continuing...\n" % \
+					(self._portdb.porttree_root, ce),
+					level=logging.ERROR, noiselevel=-1)
+
+		else:
+			cp_set = self._cp_set
+			try:
+				for cpv in trg_cache:
+					cp = cpv_getkey(cpv)
+					if cp is None:
+						self.returncode |= 1
+						writemsg_level(
+							"Unable to parse cp for '%s'\n" % (cpv,),
+							level=logging.ERROR, noiselevel=-1)
+					else:
+						cp_missing.discard(cp)
+						if cp in cp_set:
+							dead_nodes.add(cpv)
+			except CacheError as ce:
+				self.returncode |= 1
+				writemsg_level(
+					"Error listing cache entries for " + \
+					"'%s/metadata/cache': %s, continuing...\n" % \
+					(self._portdb.porttree_root, ce),
+					level=logging.ERROR, noiselevel=-1)
+
+		if cp_missing:
+			self.returncode |= 1
+			for cp in sorted(cp_missing):
+				writemsg_level(
+					"No ebuilds or cache entries found for '%s'\n" % (cp,),
+					level=logging.ERROR, noiselevel=-1)
+
+		if dead_nodes:
+			# Anything we just (re)generated is alive; delete the rest.
+			dead_nodes.difference_update(self._existing_nodes)
+			for k in dead_nodes:
+				try:
+					del trg_cache[k]
+				except KeyError:
+					pass
+				except CacheError as ce:
+					self.returncode |= 1
+					writemsg_level(
+						"%s deleting stale cache: %s\n" % (k, ce),
+						level=logging.ERROR, noiselevel=-1)
+
+		if not trg_cache.autocommits:
+			try:
+				trg_cache.commit()
+			except CacheError as ce:
+				self.returncode |= 1
+				writemsg_level(
+					"committing target: %s\n" % (ce,),
+					level=logging.ERROR, noiselevel=-1)
+
+class GenUseLocalDesc(object):
+	"""Regenerate profiles/use.local.desc from each package's
+	metadata.xml. Output goes to the profile dir, a named file, or
+	stdout ('-'); errors accumulate in self.returncode."""
+	def __init__(self, portdb, output=None,
+		preserve_comments=False):
+		self.returncode = os.EX_OK
+		self._portdb = portdb
+		self._output = output
+		self._preserve_comments = preserve_comments
+
+	def run(self):
+		repo_path = self._portdb.porttrees[0]
+		# Rank of version-restriction operators; used to pick the most
+		# "specific" atom when several describe the same flag.
+		ops = {'<':0, '<=':1, '=':2, '>=':3, '>':4}
+
+		if self._output is None or self._output != '-':
+			if self._output is None:
+				prof_path = os.path.join(repo_path, 'profiles')
+				desc_path = os.path.join(prof_path, 'use.local.desc')
+				try:
+					os.mkdir(prof_path)
+				except OSError:
+					# Directory already exists (or cannot be created; the
+					# open() below will report the real failure).
+					pass
+			else:
+				desc_path = self._output
+
+			try:
+				if self._preserve_comments:
+					# Probe in binary mode, in order to avoid
+					# potential character encoding issues.
+					output = open(_unicode_encode(desc_path,
+						encoding=_encodings['fs'], errors='strict'), 'r+b')
+				else:
+					output = io.open(_unicode_encode(desc_path,
+						encoding=_encodings['fs'], errors='strict'),
+						mode='w', encoding=_encodings['repo.content'],
+						errors='backslashreplace')
+			except IOError as e:
+				if not self._preserve_comments or \
+					os.path.isfile(desc_path):
+					writemsg_level(
+						"ERROR: failed to open output file %s: %s\n" \
+						% (desc_path, e), level=logging.ERROR, noiselevel=-1)
+					self.returncode |= 2
+					return
+
+				# Open in r+b mode failed because the file doesn't
+				# exist yet. We can probably recover if we disable
+				# preserve_comments mode now.
+				writemsg_level(
+					"WARNING: --preserve-comments enabled, but " + \
+					"output file not found: %s\n" % (desc_path,),
+					level=logging.WARNING, noiselevel=-1)
+				self._preserve_comments = False
+				try:
+					output = io.open(_unicode_encode(desc_path,
+						encoding=_encodings['fs'], errors='strict'),
+						mode='w', encoding=_encodings['repo.content'],
+						errors='backslashreplace')
+				except IOError as e:
+					writemsg_level(
+						"ERROR: failed to open output file %s: %s\n" \
+						% (desc_path, e), level=logging.ERROR, noiselevel=-1)
+					self.returncode |= 2
+					return
+		else:
+			output = sys.stdout
+
+		if self._preserve_comments:
+			# Skip the leading '#' comment block, truncate everything
+			# after it, then reopen in text append mode.
+			while True:
+				pos = output.tell()
+				if not output.readline().startswith(b'#'):
+					break
+			output.seek(pos)
+			output.truncate()
+			output.close()
+
+			# Finished probing comments in binary mode, now append
+			# in text mode.
+			output = io.open(_unicode_encode(desc_path,
+				encoding=_encodings['fs'], errors='strict'),
+				mode='a', encoding=_encodings['repo.content'],
+				errors='backslashreplace')
+			output.write(_unicode_decode('\n'))
+		else:
+			output.write(_unicode_decode('''
+# This file is deprecated as per GLEP 56 in favor of metadata.xml. Please add
+# your descriptions to your package's metadata.xml ONLY.
+# * generated automatically using egencache *
+
+'''.lstrip()))
+
+		# The cmp function no longer exists in python3, so we'll
+		# implement our own here under a slightly different name
+		# since we don't want any confusion given that we never
+		# want to rely on the builtin cmp function.
+		def cmp_func(a, b):
+			if a is None or b is None:
+				# None can't be compared with other types in python3.
+				if a is None and b is None:
+					return 0
+				elif a is None:
+					return -1
+				else:
+					return 1
+			return (a > b) - (a < b)
+
+		for cp in self._portdb.cp_all():
+			metadata_path = os.path.join(repo_path, cp, 'metadata.xml')
+			try:
+				metadata = ElementTree.parse(metadata_path)
+			except IOError:
+				# Missing metadata.xml is not an error.
+				pass
+			except (ExpatError, EnvironmentError) as e:
+				writemsg_level(
+					"ERROR: failed parsing %s/metadata.xml: %s\n" % (cp, e),
+					level=logging.ERROR, noiselevel=-1)
+				self.returncode |= 1
+			else:
+				try:
+					usedict = parse_metadata_use(metadata)
+				except portage.exception.ParseError as e:
+					writemsg_level(
+						"ERROR: failed parsing %s/metadata.xml: %s\n" % (cp, e),
+						level=logging.ERROR, noiselevel=-1)
+					self.returncode |= 1
+				else:
+					for flag in sorted(usedict):
+						def atomcmp(atoma, atomb):
+							# None is better than an atom, that's why we reverse the args
+							if atoma is None or atomb is None:
+								return cmp_func(atomb, atoma)
+							# Same for plain PNs (.operator is None then)
+							elif atoma.operator is None or atomb.operator is None:
+								return cmp_func(atomb.operator, atoma.operator)
+							# Version matching
+							elif atoma.cpv != atomb.cpv:
+								return pkgcmp(pkgsplit(atoma.cpv), pkgsplit(atomb.cpv))
+							# Versions match, let's fallback to operator matching
+							else:
+								return cmp_func(ops.get(atoma.operator, -1),
+									ops.get(atomb.operator, -1))
+
+						def _Atom(key):
+							if key is not None:
+								return Atom(key)
+							return None
+
+						# Pick the description attached to the most general
+						# atom (last in atomcmp order) for this flag.
+						resdict = usedict[flag]
+						if len(resdict) == 1:
+							resdesc = next(iter(resdict.items()))[1]
+						else:
+							try:
+								reskeys = dict((_Atom(k), k) for k in resdict)
+							except portage.exception.InvalidAtom as e:
+								writemsg_level(
+									"ERROR: failed parsing %s/metadata.xml: %s\n" % (cp, e),
+									level=logging.ERROR, noiselevel=-1)
+								self.returncode |= 1
+								resdesc = next(iter(resdict.items()))[1]
+							else:
+								resatoms = sorted(reskeys, key=cmp_sort_key(atomcmp))
+								resdesc = resdict[reskeys[resatoms[-1]]]
+
+						output.write(_unicode_decode(
+							'%s:%s - %s\n' % (cp, flag, resdesc)))
+
+		output.close()
+
+# Base class for _special_filename: unicode on Python 2, str on Python 3,
+# so instances behave as text while carrying extra sort attributes.
+if sys.hexversion < 0x3000000:
+	_filename_base = unicode
+else:
+	_filename_base = str
+
+class _special_filename(_filename_base):
+ """
+ Helps to sort file names by file type and other criteria.
+ """
+ def __new__(cls, status_change, file_name):
+ return _filename_base.__new__(cls, status_change + file_name)
+
+ def __init__(self, status_change, file_name):
+ _filename_base.__init__(status_change + file_name)
+ self.status_change = status_change
+ self.file_name = file_name
+ self.file_type = guessManifestFileType(file_name)
+
+ def file_type_lt(self, a, b):
+ """
+ Defines an ordering between file types.
+ """
+ first = a.file_type
+ second = b.file_type
+ if first == second:
+ return False
+
+ if first == "EBUILD":
+ return True
+ elif first == "MISC":
+ return second in ("EBUILD",)
+ elif first == "AUX":
+ return second in ("EBUILD", "MISC")
+ elif first == "DIST":
+ return second in ("EBUILD", "MISC", "AUX")
+ elif first is None:
+ return False
+ else:
+ raise ValueError("Unknown file type '%s'" % first)
+
+ def __lt__(self, other):
+ """
+ Compare different file names, first by file type and then
+ for ebuilds by version and lexicographically for others.
+ EBUILD < MISC < AUX < DIST < None
+ """
+ if self.__class__ != other.__class__:
+ raise NotImplementedError
+
+ # Sort by file type as defined by file_type_lt().
+ if self.file_type_lt(self, other):
+ return True
+ elif self.file_type_lt(other, self):
+ return False
+
+ # Files have the same type.
+ if self.file_type == "EBUILD":
+ # Sort by version. Lowest first.
+ ver = "-".join(pkgsplit(self.file_name[:-7])[1:3])
+ other_ver = "-".join(pkgsplit(other.file_name[:-7])[1:3])
+ return vercmp(ver, other_ver) < 0
+ else:
+ # Sort lexicographically.
+ return self.file_name < other.file_name
+
+class GenChangeLogs(object):
+	"""
+	Regenerate per-package ChangeLog files from git history.
+	returncode accumulates error flags (os.EX_OK when clean).
+	"""
+	def __init__(self, portdb):
+		self.returncode = os.EX_OK
+		self._portdb = portdb
+		# Wraps the "date; author files:" header and the commit message
+		# body at 78 columns.
+		self._wrapper = textwrap.TextWrapper(
+				width = 78,
+				initial_indent = '  ',
+				subsequent_indent = '  '
+			)
+
+	@staticmethod
+	def grab(cmd):
+		"""Run *cmd* and return its stdout decoded with the stdio encoding."""
+		p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+		return _unicode_decode(p.communicate()[0],
+			encoding=_encodings['stdio'], errors='strict')
+
+	def generate_changelog(self, cp):
+		"""
+		Write a fresh ChangeLog for package *cp* in the current working
+		directory (the package's ebuild directory) from its git log.
+		Sets returncode bit 2 on I/O failure, bit 1 on parse problems.
+		"""
+		try:
+			output = io.open('ChangeLog',
+				mode='w', encoding=_encodings['repo.content'],
+				errors='backslashreplace')
+		except IOError as e:
+			writemsg_level(
+				"ERROR: failed to open ChangeLog for %s: %s\n" % (cp,e,),
+				level=logging.ERROR, noiselevel=-1)
+			self.returncode |= 2
+			return
+
+		output.write(_unicode_decode('''
+# ChangeLog for %s
+# Copyright 1999-%s Gentoo Foundation; Distributed under the GPL v2
+# $Header: $
+
+''' % (cp, time.strftime('%Y'))).lstrip())
+
+		# now grab all the commits
+		commits = self.grab(['git', 'rev-list', 'HEAD', '--', '.']).split()
+
+		for c in commits:
+			# Explaining the arguments:
+			# --name-status to get a list of added/removed files
+			# --no-renames to avoid getting more complex records on the list
+			# --format to get the timestamp, author and commit description
+			# --root to make it work fine even with the initial commit
+			# --relative to get paths relative to ebuilddir
+			# -r (recursive) to get per-file changes
+			# then the commit-id and path.
+
+			cinfo = self.grab(['git', 'diff-tree', '--name-status', '--no-renames',
+				'--format=%ct %cN <%cE>%n%B', '--root', '--relative', '-r',
+				c, '--', '.']).rstrip('\n').split('\n')
+
+			# Expected output:
+			# timestamp Author Name <author@email>
+			# commit message l1
+			# ...
+			# commit message ln
+			#
+			# status1 filename1
+			# ...
+			# statusn filenamen
+
+			# Walk the file-status lines backwards; the first empty line
+			# separates them from the commit message body.
+			changed = []
+			for n, l in enumerate(reversed(cinfo)):
+				if not l:
+					body = cinfo[1:-n-1]
+					break
+				else:
+					f = l.split()
+					if f[1] == 'Manifest':
+						pass # XXX: remanifest commits?
+					elif f[1] == 'ChangeLog':
+						pass
+					elif f[0].startswith('A'):
+						changed.append(_special_filename("+", f[1]))
+					elif f[0].startswith('D'):
+						changed.append(_special_filename("-", f[1]))
+					elif f[0].startswith('M'):
+						changed.append(_special_filename("", f[1]))
+					else:
+						writemsg_level(
+							"ERROR: unexpected git file status for %s: %s\n" % (cp,f,),
+							level=logging.ERROR, noiselevel=-1)
+						self.returncode |= 1
+
+			if not changed:
+				continue
+
+			(ts, author) = cinfo[0].split(' ', 1)
+			date = time.strftime('%d %b %Y', time.gmtime(float(ts)))
+
+			changed = [str(x) for x in sorted(changed)]
+
+			wroteheader = False
+			# Reverse the sort order for headers.
+			# NOTE(review): this inner loop reuses the name 'c', shadowing the
+			# commit id of the enclosing loop (harmless -- 'c' is not used
+			# again afterwards).
+			for c in reversed(changed):
+				if c.startswith('+') and c.endswith('.ebuild'):
+					output.write(_unicode_decode(
+						'*%s (%s)\n' % (c[1:-7], date)))
+					wroteheader = True
+			if wroteheader:
+				output.write(_unicode_decode('\n'))
+
+			# strip '<cp>: ', '[<cp>] ', and similar
+			body[0] = re.sub(r'^\W*' + re.escape(cp) + r'\W+', '', body[0])
+			# strip trailing newline
+			if not body[-1]:
+				body = body[:-1]
+			# strip git-svn id
+			if body[-1].startswith('git-svn-id:') and not body[-2]:
+				body = body[:-2]
+			# strip the repoman version/manifest note
+			if body[-1] == ' (Signed Manifest commit)' or body[-1] == ' (Unsigned Manifest commit)':
+				body = body[:-1]
+			if body[-1].startswith('(Portage version:') and body[-1].endswith(')'):
+				body = body[:-1]
+			if not body[-1]:
+				body = body[:-1]
+
+			# don't break filenames on hyphens
+			self._wrapper.break_on_hyphens = False
+			output.write(_unicode_decode(
+				self._wrapper.fill(
+					'%s; %s %s:' % (date, author, ', '.join(changed)))))
+			# but feel free to break commit messages there
+			self._wrapper.break_on_hyphens = True
+			output.write(_unicode_decode(
+				'\n%s\n\n' % '\n'.join(self._wrapper.fill(x) for x in body)))
+
+		output.close()
+
+	def run(self):
+		"""
+		Update ChangeLogs for every package in the first porttree whose
+		newest git commit is newer than the existing ChangeLog's mtime.
+		Sets returncode to 127 when the tree is not a git repository.
+		"""
+		repo_path = self._portdb.porttrees[0]
+		os.chdir(repo_path)
+
+		if 'git' not in FindVCS():
+			writemsg_level(
+				"ERROR: --update-changelogs supported only in git repos\n",
+				level=logging.ERROR, noiselevel=-1)
+			self.returncode = 127
+			return
+
+		for cp in self._portdb.cp_all():
+			os.chdir(os.path.join(repo_path, cp))
+			# Determine whether ChangeLog is up-to-date by comparing
+			# the newest commit timestamp with the ChangeLog timestamp.
+			lmod = self.grab(['git', 'log', '--format=%ct', '-1', '.'])
+			if not lmod:
+				# This cp has not been added to the repo.
+				continue
+
+			try:
+				cmod = os.stat('ChangeLog').st_mtime
+			except OSError:
+				cmod = 0
+
+			if float(cmod) < float(lmod):
+				self.generate_changelog(cp)
+
+def egencache_main(args):
+	"""
+	Entry point for egencache: parse options, build a portage config
+	that is controlled purely by the command line, and run the selected
+	actions (--update, --update-use-local-desc, --update-changelogs).
+	Returns the worst (max) exit status of the actions run.
+	"""
+	parser, options, atoms = parse_args(args)
+
+	config_root = options.config_root
+	if config_root is None:
+		config_root = '/'
+
+	# The calling environment is ignored, so the program is
+	# completely controlled by commandline arguments.
+	env = {}
+
+	if options.repo is None:
+		env['PORTDIR_OVERLAY'] = ''
+
+	if options.cache_dir is not None:
+		env['PORTAGE_DEPCACHEDIR'] = options.cache_dir
+
+	if options.portdir is not None:
+		env['PORTDIR'] = options.portdir
+
+	settings = portage.config(config_root=config_root,
+		target_root='/', local_config=False, env=env)
+
+	default_opts = None
+	if not options.ignore_default_opts:
+		default_opts = settings.get('EGENCACHE_DEFAULT_OPTS', '').split()
+
+	if default_opts:
+		# Reparse with EGENCACHE_DEFAULT_OPTS prepended and rebuild the
+		# config, since the defaults may change config_root / cache dir.
+		parser, options, args = parse_args(default_opts + args)
+
+		if options.config_root is not None:
+			config_root = options.config_root
+
+		if options.cache_dir is not None:
+			env['PORTAGE_DEPCACHEDIR'] = options.cache_dir
+
+		settings = portage.config(config_root=config_root,
+			target_root='/', local_config=False, env=env)
+
+	if not options.update and not options.update_use_local_desc \
+			and not options.update_changelogs:
+		parser.error('No action specified')
+		return 1
+
+	if options.update and 'metadata-transfer' not in settings.features:
+		writemsg_level("ecachegen: warning: " + \
+			"automatically enabling FEATURES=metadata-transfer\n",
+			level=logging.WARNING, noiselevel=-1)
+		settings.features.add('metadata-transfer')
+
+	settings.lock()
+
+	portdb = portage.portdbapi(mysettings=settings)
+	if options.repo is not None:
+		repo_path = portdb.getRepositoryPath(options.repo)
+		if repo_path is None:
+			parser.error("Unable to locate repository named '%s'" % \
+				(options.repo,))
+			return 1
+
+		# Limit ebuilds to the specified repo.
+		portdb.porttrees = [repo_path]
+
+	# Collect an exit code per action; the maximum is returned.
+	ret = [os.EX_OK]
+
+	if options.update:
+		cp_iter = None
+		if atoms:
+			cp_iter = iter(atoms)
+
+		gen_cache = GenCache(portdb, cp_iter=cp_iter,
+			max_jobs=options.jobs,
+			max_load=options.load_average,
+			rsync=options.rsync)
+		gen_cache.run()
+		if options.tolerant:
+			ret.append(os.EX_OK)
+		else:
+			ret.append(gen_cache.returncode)
+
+	if options.update_use_local_desc:
+		gen_desc = GenUseLocalDesc(portdb,
+			output=options.uld_output,
+			preserve_comments=options.preserve_comments)
+		gen_desc.run()
+		ret.append(gen_desc.returncode)
+
+	if options.update_changelogs:
+		gen_clogs = GenChangeLogs(portdb)
+		gen_clogs.run()
+		ret.append(gen_clogs.returncode)
+
+	return max(ret)
+
+if __name__ == "__main__":
+ portage._disable_legacy_globals()
+ portage.util.noiselimit = -1
+ sys.exit(egencache_main(sys.argv[1:]))
diff --git a/portage_with_autodep/bin/emaint b/portage_with_autodep/bin/emaint
new file mode 100755
index 0000000..fdd01ed
--- /dev/null
+++ b/portage_with_autodep/bin/emaint
@@ -0,0 +1,654 @@
+#!/usr/bin/python -O
+# vim: noet :
+
+from __future__ import print_function
+
+import errno
+import re
+import signal
+import stat
+import sys
+import textwrap
+import time
+from optparse import OptionParser, OptionValueError
+
+try:
+ import portage
+except ImportError:
+ from os import path as osp
+ sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+ import portage
+
+from portage import os
+from portage.util import writemsg
+
+# Python 3 has no 'long' type; alias it to int so long() calls below work.
+if sys.hexversion >= 0x3000000:
+	long = int
+
+class WorldHandler(object):
+	"""
+	Validate and repair the world ('selected' set) file.
+	"""
+
+	short_desc = "Fix problems in the world file"
+
+	# Pre-decorator staticmethod idiom (shared by all emaint modules).
+	def name():
+		return "world"
+	name = staticmethod(name)
+
+	def __init__(self):
+		# Buckets filled in by _check_world():
+		self.invalid = []
+		self.not_installed = []
+		self.invalid_category = []
+		self.okay = []
+		from portage._sets import load_default_config
+		setconfig = load_default_config(portage.settings,
+			portage.db[portage.settings["ROOT"]])
+		self._sets = setconfig.getSets()
+
+	def _check_world(self, onProgress):
+		"""
+		Classify every entry of the 'selected' set as okay, invalid,
+		not installed, or having an unknown category.
+		"""
+		categories = set(portage.settings.categories)
+		myroot = portage.settings["ROOT"]
+		self.world_file = os.path.join(portage.settings["EROOT"], portage.const.WORLD_FILE)
+		self.found = os.access(self.world_file, os.R_OK)
+		vardb = portage.db[myroot]["vartree"].dbapi
+
+		from portage._sets import SETPREFIX
+		sets = self._sets
+		world_atoms = list(sets["selected"])
+		maxval = len(world_atoms)
+		if onProgress:
+			onProgress(maxval, 0)
+		for i, atom in enumerate(world_atoms):
+			if not isinstance(atom, portage.dep.Atom):
+				# Non-atom entries are either nested sets (@name) or junk.
+				if atom.startswith(SETPREFIX):
+					s = atom[len(SETPREFIX):]
+					if s in sets:
+						self.okay.append(atom)
+					else:
+						self.not_installed.append(atom)
+				else:
+					self.invalid.append(atom)
+				if onProgress:
+					onProgress(maxval, i+1)
+				continue
+			okay = True
+			if not vardb.match(atom):
+				self.not_installed.append(atom)
+				okay = False
+			if portage.catsplit(atom.cp)[0] not in categories:
+				self.invalid_category.append(atom)
+				okay = False
+			if okay:
+				self.okay.append(atom)
+			if onProgress:
+				onProgress(maxval, i+1)
+
+	def check(self, onProgress=None):
+		"""Return a list of problem descriptions (empty when healthy)."""
+		self._check_world(onProgress)
+		errors = []
+		if self.found:
+			errors += ["'%s' is not a valid atom" % x for x in self.invalid]
+			errors += ["'%s' is not installed" % x for x in self.not_installed]
+			errors += ["'%s' has a category that is not listed in /etc/portage/categories" % x for x in self.invalid_category]
+		else:
+			errors.append(self.world_file + " could not be opened for reading")
+		return errors
+
+	def fix(self, onProgress=None):
+		"""Rewrite the world file, keeping only entries found okay."""
+		world_set = self._sets["selected"]
+		world_set.lock()
+		try:
+			world_set.load() # maybe it's changed on disk
+			before = set(world_set)
+			self._check_world(onProgress)
+			after = set(self.okay)
+			errors = []
+			if before != after:
+				try:
+					world_set.replace(self.okay)
+				except portage.exception.PortageException:
+					errors.append("%s could not be opened for writing" % \
+						self.world_file)
+			return errors
+		finally:
+			world_set.unlock()
+
+class BinhostHandler(object):
+	"""
+	Keep the binary package Packages index in sync with the packages
+	actually present on disk.
+	"""
+
+	short_desc = "Generate a metadata index for binary packages"
+
+	def name():
+		return "binhost"
+	name = staticmethod(name)
+
+	def __init__(self):
+		myroot = portage.settings["ROOT"]
+		self._bintree = portage.db[myroot]["bintree"]
+		self._bintree.populate()
+		self._pkgindex_file = self._bintree._pkgindex_file
+		self._pkgindex = self._bintree._load_pkgindex()
+
+	def _need_update(self, cpv, data):
+		"""
+		Return True when the index entry *data* for *cpv* is missing
+		required fields or disagrees with the package file's current
+		size/mtime on disk; False when it is current or the file vanished.
+		"""
+
+		if "MD5" not in data:
+			return True
+
+		size = data.get("SIZE")
+		if size is None:
+			return True
+
+		mtime = data.get("MTIME")
+		if mtime is None:
+			return True
+
+		pkg_path = self._bintree.getname(cpv)
+		try:
+			s = os.lstat(pkg_path)
+		except OSError as e:
+			if e.errno not in (errno.ENOENT, errno.ESTALE):
+				raise
+			# We can't update the index for this one because
+			# it disappeared.
+			return False
+
+		try:
+			if long(mtime) != s[stat.ST_MTIME]:
+				return True
+			if long(size) != long(s.st_size):
+				return True
+		except ValueError:
+			return True
+
+		return False
+
+	def check(self, onProgress=None):
+		"""
+		Return error strings for packages missing from the Packages
+		index and for stale index entries whose files are gone.
+		"""
+		missing = []
+		cpv_all = self._bintree.dbapi.cpv_all()
+		cpv_all.sort()
+		maxval = len(cpv_all)
+		if onProgress:
+			onProgress(maxval, 0)
+		pkgindex = self._pkgindex
+		missing = []
+		metadata = {}
+		for d in pkgindex.packages:
+			metadata[d["CPV"]] = d
+		for i, cpv in enumerate(cpv_all):
+			d = metadata.get(cpv)
+			if not d or self._need_update(cpv, d):
+				missing.append(cpv)
+			if onProgress:
+				onProgress(maxval, i+1)
+		errors = ["'%s' is not in Packages" % cpv for cpv in missing]
+		stale = set(metadata).difference(cpv_all)
+		for cpv in stale:
+			errors.append("'%s' is not in the repository" % cpv)
+		return errors
+
+	def fix(self, onProgress=None):
+		"""
+		Rebuild missing/stale entries in the Packages index. The work is
+		redone under the pkgindex lock before the file is rewritten
+		atomically.
+		"""
+		bintree = self._bintree
+		cpv_all = self._bintree.dbapi.cpv_all()
+		cpv_all.sort()
+		missing = []
+		maxval = 0
+		if onProgress:
+			onProgress(maxval, 0)
+		pkgindex = self._pkgindex
+		missing = []
+		metadata = {}
+		for d in pkgindex.packages:
+			metadata[d["CPV"]] = d
+
+		for i, cpv in enumerate(cpv_all):
+			d = metadata.get(cpv)
+			if not d or self._need_update(cpv, d):
+				missing.append(cpv)
+
+		stale = set(metadata).difference(cpv_all)
+		if missing or stale:
+			from portage import locks
+			pkgindex_lock = locks.lockfile(
+				self._pkgindex_file, wantnewlockfile=1)
+			try:
+				# Repopulate with lock held.
+				bintree._populate()
+				cpv_all = self._bintree.dbapi.cpv_all()
+				cpv_all.sort()
+
+				pkgindex = bintree._load_pkgindex()
+				self._pkgindex = pkgindex
+
+				metadata = {}
+				for d in pkgindex.packages:
+					metadata[d["CPV"]] = d
+
+				# Recount missing packages, with lock held.
+				del missing[:]
+				for i, cpv in enumerate(cpv_all):
+					d = metadata.get(cpv)
+					if not d or self._need_update(cpv, d):
+						missing.append(cpv)
+
+				maxval = len(missing)
+				for i, cpv in enumerate(missing):
+					try:
+						metadata[cpv] = bintree._pkgindex_entry(cpv)
+					except portage.exception.InvalidDependString:
+						writemsg("!!! Invalid binary package: '%s'\n" % \
+							bintree.getname(cpv), noiselevel=-1)
+
+					if onProgress:
+						onProgress(maxval, i+1)
+
+				# Drop index entries for packages no longer on disk.
+				for cpv in set(metadata).difference(
+					self._bintree.dbapi.cpv_all()):
+					del metadata[cpv]
+
+				# We've updated the pkgindex, so set it to
+				# repopulate when necessary.
+				bintree.populated = False
+
+				del pkgindex.packages[:]
+				pkgindex.packages.extend(metadata.values())
+				from portage.util import atomic_ofstream
+				# atomic_ofstream replaces the target only on a clean close.
+				f = atomic_ofstream(self._pkgindex_file)
+				try:
+					self._pkgindex.write(f)
+				finally:
+					f.close()
+			finally:
+				locks.unlockfile(pkgindex_lock)
+
+		if onProgress:
+			if maxval == 0:
+				maxval = 1
+			onProgress(maxval, maxval)
+		return None
+
+class MoveHandler(object):
+
+ def __init__(self, tree, porttree):
+ self._tree = tree
+ self._portdb = porttree.dbapi
+ self._update_keys = ["DEPEND", "RDEPEND", "PDEPEND", "PROVIDE"]
+ self._master_repo = \
+ self._portdb.getRepositoryName(self._portdb.porttree_root)
+
+ def _grab_global_updates(self):
+ from portage.update import grab_updates, parse_updates
+ retupdates = {}
+ errors = []
+
+ for repo_name in self._portdb.getRepositories():
+ repo = self._portdb.getRepositoryPath(repo_name)
+ updpath = os.path.join(repo, "profiles", "updates")
+ if not os.path.isdir(updpath):
+ continue
+
+ try:
+ rawupdates = grab_updates(updpath)
+ except portage.exception.DirectoryNotFound:
+ rawupdates = []
+ upd_commands = []
+ for mykey, mystat, mycontent in rawupdates:
+ commands, errors = parse_updates(mycontent)
+ upd_commands.extend(commands)
+ errors.extend(errors)
+ retupdates[repo_name] = upd_commands
+
+ if self._master_repo in retupdates:
+ retupdates['DEFAULT'] = retupdates[self._master_repo]
+
+ return retupdates, errors
+
+ def check(self, onProgress=None):
+ allupdates, errors = self._grab_global_updates()
+ # Matching packages and moving them is relatively fast, so the
+ # progress bar is updated in indeterminate mode.
+ match = self._tree.dbapi.match
+ aux_get = self._tree.dbapi.aux_get
+ if onProgress:
+ onProgress(0, 0)
+ for repo, updates in allupdates.items():
+ if repo == 'DEFAULT':
+ continue
+ if not updates:
+ continue
+
+ def repo_match(repository):
+ return repository == repo or \
+ (repo == self._master_repo and \
+ repository not in allupdates)
+
+ for i, update_cmd in enumerate(updates):
+ if update_cmd[0] == "move":
+ origcp, newcp = update_cmd[1:]
+ for cpv in match(origcp):
+ if repo_match(aux_get(cpv, ["repository"])[0]):
+ errors.append("'%s' moved to '%s'" % (cpv, newcp))
+ elif update_cmd[0] == "slotmove":
+ pkg, origslot, newslot = update_cmd[1:]
+ for cpv in match(pkg):
+ slot, prepo = aux_get(cpv, ["SLOT", "repository"])
+ if slot == origslot and repo_match(prepo):
+ errors.append("'%s' slot moved from '%s' to '%s'" % \
+ (cpv, origslot, newslot))
+ if onProgress:
+ onProgress(0, 0)
+
+ # Searching for updates in all the metadata is relatively slow, so this
+ # is where the progress bar comes out of indeterminate mode.
+ cpv_all = self._tree.dbapi.cpv_all()
+ cpv_all.sort()
+ maxval = len(cpv_all)
+ aux_update = self._tree.dbapi.aux_update
+ meta_keys = self._update_keys + ['repository']
+ from portage.update import update_dbentries
+ if onProgress:
+ onProgress(maxval, 0)
+ for i, cpv in enumerate(cpv_all):
+ metadata = dict(zip(meta_keys, aux_get(cpv, meta_keys)))
+ repository = metadata.pop('repository')
+ try:
+ updates = allupdates[repository]
+ except KeyError:
+ try:
+ updates = allupdates['DEFAULT']
+ except KeyError:
+ continue
+ if not updates:
+ continue
+ metadata_updates = update_dbentries(updates, metadata)
+ if metadata_updates:
+ errors.append("'%s' has outdated metadata" % cpv)
+ if onProgress:
+ onProgress(maxval, i+1)
+ return errors
+
+ def fix(self, onProgress=None):
+ allupdates, errors = self._grab_global_updates()
+ # Matching packages and moving them is relatively fast, so the
+ # progress bar is updated in indeterminate mode.
+ move = self._tree.dbapi.move_ent
+ slotmove = self._tree.dbapi.move_slot_ent
+ if onProgress:
+ onProgress(0, 0)
+ for repo, updates in allupdates.items():
+ if repo == 'DEFAULT':
+ continue
+ if not updates:
+ continue
+
+ def repo_match(repository):
+ return repository == repo or \
+ (repo == self._master_repo and \
+ repository not in allupdates)
+
+ for i, update_cmd in enumerate(updates):
+ if update_cmd[0] == "move":
+ move(update_cmd, repo_match=repo_match)
+ elif update_cmd[0] == "slotmove":
+ slotmove(update_cmd, repo_match=repo_match)
+ if onProgress:
+ onProgress(0, 0)
+
+ # Searching for updates in all the metadata is relatively slow, so this
+ # is where the progress bar comes out of indeterminate mode.
+ self._tree.dbapi.update_ents(allupdates, onProgress=onProgress)
+ return errors
+
+class MoveInstalled(MoveHandler):
+	"""MoveHandler bound to the installed-packages tree (vartree)."""
+
+	short_desc = "Perform package move updates for installed packages"
+
+	def name():
+		return "moveinst"
+	name = staticmethod(name)
+	def __init__(self):
+		myroot = portage.settings["ROOT"]
+		MoveHandler.__init__(self, portage.db[myroot]["vartree"], portage.db[myroot]["porttree"])
+
+class MoveBinary(MoveHandler):
+	"""MoveHandler bound to the binary-packages tree (bintree)."""
+
+	short_desc = "Perform package move updates for binary packages"
+
+	def name():
+		return "movebin"
+	name = staticmethod(name)
+	def __init__(self):
+		myroot = portage.settings["ROOT"]
+		MoveHandler.__init__(self, portage.db[myroot]["bintree"], portage.db[myroot]["porttree"])
+
+class VdbKeyHandler(object):
+	"""
+	Find installed packages whose vdb entries lack all of the cached
+	metadata key files and regenerate them from environment.bz2.
+	NOTE(review): this handler is not registered in emaint_main's module
+	table below, so it is currently unreachable from the command line.
+	"""
+	def name():
+		return "vdbkeys"
+	name = staticmethod(name)
+
+	def __init__(self):
+		self.list = portage.db["/"]["vartree"].dbapi.cpv_all()
+		self.missing = []
+		self.keys = ["HOMEPAGE", "SRC_URI", "KEYWORDS", "DESCRIPTION"]
+
+		# A package is "missing" only if none of the key files exist.
+		for p in self.list:
+			mydir = os.path.join(portage.settings["EROOT"], portage.const.VDB_PATH, p)+os.sep
+			ismissing = True
+			for k in self.keys:
+				if os.path.exists(mydir+k):
+					ismissing = False
+					break
+			if ismissing:
+				self.missing.append(p)
+
+	def check(self):
+		"""Return one message per package with missing key files."""
+		return ["%s has missing keys" % x for x in self.missing]
+
+	def fix(self):
+		"""
+		Extract the missing keys from each package's environment.bz2 and
+		write them as individual vdb files. Returns error strings.
+		"""
+
+		errors = []
+
+		for p in self.missing:
+			mydir = os.path.join(portage.settings["EROOT"], portage.const.VDB_PATH, p)+os.sep
+			if not os.access(mydir+"environment.bz2", os.R_OK):
+				errors.append("Can't access %s" % (mydir+"environment.bz2"))
+			elif not os.access(mydir, os.W_OK):
+				errors.append("Can't create files in %s" % mydir)
+			else:
+				# NOTE(review): os.popen runs an unquoted shell command; a
+				# path with shell metacharacters would break this. The bz2
+				# module would be safer -- behavior kept as-is here.
+				env = os.popen("bzip2 -dcq "+mydir+"environment.bz2", "r")
+				envlines = env.read().split("\n")
+				env.close()
+				for k in self.keys:
+					s = [l for l in envlines if l.startswith(k+"=")]
+					if len(s) > 1:
+						errors.append("multiple matches for %s found in %senvironment.bz2" % (k, mydir))
+					elif len(s) == 0:
+						s = ""
+					else:
+						# Strip quoting and collapse escaped whitespace.
+						s = s[0].split("=",1)[1]
+						s = s.lstrip("$").strip("\'\"")
+						s = re.sub("(\\\\[nrt])+", " ", s)
+						s = " ".join(s.split()).strip()
+						if s != "":
+							try:
+								# NOTE(review): mydir already ends with os.sep,
+								# so this path has a doubled separator (harmless
+								# on POSIX, but inconsistent with the mydir+k
+								# used in the error message below).
+								keyfile = open(mydir+os.sep+k, "w")
+								keyfile.write(s+"\n")
+								keyfile.close()
+							except (IOError, OSError) as e:
+								errors.append("Could not write %s, reason was: %s" % (mydir+k, e))
+
+		return errors
+
+class ProgressHandler(object):
+	"""
+	Rate-limited progress reporter: onProgress() records the latest
+	(maxval, curval) pair and invokes display() at most once every
+	min_display_latency seconds. display() must be supplied by a
+	subclass or assigned on the instance.
+	"""
+	def __init__(self):
+		self.curval = 0
+		self.maxval = 0
+		self.last_update = 0
+		self.min_display_latency = 0.2
+
+	def onProgress(self, maxval, curval):
+		self.maxval = maxval
+		self.curval = curval
+		cur_time = time.time()
+		if cur_time - self.last_update >= self.min_display_latency:
+			self.last_update = cur_time
+			self.display()
+
+	def display(self):
+		raise NotImplementedError(self)
+
+class CleanResume(object):
+	"""
+	Inspect (check) and discard (fix) the emerge --resume merge lists
+	stored under the 'resume' and 'resume_backup' mtimedb keys.
+	"""
+
+	short_desc = "Discard emerge --resume merge lists"
+
+	def name():
+		return "cleanresume"
+	name = staticmethod(name)
+
+	def check(self, onProgress=None):
+		"""Describe any resume lists present without modifying them."""
+		messages = []
+		mtimedb = portage.mtimedb
+		resume_keys = ("resume", "resume_backup")
+		maxval = len(resume_keys)
+		if onProgress:
+			onProgress(maxval, 0)
+		for i, k in enumerate(resume_keys):
+			try:
+				d = mtimedb.get(k)
+				if d is None:
+					continue
+				if not isinstance(d, dict):
+					messages.append("unrecognized resume list: '%s'" % k)
+					continue
+				mergelist = d.get("mergelist")
+				if mergelist is None or not hasattr(mergelist, "__len__"):
+					messages.append("unrecognized resume list: '%s'" % k)
+					continue
+				messages.append("resume list '%s' contains %d packages" % \
+					(k, len(mergelist)))
+			finally:
+				# try/finally so progress advances even on 'continue'.
+				if onProgress:
+					onProgress(maxval, i+1)
+		return messages
+
+	def fix(self, onProgress=None):
+		"""Delete the resume lists, committing the mtimedb if changed."""
+		delete_count = 0
+		mtimedb = portage.mtimedb
+		resume_keys = ("resume", "resume_backup")
+		maxval = len(resume_keys)
+		if onProgress:
+			onProgress(maxval, 0)
+		for i, k in enumerate(resume_keys):
+			try:
+				if mtimedb.pop(k, None) is not None:
+					delete_count += 1
+			finally:
+				if onProgress:
+					onProgress(maxval, i+1)
+		if delete_count:
+			mtimedb.commit()
+
+def emaint_main(myargv):
+	"""
+	Entry point for the emaint command line: build the option parser,
+	select the requested module(s) and run check() or fix() on each,
+	drawing a progress bar on interactive terminals.
+	"""
+
+	# Similar to emerge, emaint needs a default umask so that created
+	# files (such as the world file) have sane permissions.
+	os.umask(0o22)
+
+	# TODO: Create a system that allows external modules to be added without
+	# the need for hard coding.
+	modules = {
+		"world" : WorldHandler,
+		"binhost":BinhostHandler,
+		"moveinst":MoveInstalled,
+		"movebin":MoveBinary,
+		"cleanresume":CleanResume
+	}
+
+	module_names = list(modules)
+	module_names.sort()
+	module_names.insert(0, "all")
+
+	def exclusive(option, *args, **kw):
+		# optparse callback enforcing that -c and -f are mutually
+		# exclusive; the chosen option is recorded on the parser itself.
+		var = kw.get("var", None)
+		if var is None:
+			raise ValueError("var not specified to exclusive()")
+		if getattr(parser, var, ""):
+			raise OptionValueError("%s and %s are exclusive options" % (getattr(parser, var), option))
+		setattr(parser, var, str(option))
+
+
+	usage = "usage: emaint [options] COMMAND"
+
+	desc = "The emaint program provides an interface to system health " + \
+		"checks and maintenance. See the emaint(1) man page " + \
+		"for additional information about the following commands:"
+
+	usage += "\n\n"
+	for line in textwrap.wrap(desc, 65):
+		usage += "%s\n" % line
+	usage += "\n"
+	usage += " %s" % "all".ljust(15) + \
+		"Perform all supported commands\n"
+	for m in module_names[1:]:
+		usage += " %s%s\n" % (m.ljust(15), modules[m].short_desc)
+
+	parser = OptionParser(usage=usage, version=portage.VERSION)
+	parser.add_option("-c", "--check", help="check for problems",
+		action="callback", callback=exclusive, callback_kwargs={"var":"action"})
+	parser.add_option("-f", "--fix", help="attempt to fix problems",
+		action="callback", callback=exclusive, callback_kwargs={"var":"action"})
+	# The exclusive() callback stores the chosen option string here.
+	parser.action = None
+
+
+	(options, args) = parser.parse_args(args=myargv)
+	if len(args) != 1:
+		parser.error("Incorrect number of arguments")
+	if args[0] not in module_names:
+		parser.error("%s target is not a known target" % args[0])
+
+	if parser.action:
+		action = parser.action
+	else:
+		print("Defaulting to --check")
+		action = "-c/--check"
+
+	if args[0] == "all":
+		tasks = modules.values()
+	else:
+		tasks = [modules[args[0]]]
+
+
+	if action == "-c/--check":
+		status = "Checking %s for problems"
+		func = "check"
+	else:
+		status = "Attempting to fix %s"
+		func = "fix"
+
+	# Only draw a progress bar on a real terminal.
+	isatty = os.environ.get('TERM') != 'dumb' and sys.stdout.isatty()
+	for task in tasks:
+		print(status % task.name())
+		inst = task()
+		onProgress = None
+		if isatty:
+			progressBar = portage.output.TermProgressBar()
+			progressHandler = ProgressHandler()
+			onProgress = progressHandler.onProgress
+			def display():
+				progressBar.set(progressHandler.curval, progressHandler.maxval)
+			progressHandler.display = display
+			def sigwinch_handler(signum, frame):
+				# Track terminal resizes while the bar is displayed.
+				lines, progressBar.term_columns = \
+					portage.output.get_term_size()
+			signal.signal(signal.SIGWINCH, sigwinch_handler)
+		result = getattr(inst, func)(onProgress=onProgress)
+		if isatty:
+			# make sure the final progress is displayed
+			progressHandler.display()
+			print()
+			signal.signal(signal.SIGWINCH, signal.SIG_DFL)
+		if result:
+			print()
+			print("\n".join(result))
+			print("\n")
+
+	print("Finished")
+
+if __name__ == "__main__":
+ emaint_main(sys.argv[1:])
diff --git a/portage_with_autodep/bin/emerge b/portage_with_autodep/bin/emerge
new file mode 100755
index 0000000..6f69244
--- /dev/null
+++ b/portage_with_autodep/bin/emerge
@@ -0,0 +1,66 @@
+#!/usr/bin/python
+# Copyright 2006-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import signal
+import sys
+# This block ensures that ^C interrupts are handled quietly.
+try:
+
+	def exithandler(signum,frame):
+		# Exit with the conventional 128+signal status, ignoring further
+		# SIGINT/SIGTERM while shutting down.
+		signal.signal(signal.SIGINT, signal.SIG_IGN)
+		signal.signal(signal.SIGTERM, signal.SIG_IGN)
+		sys.exit(128 + signum)
+
+	signal.signal(signal.SIGINT, exithandler)
+	signal.signal(signal.SIGTERM, exithandler)
+	# Prevent "[Errno 32] Broken pipe" exceptions when
+	# writing to a pipe.
+	signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+except KeyboardInterrupt:
+	# ^C arrived before the handlers were installed.
+	sys.exit(128 + signal.SIGINT)
+
+def debug_signal(signum, frame):
+	# SIGUSR1 drops the process into pdb for live debugging.
+	import pdb
+	pdb.set_trace()
+signal.signal(signal.SIGUSR1, debug_signal)
+
+try:
+ from _emerge.main import emerge_main
+except ImportError:
+ from os import path as osp
+ import sys
+ sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+ from _emerge.main import emerge_main
+
+if __name__ == "__main__":
+ import sys
+ from portage.exception import ParseError, PermissionDenied
+ try:
+ retval = emerge_main()
+ except PermissionDenied as e:
+ sys.stderr.write("Permission denied: '%s'\n" % str(e))
+ sys.exit(e.errno)
+ except ParseError as e:
+ sys.stderr.write("%s\n" % str(e))
+ sys.exit(1)
+ except SystemExit:
+ raise
+ except Exception:
+ # If an unexpected exception occurs then we don't want the mod_echo
+ # output to obscure the traceback, so dump the mod_echo output before
+ # showing the traceback.
+ import traceback
+ tb_str = traceback.format_exc()
+ try:
+ from portage.elog import mod_echo
+ except ImportError:
+ pass
+ else:
+ mod_echo.finalize()
+ sys.stderr.write(tb_str)
+ sys.exit(1)
+ sys.exit(retval)
diff --git a/portage_with_autodep/bin/emerge-webrsync b/portage_with_autodep/bin/emerge-webrsync
new file mode 100755
index 0000000..d933871
--- /dev/null
+++ b/portage_with_autodep/bin/emerge-webrsync
@@ -0,0 +1,457 @@
+#!/bin/bash
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# Author: Karl Trygve Kalleberg <karltk@gentoo.org>
+# Rewritten from the old, Perl-based emerge-webrsync script
+# Author: Alon Bar-Lev <alon.barlev@gmail.com>
+# Major rewrite from Karl's scripts.
+
+# TODO:
+# - all output should prob be converted to e* funcs
+# - add support for ROOT
+
+#
+# gpg key import
+# KEY_ID=0x239C75C4
+# gpg --homedir /etc/portage/gnupg --keyserver subkeys.pgp.net --recv-keys $KEY_ID
+# gpg --homedir /etc/portage/gnupg --edit-key $KEY_ID trust
+#
+
+# Only echo if in verbose mode
+vvecho() { [[ ${do_verbose} -eq 1 ]] && echo "$@" ; }
+# Only echo if not in verbose mode
+nvecho() { [[ ${do_verbose} -eq 0 ]] && echo "$@" ; }
+# warning echos
+wecho() { echo "${argv0}: warning: $*" 1>&2 ; }
+# error echos
+eecho() { echo "${argv0}: error: $*" 1>&2 ; }
+
+argv0=$0
+if ! type -P portageq > /dev/null ; then
+ eecho "could not find 'portageq'; aborting"
+ exit 1
+fi
+eval $(portageq envvar -v FEATURES FETCHCOMMAND GENTOO_MIRRORS \
+ PORTAGE_BIN_PATH PORTAGE_GPG_DIR \
+ PORTAGE_NICENESS PORTAGE_RSYNC_EXTRA_OPTS PORTAGE_TMPDIR PORTDIR \
+ SYNC http_proxy ftp_proxy)
+DISTDIR="${PORTAGE_TMPDIR}/emerge-webrsync"
+export http_proxy ftp_proxy
+
+# If PORTAGE_NICENESS is overridden via the env then it will
+# still pass through the portageq call and override properly.
+if [ -n "${PORTAGE_NICENESS}" ]; then
+ renice $PORTAGE_NICENESS $$ > /dev/null
+fi
+
+source "${PORTAGE_BIN_PATH}"/isolated-functions.sh || exit 1
+
+do_verbose=0
+do_debug=0
+
+if has webrsync-gpg ${FEATURES} ; then
+ WEBSYNC_VERIFY_SIGNATURE=1
+else
+ WEBSYNC_VERIFY_SIGNATURE=0
+fi
+if [ ${WEBSYNC_VERIFY_SIGNATURE} != 0 -a -z "${PORTAGE_GPG_DIR}" ]; then
+ eecho "please set PORTAGE_GPG_DIR in make.conf"
+ exit 1
+fi
+
+do_tar() {
+ local file=$1; shift
+ local decompressor
+ case ${file} in
+ *.xz) decompressor="xzcat" ;;
+ *.bz2) decompressor="bzcat" ;;
+ *.gz) decompressor="zcat" ;;
+ *) decompressor="cat" ;;
+ esac
+ ${decompressor} "${file}" | tar "$@"
+ _pipestatus=${PIPESTATUS[*]}
+ [[ ${_pipestatus// /} -eq 0 ]]
+}
+
+get_utc_date_in_seconds() {
+ date -u +"%s"
+}
+
+get_date_part() {
+ local utc_time_in_secs="$1"
+ local part="$2"
+
+ if [[ ${USERLAND} == BSD ]] ; then
+ date -r ${utc_time_in_secs} -u +"${part}"
+ else
+ date -d @${utc_time_in_secs} -u +"${part}"
+ fi
+}
+
+get_utc_second_from_string() {
+ local s="$1"
+ if [[ ${USERLAND} == BSD ]] ; then
+ date -juf "%Y%m%d" "$s" +"%s"
+ else
+ date -d "${s:0:4}-${s:4:2}-${s:6:2}" -u +"%s"
+ fi
+}
+
+get_portage_timestamp() {
+ local portage_current_timestamp=0
+
+ if [ -f "${PORTDIR}/metadata/timestamp.x" ]; then
+ portage_current_timestamp=$(cut -f 1 -d " " "${PORTDIR}/metadata/timestamp.x" )
+ fi
+
+ echo "${portage_current_timestamp}"
+}
+
+fetch_file() {
+ local URI="$1"
+ local FILE="$2"
+ local opts
+
+ if [ "${FETCHCOMMAND/wget/}" != "${FETCHCOMMAND}" ]; then
+ opts="--continue $(nvecho -q)"
+ elif [ "${FETCHCOMMAND/curl/}" != "${FETCHCOMMAND}" ]; then
+ opts="--continue-at - $(nvecho -s -f)"
+ else
+ rm -f "${FILE}"
+ fi
+
+ vecho "Fetching file ${FILE} ..."
+ # already set DISTDIR=
+ eval "${FETCHCOMMAND}" ${opts}
+ [ -s "${FILE}" ]
+}
+
+check_file_digest() {
+ local digest="$1"
+ local file="$2"
+ local r=1
+
+ vecho "Checking digest ..."
+
+ if type -P md5sum > /dev/null; then
+ md5sum -c $digest && r=0
+ elif type -P md5 > /dev/null; then
+ [ "$(md5 -q "${file}")" == "$(cut -d ' ' -f 1 "${digest}")" ] && r=0
+ else
+ eecho "cannot check digest: no suitable md5/md5sum binaries found"
+ fi
+
+ return "${r}"
+}
+
+check_file_signature() {
+ local signature="$1"
+ local file="$2"
+ local r=1
+
+ if [ ${WEBSYNC_VERIFY_SIGNATURE} != 0 ]; then
+
+ vecho "Checking signature ..."
+
+ if type -P gpg > /dev/null; then
+ gpg --homedir "${PORTAGE_GPG_DIR}" --verify "$signature" "$file" && r=0
+ else
+ eecho "cannot check signature: gpg binary not found"
+ fi
+ else
+ r=0
+ fi
+
+ return "${r}"
+}
+
+get_snapshot_timestamp() {
+ local file="$1"
+
+ do_tar "${file}" --to-stdout -xf - portage/metadata/timestamp.x | cut -f 1 -d " "
+}
+
+sync_local() {
+ local file="$1"
+
+ vecho "Syncing local tree ..."
+
+ if type -P tarsync > /dev/null ; then
+ local chown_opts="-o portage -g portage"
+ chown portage:portage portage > /dev/null 2>&1 || chown_opts=""
+ if ! tarsync $(vvecho -v) -s 1 ${chown_opts} \
+ -e /distfiles -e /packages -e /local "${file}" "${PORTDIR}"; then
+ eecho "tarsync failed; tarball is corrupt? (${file})"
+ return 1
+ fi
+ else
+ if ! do_tar "${file}" xf -; then
+ eecho "tar failed to extract the image. tarball is corrupt? (${file})"
+ rm -fr portage
+ return 1
+ fi
+
+ # Free disk space
+ rm -f "${file}"
+
+ chown portage:portage portage > /dev/null 2>&1 && \
+ chown -R portage:portage portage
+ cd portage
+ rsync -av --progress --stats --delete --delete-after \
+ --exclude='/distfiles' --exclude='/packages' \
+ --exclude='/local' ${PORTAGE_RSYNC_EXTRA_OPTS} . "${PORTDIR%%/}"
+ cd ..
+
+ vecho "Cleaning up ..."
+ rm -fr portage
+ fi
+
+ if has metadata-transfer ${FEATURES} ; then
+ vecho "Updating cache ..."
+ emerge --metadata
+ fi
+ [ -x /etc/portage/bin/post_sync ] && /etc/portage/bin/post_sync
+ return 0
+}
+
+do_snapshot() {
+ local ignore_timestamp="$1"
+ local date="$2"
+
+ local r=1
+
+ local base_file="portage-${date}.tar"
+
+ local have_files=0
+ local mirror
+
+ local compressions=""
+ # xz is not supported in app-arch/tarsync, so use
+ # bz2 format if we have tarsync.
+ if ! type -P tarsync > /dev/null ; then
+ type -P xzcat > /dev/null && compressions="${compressions} xz"
+ fi
+ type -P bzcat > /dev/null && compressions="${compressions} bz2"
+ type -P zcat > /dev/null && compressions="${compressions} gz"
+ if [[ -z ${compressions} ]] ; then
+ eecho "unable to locate any decompressors (xzcat or bzcat or zcat)"
+ exit 1
+ fi
+
+ for mirror in ${GENTOO_MIRRORS} ; do
+
+ vecho "Trying to retrieve ${date} snapshot from ${mirror} ..."
+
+ for compression in ${compressions} ; do
+ local file="portage-${date}.tar.${compression}"
+ local digest="${file}.md5sum"
+ local signature="${file}.gpgsig"
+
+ if [ -s "${file}" -a -s "${digest}" -a -s "${signature}" ] ; then
+ check_file_digest "${DISTDIR}/${digest}" "${DISTDIR}/${file}" && \
+ check_file_signature "${DISTDIR}/${signature}" "${DISTDIR}/${file}" && \
+ have_files=1
+ fi
+
+ if [ ${have_files} -eq 0 ] ; then
+ fetch_file "${mirror}/snapshots/${digest}" "${digest}" && \
+ fetch_file "${mirror}/snapshots/${signature}" "${signature}" && \
+ fetch_file "${mirror}/snapshots/${file}" "${file}" && \
+ check_file_digest "${DISTDIR}/${digest}" "${DISTDIR}/${file}" && \
+ check_file_signature "${DISTDIR}/${signature}" "${DISTDIR}/${file}" && \
+ have_files=1
+ fi
+
+ #
+ # If timestamp is invalid
+ # we want to try and retrieve
+ # from a different mirror
+ #
+ if [ ${have_files} -eq 1 ]; then
+
+ vecho "Getting snapshot timestamp ..."
+ local snapshot_timestamp=$(get_snapshot_timestamp "${file}")
+
+ if [ ${ignore_timestamp} == 0 ]; then
+ if [ ${snapshot_timestamp} -lt $(get_portage_timestamp) ]; then
+ wecho "portage is newer than snapshot"
+ have_files=0
+ fi
+ else
+ local utc_seconds=$(get_utc_second_from_string "${date}")
+
+ #
+ # Check that this snapshot
+ # is what it claims to be ...
+ #
+ if [ ${snapshot_timestamp} -lt ${utc_seconds} ] || \
+ [ ${snapshot_timestamp} -gt $((${utc_seconds}+ 2*86400)) ]; then
+
+ wecho "snapshot timestamp is not in acceptable period"
+ have_files=0
+ fi
+ fi
+ fi
+
+ if [ ${have_files} -eq 1 ]; then
+ break
+ else
+ #
+ # Remove files and use a different mirror
+ #
+ rm -f "${file}" "${digest}" "${signature}"
+ fi
+ done
+
+ [ ${have_files} -eq 1 ] && break
+ done
+
+ if [ ${have_files} -eq 1 ]; then
+ sync_local "${file}" && r=0
+ else
+ vecho "${date} snapshot was not found"
+ fi
+
+ rm -f "${file}" "${digest}" "${signature}"
+ return "${r}"
+}
+
+do_latest_snapshot() {
+ local attempts=0
+ local r=1
+
+ vecho "Fetching most recent snapshot ..."
+
+ # The snapshot for a given day is generated at 01:45 UTC on the following
+ # day, so the current day's snapshot (going by UTC time) hasn't been
+ # generated yet. Therefore, always start by looking for the previous day's
+ # snapshot (for attempts=1, subtract 1 day from the current UTC time).
+
+ # Timestamps that differ by less than 2 hours
+ # are considered to be approximately equal.
+ local min_time_diff=$(( 2 * 60 * 60 ))
+
+ local existing_timestamp=$(get_portage_timestamp)
+ local timestamp_difference
+ local timestamp_problem
+ local approx_snapshot_time
+ local start_time=$(get_utc_date_in_seconds)
+ local start_hour=$(get_date_part ${start_time} "%H")
+
+ # Daily snapshots are created at 1:45 AM and are not
+ # available until after 2 AM. Don't waste time trying
+ # to fetch a snapshot before it's been created.
+ if [ ${start_hour} -lt 2 ] ; then
+ (( start_time -= 86400 ))
+ fi
+ local snapshot_date=$(get_date_part ${start_time} "%Y%m%d")
+ local snapshot_date_seconds=$(get_utc_second_from_string ${snapshot_date})
+
+ while (( ${attempts} < 40 )) ; do
+ (( attempts++ ))
+ (( snapshot_date_seconds -= 86400 ))
+ # snapshots are created at 1:45 AM
+ (( approx_snapshot_time = snapshot_date_seconds + 86400 + 6300 ))
+ (( timestamp_difference = existing_timestamp - approx_snapshot_time ))
+ [ ${timestamp_difference} -lt 0 ] && (( timestamp_difference = -1 * timestamp_difference ))
+ snapshot_date=$(get_date_part ${snapshot_date_seconds} "%Y%m%d")
+
+ timestamp_problem=""
+ if [ ${timestamp_difference} -eq 0 ]; then
+ timestamp_problem="is identical to"
+ elif [ ${timestamp_difference} -lt ${min_time_diff} ]; then
+ timestamp_problem="is possibly identical to"
+ elif [ ${approx_snapshot_time} -lt ${existing_timestamp} ] ; then
+ timestamp_problem="is newer than"
+ fi
+
+ if [ -n "${timestamp_problem}" ]; then
+ ewarn "Latest snapshot date: ${snapshot_date}"
+ ewarn
+ ewarn "Approximate snapshot timestamp: ${approx_snapshot_time}"
+ ewarn " Current local timestamp: ${existing_timestamp}"
+ ewarn
+ echo -e "The current local timestamp" \
+ "${timestamp_problem} the" \
+ "timestamp of the latest" \
+ "snapshot. In order to force sync," \
+ "use the --revert option or remove" \
+ "the timestamp file located at" \
+ "'${PORTDIR}/metadata/timestamp.x'." | fmt -w 70 | \
+ while read -r line ; do
+ ewarn "${line}"
+ done
+ r=0
+ break
+ fi
+
+ if do_snapshot 0 "${snapshot_date}"; then
+ r=0
+ break;
+ fi
+ done
+
+ return "${r}"
+}
+
+usage() {
+ cat <<-EOF
+ Usage: $0 [options]
+
+ Options:
+ --revert=yyyymmdd Revert to snapshot
+ -q, --quiet Only output errors
+ -v, --verbose Enable verbose output
+ -x, --debug Enable debug output
+ -h, --help This help screen (duh!)
+ EOF
+ if [[ -n $* ]] ; then
+ printf "\nError: %s\n" "$*" 1>&2
+ exit 1
+ else
+ exit 0
+ fi
+}
+
+main() {
+ local arg
+ local revert_date
+
+ [ ! -d "${DISTDIR}" ] && mkdir -p "${DISTDIR}"
+ cd "${DISTDIR}"
+
+ for arg in "$@" ; do
+ local v=${arg#*=}
+ case ${arg} in
+ -h|--help) usage ;;
+ -q|--quiet) PORTAGE_QUIET=1 ;;
+ -v|--verbose) do_verbose=1 ;;
+ -x|--debug) do_debug=1 ;;
+ --revert=*) revert_date=${v} ;;
+ *) usage "Invalid option '${arg}'" ;;
+ esac
+ done
+
+ # This is a sanity check to help prevent people like funtoo users
+ # from accidentally wiping out their git tree.
+ if [[ -n $SYNC && ${SYNC#rsync:} = $SYNC ]] ; then
+ echo "The current SYNC variable setting does not refer to an rsync URI:" >&2
+ echo >&2
+ echo " SYNC=$SYNC" >&2
+ echo >&2
+ echo "If you intend to use emerge-webrsync then please" >&2
+ echo "adjust SYNC to refer to an rsync URI." >&2
+ echo "emerge-webrsync exiting due to abnormal SYNC setting." >&2
+ exit 1
+ fi
+
+ [[ ${do_debug} -eq 1 ]] && set -x
+
+ if [[ -n ${revert_date} ]] ; then
+ do_snapshot 1 "${revert_date}"
+ else
+ do_latest_snapshot
+ fi
+}
+
+main "$@"
diff --git a/portage_with_autodep/bin/env-update b/portage_with_autodep/bin/env-update
new file mode 100755
index 0000000..8a69f2b
--- /dev/null
+++ b/portage_with_autodep/bin/env-update
@@ -0,0 +1,41 @@
+#!/usr/bin/python -O
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import errno
+import sys
+
+def usage(status):
+ print("Usage: env-update [--no-ldconfig]")
+ print("")
+ print("See the env-update(1) man page for more info")
+ sys.exit(status)
+
+if "-h" in sys.argv or "--help" in sys.argv:
+ usage(0)
+
+makelinks=1
+if "--no-ldconfig" in sys.argv:
+ makelinks=0
+ sys.argv.pop(sys.argv.index("--no-ldconfig"))
+
+if len(sys.argv) > 1:
+ print("!!! Invalid command line options!\n")
+ usage(1)
+
+try:
+ import portage
+except ImportError:
+ from os import path as osp
+ sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+ import portage
+try:
+ portage.env_update(makelinks)
+except IOError as e:
+ if e.errno == errno.EACCES:
+ print("env-update: Need superuser access")
+ sys.exit(1)
+ else:
+ raise
diff --git a/portage_with_autodep/bin/etc-update b/portage_with_autodep/bin/etc-update
new file mode 100755
index 0000000..42518ad
--- /dev/null
+++ b/portage_with_autodep/bin/etc-update
@@ -0,0 +1,616 @@
+#!/bin/bash
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+# Author Brandon Low <lostlogic@gentoo.org>
+#
+# Previous version (from which I've borrowed a few bits) by:
+# Jochem Kossen <j.kossen@home.nl>
+# Leo Lipelis <aeoo@gentoo.org>
+# Karl Trygve Kalleberg <karltk@gentoo.org>
+
+cd /
+
+if type -P gsed >/dev/null ; then
+ sed() { gsed "$@"; }
+fi
+
+get_config() {
+ # the sed here does:
+ # - strip off comments
+ # - match lines that set item in question
+ # - delete the "item =" part
+ # - store the actual value into the hold space
+ # - on the last line, restore the hold space and print it
+ # If there's more than one of the same configuration item, then
+ # the store to the hold space clobbers previous value so the last
+ # setting takes precedence.
+ local item=$1
+ eval echo $(sed -n \
+ -e 's:[[:space:]]*#.*$::' \
+ -e "/^[[:space:]]*$item[[:space:]]*=/{s:[^=]*=[[:space:]]*\([\"']\{0,1\}\)\(.*\)\1:\2:;h}" \
+ -e '${g;p}' \
+ "${PORTAGE_CONFIGROOT}"etc/etc-update.conf)
+}
+
+diff_command() {
+ local cmd=${diff_command//%file1/$1}
+ ${cmd//%file2/$2}
+}
+
+scan() {
+ echo "Scanning Configuration files..."
+ rm -rf ${TMP}/files > /dev/null 2>&1
+ mkdir ${TMP}/files || die "Failed mkdir command!" 1
+ count=0
+ input=0
+ local find_opts
+ local my_basename
+
+ for path in ${CONFIG_PROTECT} ; do
+ path="${ROOT}${path}"
+ # Do not traverse hidden directories such as .svn or .git.
+ find_opts="-name .* -type d -prune -o -name ._cfg????_*"
+ if [ ! -d "${path}" ]; then
+ [ ! -f "${path}" ] && continue
+ my_basename="${path##*/}"
+ path="${path%/*}"
+ find_opts="-maxdepth 1 -name ._cfg????_${my_basename}"
+ fi
+
+ ofile=""
+ # The below set -f turns off file name globbing in the ${find_opts} expansion.
+ for file in $(set -f ; find ${path}/ ${find_opts} \
+ ! -name '.*~' ! -iname '.*.bak' -print |
+ sed -e "s:\(^.*/\)\(\._cfg[0-9]*_\)\(.*$\):\1\2\3\%\1%\2\%\3:" |
+ sort -t'%' -k2,2 -k4,4 -k3,3 | LANG=POSIX LC_ALL=POSIX cut -f1 -d'%'); do
+
+ rpath=$(echo "${file/\/\///}" | sed -e "s:/[^/]*$::")
+ rfile=$(echo "${file/\/\///}" | sed -e "s:^.*/::")
+ for mpath in ${CONFIG_PROTECT_MASK}; do
+ mpath="${ROOT}${mpath}"
+ mpath=$(echo "${mpath/\/\///}")
+ if [[ "${rpath}" == "${mpath}"* ]]; then
+ mv ${rpath}/${rfile} ${rpath}/${rfile:10}
+ break
+ fi
+ done
+ if [[ ! -f ${file} ]] ; then
+ echo "Skipping non-file ${file} ..."
+ continue
+ fi
+
+ if [[ "${ofile:10}" != "${rfile:10}" ]] ||
+ [[ ${opath} != ${rpath} ]]; then
+ MATCHES=0
+ if [[ "${EU_AUTOMERGE}" == "yes" ]]; then
+ if [ ! -e "${rpath}/${rfile}" ] || [ ! -e "${rpath}/${rfile:10}" ]; then
+ MATCHES=0
+ else
+ diff -Bbua ${rpath}/${rfile} ${rpath}/${rfile:10} | egrep '^[+-]' | egrep -v '^[+-][\t ]*#|^--- |^\+\+\+ ' | egrep -qv '^[-+][\t ]*$'
+ MATCHES=$?
+ fi
+ elif [[ -z $(diff -Nua ${rpath}/${rfile} ${rpath}/${rfile:10}|
+ grep "^[+-][^+-]"|grep -v '# .Header:.*') ]]; then
+ MATCHES=1
+ fi
+ if [[ "${MATCHES}" == "1" ]]; then
+ echo "Automerging trivial changes in: ${rpath}/${rfile:10}"
+ mv ${rpath}/${rfile} ${rpath}/${rfile:10}
+ continue
+ else
+ count=${count}+1
+ echo "${rpath}/${rfile:10}" > ${TMP}/files/${count}
+ echo "${rpath}/${rfile}" >> ${TMP}/files/${count}
+ ofile="${rfile}"
+ opath="${rpath}"
+ continue
+ fi
+ fi
+
+ if [[ -z $(diff -Nua ${rpath}/${rfile} ${rpath}/${ofile}|
+ grep "^[+-][^+-]"|grep -v '# .Header:.*') ]]; then
+ mv ${rpath}/${rfile} ${rpath}/${ofile}
+ continue
+ else
+ echo "${rpath}/${rfile}" >> ${TMP}/files/${count}
+ ofile="${rfile}"
+ opath="${rpath}"
+ fi
+ done
+ done
+
+}
+
+sel_file() {
+ local -i isfirst=0
+ until [[ -f ${TMP}/files/${input} ]] || \
+ [[ ${input} == -1 ]] || \
+ [[ ${input} == -3 ]]
+ do
+ local numfiles=$(ls ${TMP}/files|wc -l)
+ local numwidth=${#numfiles}
+ for file in $(ls ${TMP}/files|sort -n); do
+ if [[ ${isfirst} == 0 ]] ; then
+ isfirst=${file}
+ fi
+ numshow=$(printf "%${numwidth}i${PAR} " ${file})
+ numupdates=$(( $(wc -l <${TMP}/files/${file}) - 1 ))
+ echo -n "${numshow}"
+ if [[ ${mode} == 0 ]] ; then
+ echo "$(head -n1 ${TMP}/files/${file}) (${numupdates})"
+ else
+ head -n1 ${TMP}/files/${file}
+ fi
+ done > ${TMP}/menuitems
+
+ if [ "${OVERWRITE_ALL}" == "yes" ]; then
+ input=0
+ elif [ "${DELETE_ALL}" == "yes" ]; then
+ input=0
+ else
+ [[ $CLEAR_TERM == yes ]] && clear
+ if [[ ${mode} == 0 ]] ; then
+ echo "The following is the list of files which need updating, each
+configuration file is followed by a list of possible replacement files."
+ else
+ local my_title="Please select a file to update"
+ fi
+
+ if [[ ${mode} == 0 ]] ; then
+ cat ${TMP}/menuitems
+ echo "Please select a file to edit by entering the corresponding number."
+ echo " (don't use -3, -5, -7 or -9 if you're unsure what to do)"
+ echo " (-1 to exit) (-3 to auto merge all remaining files)"
+ echo " (-5 to auto-merge AND not use 'mv -i')"
+ echo " (-7 to discard all updates)"
+ echo -n " (-9 to discard all updates AND not use 'rm -i'): "
+ input=$(read_int)
+ else
+ dialog --title "${title}" --menu "${my_title}" \
+ 0 0 0 $(echo -e "-1 Exit\n$(<${TMP}/menuitems)") \
+ 2> ${TMP}/input || die "User termination!" 0
+ input=$(<${TMP}/input)
+ fi
+ if [[ ${input} == -9 ]]; then
+ read -p "Are you sure that you want to delete all updates (type YES):" reply
+ if [[ ${reply} != "YES" ]]; then
+ continue
+ else
+ input=-7
+ export rm_opts=""
+ fi
+ fi
+ if [[ ${input} == -7 ]]; then
+ input=0
+ export DELETE_ALL="yes"
+ fi
+ if [[ ${input} == -5 ]] ; then
+ input=-3
+ export mv_opts=" ${mv_opts} "
+ mv_opts="${mv_opts// -i / }"
+ fi
+ if [[ ${input} == -3 ]] ; then
+ input=0
+ export OVERWRITE_ALL="yes"
+ fi
+ fi # -3 automerge
+ if [[ -z ${input} ]] || [[ ${input} == 0 ]] ; then
+ input=${isfirst}
+ fi
+ done
+}
+
+user_special() {
+ if [ -r ${PORTAGE_CONFIGROOT}etc/etc-update.special ]; then
+ if [ -z "$1" ]; then
+ echo "ERROR: user_special() called without arguments"
+ return 1
+ fi
+ while read -r pat; do
+ echo ${1} | grep "${pat}" > /dev/null && return 0
+ done < ${PORTAGE_CONFIGROOT}etc/etc-update.special
+ fi
+ return 1
+}
+
+read_int() {
+	# Read an integer from stdin. Continuously loops until a valid integer is
+ # read. This is a workaround for odd behavior of bash when an attempt is
+ # made to store a value such as "1y" into an integer-only variable.
+ local my_input
+ while true; do
+ read my_input
+ # failed integer conversions will break a loop unless they're enclosed
+ # in a subshell.
+ echo "${my_input}" | ( declare -i x; read x) 2>/dev/null && break
+ echo -n "Value '$my_input' is not valid. Please enter an integer value:" >&2
+ done
+ echo ${my_input}
+}
+
+do_file() {
+ interactive_echo() { [ "${OVERWRITE_ALL}" != "yes" ] && [ "${DELETE_ALL}" != "yes" ] && echo; }
+ interactive_echo
+ local -i my_input
+ local -i fcount=0
+ until (( $(wc -l < ${TMP}/files/${input}) < 2 )); do
+ my_input=0
+ if (( $(wc -l < ${TMP}/files/${input}) == 2 )); then
+ my_input=1
+ fi
+ until (( ${my_input} > 0 )) && (( ${my_input} < $(wc -l < ${TMP}/files/${input}) )); do
+ fcount=0
+
+ if [ "${OVERWRITE_ALL}" == "yes" ]; then
+ my_input=0
+ elif [ "${DELETE_ALL}" == "yes" ]; then
+ my_input=0
+ else
+ for line in $(<${TMP}/files/${input}); do
+ if (( ${fcount} > 0 )); then
+ echo -n "${fcount}${PAR} "
+ echo "${line}"
+ else
+ if [[ ${mode} == 0 ]] ; then
+ echo "Below are the new config files for ${line}:"
+ else
+ local my_title="Please select a file to process for ${line}"
+ fi
+ fi
+ fcount=${fcount}+1
+ done > ${TMP}/menuitems
+
+ if [[ ${mode} == 0 ]] ; then
+ cat ${TMP}/menuitems
+ echo -n "Please select a file to process (-1 to exit this file): "
+ my_input=$(read_int)
+ else
+ dialog --title "${title}" --menu "${my_title}" \
+ 0 0 0 $(echo -e "$(<${TMP}/menuitems)\n${fcount} Exit") \
+ 2> ${TMP}/input || die "User termination!" 0
+ my_input=$(<${TMP}/input)
+ fi
+ fi # OVERWRITE_ALL
+
+ if [[ ${my_input} == 0 ]] ; then
+ my_input=1
+ elif [[ ${my_input} == -1 ]] ; then
+ input=0
+ return
+ elif [[ ${my_input} == ${fcount} ]] ; then
+ break
+ fi
+ done
+ if [[ ${my_input} == ${fcount} ]] ; then
+ break
+ fi
+
+ fcount=${my_input}+1
+
+ file=$(sed -e "${fcount}p;d" ${TMP}/files/${input})
+ ofile=$(head -n1 ${TMP}/files/${input})
+
+ do_cfg "${file}" "${ofile}"
+
+ sed -e "${fcount}!p;d" ${TMP}/files/${input} > ${TMP}/files/sed
+ mv ${TMP}/files/sed ${TMP}/files/${input}
+
+ if [[ ${my_input} == -1 ]] ; then
+ break
+ fi
+ done
+ interactive_echo
+ rm ${TMP}/files/${input}
+ count=${count}-1
+}
+
+do_cfg() {
+
+ local file="${1}"
+ local ofile="${2}"
+ local -i my_input=0
+
+ until (( ${my_input} == -1 )) || [ ! -f ${file} ]; do
+ if [[ "${OVERWRITE_ALL}" == "yes" ]] && ! user_special "${ofile}"; then
+ my_input=1
+ elif [[ "${DELETE_ALL}" == "yes" ]] && ! user_special "${ofile}"; then
+ my_input=2
+ else
+ [[ $CLEAR_TERM == yes ]] && clear
+ if [ "${using_editor}" == 0 ]; then
+ (
+ echo "Showing differences between ${ofile} and ${file}"
+ diff_command "${ofile}" "${file}"
+ ) | ${pager}
+ else
+ echo "Beginning of differences between ${ofile} and ${file}"
+ diff_command "${ofile}" "${file}"
+ echo "End of differences between ${ofile} and ${file}"
+ fi
+ if [ -L "${file}" ]; then
+ echo
+ echo "-------------------------------------------------------------"
+ echo "NOTE: File is a symlink to another file. REPLACE recommended."
+ echo " The original file may simply have moved. Please review."
+ echo "-------------------------------------------------------------"
+ echo
+ fi
+ echo -n "File: ${file}
+1) Replace original with update
+2) Delete update, keeping original as is
+3) Interactively merge original with update
+4) Show differences again
+5) Save update as example config
+Please select from the menu above (-1 to ignore this update): "
+ my_input=$(read_int)
+ fi
+
+ case ${my_input} in
+ 1) echo "Replacing ${ofile} with ${file}"
+ mv ${mv_opts} ${file} ${ofile}
+ [ -n "${OVERWRITE_ALL}" ] && my_input=-1
+ continue
+ ;;
+ 2) echo "Deleting ${file}"
+ rm ${rm_opts} ${file}
+ [ -n "${DELETE_ALL}" ] && my_input=-1
+ continue
+ ;;
+ 3) do_merge "${file}" "${ofile}"
+ my_input=${?}
+# [ ${my_input} == 255 ] && my_input=-1
+ continue
+ ;;
+ 4) continue
+ ;;
+ 5) do_distconf "${file}" "${ofile}"
+ ;;
+ *) continue
+ ;;
+ esac
+ done
+}
+
+do_merge() {
+ # make sure we keep the merged file in the secure tempdir
+	# so we don't leak any information contained in said file
+	# (think of the case where the file has 0600 perms; during the
+ # merging process, the temp file gets umask perms!)
+
+ local file="${1}"
+ local ofile="${2}"
+ local mfile="${TMP}/${2}.merged"
+ local -i my_input=0
+ echo "${file} ${ofile} ${mfile}"
+
+ if [[ -e ${mfile} ]] ; then
+ echo "A previous version of the merged file exists, cleaning..."
+ rm ${rm_opts} "${mfile}"
+ fi
+
+ # since mfile will be like $TMP/path/to/original-file.merged, we
+ # need to make sure the full /path/to/ exists ahead of time
+ mkdir -p "${mfile%/*}"
+
+ until (( ${my_input} == -1 )); do
+ echo "Merging ${file} and ${ofile}"
+ $(echo "${merge_command}" |
+ sed -e "s:%merged:${mfile}:g" \
+ -e "s:%orig:${ofile}:g" \
+ -e "s:%new:${file}:g")
+ until (( ${my_input} == -1 )); do
+ echo -n "1) Replace ${ofile} with merged file
+2) Show differences between merged file and original
+3) Remerge original with update
+4) Edit merged file
+5) Return to the previous menu
+Please select from the menu above (-1 to exit, losing this merge): "
+ my_input=$(read_int)
+ case ${my_input} in
+ 1) echo "Replacing ${ofile} with ${mfile}"
+ if [[ ${USERLAND} == BSD ]] ; then
+ chown "$(stat -f %Su:%Sg "${ofile}")" "${mfile}"
+ chmod $(stat -f %Mp%Lp "${ofile}") "${mfile}"
+ else
+ chown --reference="${ofile}" "${mfile}"
+ chmod --reference="${ofile}" "${mfile}"
+ fi
+ mv ${mv_opts} "${mfile}" "${ofile}"
+ rm ${rm_opts} "${file}"
+ return 255
+ ;;
+ 2)
+ [[ $CLEAR_TERM == yes ]] && clear
+ if [ "${using_editor}" == 0 ]; then
+ (
+ echo "Showing differences between ${ofile} and ${mfile}"
+ diff_command "${ofile}" "${mfile}"
+ ) | ${pager}
+ else
+ echo "Beginning of differences between ${ofile} and ${mfile}"
+ diff_command "${ofile}" "${mfile}"
+ echo "End of differences between ${ofile} and ${mfile}"
+ fi
+ continue
+ ;;
+ 3) break
+ ;;
+ 4) ${EDITOR:-nano -w} "${mfile}"
+ continue
+ ;;
+ 5) rm ${rm_opts} "${mfile}"
+ return 0
+ ;;
+ *) continue
+ ;;
+ esac
+ done
+ done
+ rm ${rm_opts} "${mfile}"
+ return 255
+}
+
+do_distconf() {
+ # search for any previously saved distribution config
+ # files and number the current one accordingly
+
+ local file="${1}"
+ local ofile="${2}"
+ local -i count
+ local -i fill
+ local suffix
+ local efile
+
+ for ((count = 0; count <= 9999; count++)); do
+ suffix=".dist_"
+ for ((fill = 4 - ${#count}; fill > 0; fill--)); do
+ suffix+="0"
+ done
+ suffix+="${count}"
+ efile="${ofile}${suffix}"
+ if [[ ! -f ${efile} ]]; then
+ mv ${mv_opts} "${file}" "${efile}"
+ break
+ elif diff_command "${file}" "${efile}" &> /dev/null; then
+ # replace identical copy
+ mv "${file}" "${efile}"
+ break
+ fi
+ done
+}
+
+die() {
+ trap SIGTERM
+ trap SIGINT
+
+ if [ "$2" -eq 0 ]; then
+ echo "Exiting: ${1}"
+ scan > /dev/null
+ [ ${count} -gt 0 ] && echo "NOTE: ${count} updates remaining"
+ else
+ echo "ERROR: ${1}"
+ fi
+
+ rm -rf "${TMP}"
+ exit ${2}
+}
+
+usage() {
+ cat <<-EOF
+ etc-update: Handle configuration file updates
+
+ Usage: etc-update [options]
+
+ Options:
+ -d, --debug Enable shell debugging
+ -h, --help Show help and run away
+ -V, --version Show version and trundle away
+ EOF
+
+ [[ -n ${*:2} ]] && printf "\nError: %s\n" "${*:2}" 1>&2
+
+ exit ${1:-0}
+}
+
+#
+# Run the script
+#
+
+SET_X=false
+while [[ -n $1 ]] ; do
+ case $1 in
+ -d|--debug) SET_X=true;;
+ -h|--help) usage;;
+ -V|--version) emerge --version ; exit 0;;
+ *) usage 1 "Invalid option '$1'";;
+ esac
+ shift
+done
+${SET_X} && set -x
+
+type portageq > /dev/null || exit $?
+eval $(portageq envvar -v CONFIG_PROTECT \
+ CONFIG_PROTECT_MASK PORTAGE_CONFIGROOT PORTAGE_TMPDIR ROOT USERLAND)
+export PORTAGE_TMPDIR
+
+TMP="${PORTAGE_TMPDIR}/etc-update-$$"
+trap "die terminated 1" SIGTERM
+trap "die interrupted 1" SIGINT
+
+[ -w ${PORTAGE_CONFIGROOT}etc ] || die "Need write access to ${PORTAGE_CONFIGROOT}etc" 1
+#echo $PORTAGE_TMPDIR
+#echo $CONFIG_PROTECT
+#echo $CONFIG_PROTECT_MASK
+#export PORTAGE_TMPDIR=$(/usr/lib/portage/bin/portageq envvar PORTAGE_TMPDIR)
+
+rm -rf "${TMP}" 2> /dev/null
+mkdir "${TMP}" || die "failed to create temp dir" 1
+# make sure we have a secure directory to work in
+chmod 0700 "${TMP}" || die "failed to set perms on temp dir" 1
+chown ${UID:-0}:${GID:-0} "${TMP}" || die "failed to set ownership on temp dir" 1
+
+# I need the CONFIG_PROTECT value
+#CONFIG_PROTECT=$(/usr/lib/portage/bin/portageq envvar CONFIG_PROTECT)
+#CONFIG_PROTECT_MASK=$(/usr/lib/portage/bin/portageq envvar CONFIG_PROTECT_MASK)
+
+# load etc-config's configuration
+CLEAR_TERM=$(get_config clear_term)
+EU_AUTOMERGE=$(get_config eu_automerge)
+rm_opts=$(get_config rm_opts)
+mv_opts=$(get_config mv_opts)
+cp_opts=$(get_config cp_opts)
+pager=$(get_config pager)
+diff_command=$(get_config diff_command)
+using_editor=$(get_config using_editor)
+merge_command=$(get_config merge_command)
+declare -i mode=$(get_config mode)
+[[ -z ${mode} ]] && mode=0
+[[ -z ${pager} ]] && pager="cat"
+
+if [ "${using_editor}" == 0 ]; then
+ # Sanity check to make sure diff exists and works
+ echo > "${TMP}"/.diff-test-1
+ echo > "${TMP}"/.diff-test-2
+
+ if ! diff_command "${TMP}"/.diff-test-1 "${TMP}"/.diff-test-2 ; then
+ die "'${diff_command}' does not seem to work, aborting" 1
+ fi
+else
+ if ! type ${diff_command%% *} >/dev/null; then
+ die "'${diff_command}' does not seem to work, aborting" 1
+ fi
+fi
+
+if [[ ${mode} == "1" ]] ; then
+ if ! type dialog >/dev/null || ! dialog --help >/dev/null ; then
+ die "mode=1 and 'dialog' not found or not executable, aborting" 1
+ fi
+fi
+
+#echo "rm_opts: $rm_opts, mv_opts: $mv_opts, cp_opts: $cp_opts"
+#echo "pager: $pager, diff_command: $diff_command, merge_command: $merge_command"
+
+if (( ${mode} == 0 )); then
+ PAR=")"
+else
+ PAR=""
+fi
+
+declare -i count=0
+declare input=0
+declare title="Gentoo's etc-update tool!"
+
+scan
+
+until (( ${input} == -1 )); do
+ if (( ${count} == 0 )); then
+ die "Nothing left to do; exiting. :)" 0
+ fi
+ sel_file
+ if (( ${input} != -1 )); then
+ do_file
+ fi
+done
+
+die "User termination!" 0
diff --git a/portage_with_autodep/bin/filter-bash-environment.py b/portage_with_autodep/bin/filter-bash-environment.py
new file mode 100755
index 0000000..b9aec96
--- /dev/null
+++ b/portage_with_autodep/bin/filter-bash-environment.py
@@ -0,0 +1,150 @@
+#!/usr/bin/python
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import codecs
+import io
+import optparse
+import os
+import re
+import sys
+
+here_doc_re = re.compile(r'.*\s<<[-]?(\w+)$')
+func_start_re = re.compile(r'^[-\w]+\s*\(\)\s*$')
+func_end_re = re.compile(r'^\}$')
+
+var_assign_re = re.compile(r'(^|^declare\s+-\S+\s+|^declare\s+|^export\s+)([^=\s]+)=("|\')?.*$')
+close_quote_re = re.compile(r'(\\"|"|\')\s*$')
+readonly_re = re.compile(r'^declare\s+-(\S*)r(\S*)\s+')
+# declare without assignment
+var_declare_re = re.compile(r'^declare(\s+-\S+)?\s+([^=\s]+)\s*$')
+
+def have_end_quote(quote, line):
+	"""
+	Check if the line has an end quote (useful for handling multi-line
+	quotes). This handles escaped double quotes that may occur at the
+	end of a line. The posix spec does not allow escaping of single
+	quotes inside of single quotes, so that case is not handled.
+	"""
+	# group(1) of close_quote_re is the quoting token found at end of
+	# line (\" or " or '); it must match the opening quote exactly.
+	close_quote_match = close_quote_re.search(line)
+	return close_quote_match is not None and \
+		close_quote_match.group(1) == quote
+
+def filter_declare_readonly_opt(line):
+	"""
+	Strip the 'r' (readonly) flag from a bash ``declare -...`` statement,
+	preserving any other option characters, so the variable can be
+	re-declared when the saved environment is sourced again. Lines that
+	do not match a readonly declare are returned unchanged.
+	"""
+	readonly_match = readonly_re.match(line)
+	if readonly_match is not None:
+		declare_opts = ''
+		# groups 1 and 2 hold the option characters appearing before
+		# and after the 'r' flag, respectively
+		for i in (1, 2):
+			group = readonly_match.group(i)
+			if group is not None:
+				declare_opts += group
+		if declare_opts:
+			line = 'declare -%s %s' % \
+				(declare_opts, line[readonly_match.end():])
+		else:
+			# no other flags remain; emit a plain declare
+			line = 'declare ' + line[readonly_match.end():]
+	return line
+
+def filter_bash_environment(pattern, file_in, file_out):
+	"""
+	Copy a dump of the bash environment from file_in to file_out,
+	dropping assignments of variables whose names match ``pattern``
+	while passing bash function definitions and here-documents through
+	untouched. Operates as a line-oriented state machine tracking three
+	states: inside a multi-line quoted value, inside a here-document,
+	and inside a function body.
+	"""
+	# Filter out any instances of the \1 character from variable values
+	# since this character multiplies each time that the environment
+	# is saved (strange bash behavior). This can eventually result in
+	# mysterious 'Argument list too long' errors from programs that have
+	# huge strings of \1 characters in their environment. See bug #222091.
+	here_doc_delim = None
+	in_func = None
+	multi_line_quote = None
+	multi_line_quote_filter = None
+	for line in file_in:
+		# continuation of a multi-line quoted assignment: emit (or
+		# suppress, if the variable is being filtered) until the
+		# closing quote is seen
+		if multi_line_quote is not None:
+			if not multi_line_quote_filter:
+				file_out.write(line.replace("\1", ""))
+			if have_end_quote(multi_line_quote, line):
+				multi_line_quote = None
+				multi_line_quote_filter = None
+			continue
+		if here_doc_delim is None and in_func is None:
+			var_assign_match = var_assign_re.match(line)
+			if var_assign_match is not None:
+				quote = var_assign_match.group(3)
+				filter_this = pattern.match(var_assign_match.group(2)) \
+					is not None
+				# Exclude the start quote when searching for the end quote,
+				# to ensure that the start quote is not misidentified as the
+				# end quote (happens if there is a newline immediately after
+				# the start quote).
+				if quote is not None and not \
+					have_end_quote(quote, line[var_assign_match.end(2)+2:]):
+					multi_line_quote = quote
+					multi_line_quote_filter = filter_this
+				if not filter_this:
+					line = filter_declare_readonly_opt(line)
+					file_out.write(line.replace("\1", ""))
+				continue
+			else:
+				declare_match = var_declare_re.match(line)
+				if declare_match is not None:
+					# declare without assignment
+					filter_this = pattern.match(declare_match.group(2)) \
+						is not None
+					if not filter_this:
+						line = filter_declare_readonly_opt(line)
+						file_out.write(line)
+					continue
+
+		if here_doc_delim is not None:
+			# inside a here-document: pass everything through; the
+			# delimiter line itself ends the state
+			if here_doc_delim.match(line):
+				here_doc_delim = None
+			file_out.write(line)
+			continue
+		here_doc = here_doc_re.match(line)
+		if here_doc is not None:
+			here_doc_delim = re.compile("^%s$" % here_doc.group(1))
+			file_out.write(line)
+			continue
+		# Note: here-documents are handled before functions since otherwise
+		# it would be possible for the content of a here-document to be
+		# mistaken as the end of a function.
+		if in_func:
+			if func_end_re.match(line) is not None:
+				in_func = None
+			file_out.write(line)
+			continue
+		in_func = func_start_re.match(line)
+		if in_func is not None:
+			file_out.write(line)
+			continue
+		# This line is not recognized as part of a variable assignment,
+		# function definition, or here document, so just allow it to
+		# pass through.
+		file_out.write(line)
+
+if __name__ == "__main__":
+ description = "Filter out variable assignments for variable " + \
+ "names matching a given PATTERN " + \
+ "while leaving bash function definitions and here-documents " + \
+ "intact. The PATTERN is a space separated list of variable names" + \
+ " and it supports python regular expression syntax."
+ usage = "usage: %s PATTERN" % os.path.basename(sys.argv[0])
+ parser = optparse.OptionParser(description=description, usage=usage)
+ options, args = parser.parse_args(sys.argv[1:])
+ if len(args) != 1:
+ parser.error("Missing required PATTERN argument.")
+ file_in = sys.stdin
+ file_out = sys.stdout
+ if sys.hexversion >= 0x3000000:
+ file_in = codecs.iterdecode(sys.stdin.buffer.raw,
+ 'utf_8', errors='replace')
+ file_out = io.TextIOWrapper(sys.stdout.buffer,
+ 'utf_8', errors='backslashreplace')
+
+ var_pattern = args[0].split()
+
+ # Filter invalid variable names that are not supported by bash.
+ var_pattern.append(r'\d.*')
+ var_pattern.append(r'.*\W.*')
+
+ var_pattern = "^(%s)$" % "|".join(var_pattern)
+ filter_bash_environment(
+ re.compile(var_pattern), file_in, file_out)
+ file_out.flush()
diff --git a/portage_with_autodep/bin/fixpackages b/portage_with_autodep/bin/fixpackages
new file mode 100755
index 0000000..5e1df70
--- /dev/null
+++ b/portage_with_autodep/bin/fixpackages
@@ -0,0 +1,45 @@
+#!/usr/bin/python
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import os,sys
+os.environ["PORTAGE_CALLER"]="fixpackages"
+try:
+ import portage
+except ImportError:
+ from os import path as osp
+ sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+ import portage
+
+from portage import os
+from portage.output import EOutput
+from textwrap import wrap
+from portage._global_updates import _global_updates
+mysettings = portage.settings
+mytrees = portage.db
+mtimedb = portage.mtimedb
+
+if mysettings['ROOT'] != "/":
+ out = EOutput()
+ msg = "The fixpackages program is not intended for use with " + \
+ "ROOT != \"/\". Instead use `emaint --fix movebin` and/or " + \
+ "`emaint --fix moveinst."
+ for line in wrap(msg, 72):
+ out.eerror(line)
+ sys.exit(1)
+
+try:
+ os.nice(int(mysettings.get("PORTAGE_NICENESS", "0")))
+except (OSError, ValueError) as e:
+ portage.writemsg("!!! Failed to change nice value to '%s'\n" % \
+ mysettings["PORTAGE_NICENESS"])
+ portage.writemsg("!!! %s\n" % str(e))
+ del e
+
+_global_updates(mytrees, mtimedb["updates"])
+
+print()
+print("Done.")
+print()
diff --git a/portage_with_autodep/bin/glsa-check b/portage_with_autodep/bin/glsa-check
new file mode 100755
index 0000000..2f2d555
--- /dev/null
+++ b/portage_with_autodep/bin/glsa-check
@@ -0,0 +1,316 @@
+#!/usr/bin/python
+# Copyright 2008-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import sys
+
+try:
+ import portage
+except ImportError:
+ from os import path as osp
+ sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+ import portage
+
+from portage import os
+from portage.output import *
+
+from optparse import OptionGroup, OptionParser
+
+__program__ = "glsa-check"
+__author__ = "Marius Mauch <genone@gentoo.org>"
+__version__ = "1.0"
+
+def cb_version(*args, **kwargs):
+	"""Callback for --version: print version/author info to stderr and
+	exit the program with status 0."""
+	sys.stderr.write("\n"+ __program__ + ", version " + __version__ + "\n")
+	sys.stderr.write("Author: " + __author__ + "\n")
+	sys.stderr.write("This program is licensed under the GPL, version 2\n\n")
+	sys.exit(0)
+
+# option parsing
+parser = OptionParser(usage="%prog <option> [glsa-list]",
+ version="%prog "+ __version__)
+parser.epilog = "glsa-list can contain an arbitrary number of GLSA ids," \
+ " filenames containing GLSAs or the special identifiers" \
+ " 'all', 'new' and 'affected'"
+
+modes = OptionGroup(parser, "Modes")
+modes.add_option("-l", "--list", action="store_const",
+ const="list", dest="mode",
+ help="List all unapplied GLSA")
+modes.add_option("-d", "--dump", action="store_const",
+ const="dump", dest="mode",
+ help="Show all information about the given GLSA")
+modes.add_option("", "--print", action="store_const",
+ const="dump", dest="mode",
+ help="Alias for --dump")
+modes.add_option("-t", "--test", action="store_const",
+ const="test", dest="mode",
+ help="Test if this system is affected by the given GLSA")
+modes.add_option("-p", "--pretend", action="store_const",
+ const="pretend", dest="mode",
+ help="Show the necessary commands to apply this GLSA")
+modes.add_option("-f", "--fix", action="store_const",
+ const="fix", dest="mode",
+ help="Try to auto-apply this GLSA (experimental)")
+modes.add_option("-i", "--inject", action="store_const", dest="mode",
+ help="Inject the given GLSA into the checkfile")
+modes.add_option("-m", "--mail", action="store_const",
+ const="mail", dest="mode",
+ help="Send a mail with the given GLSAs to the administrator")
+parser.add_option_group(modes)
+
+parser.remove_option("--version")
+parser.add_option("-V", "--version", action="callback",
+ callback=cb_version, help="Some information about this tool")
+parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
+ help="Print more information")
+parser.add_option("-n", "--nocolor", action="callback",
+ callback=lambda *args, **kwargs: nocolor(),
+ help="Disable colors")
+parser.add_option("-e", "--emergelike", action="store_false", dest="least_change",
+ help="Do not use a least-change algorithm")
+parser.add_option("-c", "--cve", action="store_true", dest="list_cve",
+ help="Show CAN ids in listing mode")
+
+options, params = parser.parse_args()
+
+mode = options.mode
+least_change = options.least_change
+list_cve = options.list_cve
+verbose = options.verbose
+
+# Sanity checking
+if mode is None:
+ sys.stderr.write("No mode given: what should I do?\n")
+ parser.print_help()
+ sys.exit(1)
+elif mode != "list" and not params:
+ sys.stderr.write("\nno GLSA given, so we'll do nothing for now. \n")
+ sys.stderr.write("If you want to run on all GLSA please tell me so \n")
+ sys.stderr.write("(specify \"all\" as parameter)\n\n")
+ parser.print_help()
+ sys.exit(1)
+elif mode in ["fix", "inject"] and os.geteuid() != 0:
+ # we need root privileges for write access
+ sys.stderr.write("\nThis tool needs root access to "+options.mode+" this GLSA\n\n")
+ sys.exit(2)
+elif mode == "list" and not params:
+ params.append("new")
+
+# delay this for speed increase
+from portage.glsa import *
+
+vardb = portage.db[portage.settings["ROOT"]]["vartree"].dbapi
+portdb = portage.db["/"]["porttree"].dbapi
+
+# build glsa lists
+completelist = get_glsa_list(portage.settings)
+
+checklist = get_applied_glsas(portage.settings)
+todolist = [e for e in completelist if e not in checklist]
+
+glsalist = []
+if "new" in params:
+ glsalist = todolist
+ params.remove("new")
+
+if "all" in params:
+ glsalist = completelist
+ params.remove("all")
+if "affected" in params:
+ # replaced completelist with todolist on request of wschlich
+ for x in todolist:
+ try:
+ myglsa = Glsa(x, portage.settings, vardb, portdb)
+ except (GlsaTypeException, GlsaFormatException) as e:
+ if verbose:
+ sys.stderr.write(("invalid GLSA: %s (error message was: %s)\n" % (x, e)))
+ continue
+ if myglsa.isVulnerable():
+ glsalist.append(x)
+ params.remove("affected")
+
+# remove invalid parameters
+for p in params[:]:
+ if not (p in completelist or os.path.exists(p)):
+ sys.stderr.write(("(removing %s from parameter list as it isn't a valid GLSA specification)\n" % p))
+ params.remove(p)
+
+glsalist.extend([g for g in params if g not in glsalist])
+
+def summarylist(myglsalist, fd1=sys.stdout, fd2=sys.stderr):
+	"""
+	Print a one-line-per-GLSA status summary for the ids in myglsalist.
+	Status lines go to fd1; the legend and any parse warnings go to fd2.
+	Returns 0.
+	"""
+	fd2.write(white("[A]")+" means this GLSA was already applied,\n")
+	fd2.write(green("[U]")+" means the system is not affected and\n")
+	fd2.write(red("[N]")+" indicates that the system might be affected.\n\n")
+
+	myglsalist.sort()
+	for myid in myglsalist:
+		try:
+			myglsa = Glsa(myid, portage.settings, vardb, portdb)
+		except (GlsaTypeException, GlsaFormatException) as e:
+			# unparsable GLSA: warn (if verbose) and skip it
+			if verbose:
+				fd2.write(("invalid GLSA: %s (error message was: %s)\n" % (myid, e)))
+			continue
+		if myglsa.isApplied():
+			status = "[A]"
+			color = white
+		elif myglsa.isVulnerable():
+			status = "[N]"
+			color = red
+		else:
+			status = "[U]"
+			color = green
+
+		if verbose:
+			access = ("[%-8s] " % myglsa.access)
+		else:
+			access=""
+
+		fd1.write(color(myglsa.nr) + " " + color(status) + " " + color(access) + myglsa.title + " (")
+		if not verbose:
+			# terse mode: show at most 3 affected package atoms
+			for pkg in list(myglsa.packages)[:3]:
+				fd1.write(" " + pkg + " ")
+			if len(myglsa.packages) > 3:
+				fd1.write("... ")
+		else:
+			# verbose mode: show installed versions matching each atom
+			for pkg in myglsa.packages:
+				mylist = vardb.match(pkg)
+				if len(mylist) > 0:
+					pkg = color(" ".join(mylist))
+				fd1.write(" " + pkg + " ")
+
+		fd1.write(")")
+		if list_cve:
+			fd1.write(" "+(",".join([r[:13] for r in myglsa.references if r[:4] in ["CAN-", "CVE-"]])))
+		fd1.write("\n")
+	return 0
+
+if mode == "list":
+ sys.exit(summarylist(glsalist))
+
+# dump, fix, inject and pretend are nearly the same code; only the glsa method call differs
+if mode in ["dump", "fix", "inject", "pretend"]:
+ for myid in glsalist:
+ try:
+ myglsa = Glsa(myid, portage.settings, vardb, portdb)
+ except (GlsaTypeException, GlsaFormatException) as e:
+ if verbose:
+ sys.stderr.write(("invalid GLSA: %s (error message was: %s)\n" % (myid, e)))
+ continue
+ if mode == "dump":
+ myglsa.dump()
+ elif mode == "fix":
+ sys.stdout.write("fixing "+myid+"\n")
+ mergelist = myglsa.getMergeList(least_change=least_change)
+ for pkg in mergelist:
+ sys.stdout.write(">>> merging "+pkg+"\n")
+ # using emerge for the actual merging as it contains the dependency
+ # code and we want to be consistent in behaviour. Also this functionality
+ # will be integrated in emerge later, so it shouldn't hurt much.
+ emergecmd = "emerge --oneshot " + portage.settings["EMERGE_OPTS"] + " =" + pkg
+ if verbose:
+ sys.stderr.write(emergecmd+"\n")
+ exitcode = os.system(emergecmd)
+ # system() returns the exitcode in the high byte of a 16bit integer
+ if exitcode >= 1<<8:
+ exitcode >>= 8
+ if exitcode:
+ sys.exit(exitcode)
+ myglsa.inject()
+ elif mode == "pretend":
+ sys.stdout.write("Checking GLSA "+myid+"\n")
+ mergelist = myglsa.getMergeList(least_change=least_change)
+ if mergelist:
+ sys.stdout.write("The following updates will be performed for this GLSA:\n")
+ for pkg in mergelist:
+ oldver = None
+ for x in vardb.match(portage.cpv_getkey(pkg)):
+ if vardb.aux_get(x, ["SLOT"]) == portdb.aux_get(pkg, ["SLOT"]):
+ oldver = x
+ if oldver == None:
+ raise ValueError("could not find old version for package %s" % pkg)
+ oldver = oldver[len(portage.cpv_getkey(oldver))+1:]
+ sys.stdout.write(" " + pkg + " (" + oldver + ")\n")
+ else:
+ sys.stdout.write("Nothing to do for this GLSA\n")
+ elif mode == "inject":
+ sys.stdout.write("injecting " + myid + "\n")
+ myglsa.inject()
+ sys.stdout.write("\n")
+ sys.exit(0)
+
+# test is a bit different as Glsa.test() produces no output
+if mode == "test":
+ outputlist = []
+ for myid in glsalist:
+ try:
+ myglsa = Glsa(myid, portage.settings, vardb, portdb)
+ except (GlsaTypeException, GlsaFormatException) as e:
+ if verbose:
+ sys.stderr.write(("invalid GLSA: %s (error message was: %s)\n" % (myid, e)))
+ continue
+ if myglsa.isVulnerable():
+ outputlist.append(str(myglsa.nr))
+ if len(outputlist) > 0:
+ sys.stderr.write("This system is affected by the following GLSAs:\n")
+ if verbose:
+ summarylist(outputlist)
+ else:
+ sys.stdout.write("\n".join(outputlist)+"\n")
+ else:
+ sys.stderr.write("This system is not affected by any of the listed GLSAs\n")
+ sys.exit(0)
+
+# mail mode as requested by solar
+if mode == "mail":
+ import portage.mail, socket
+ from io import StringIO
+ from email.mime.text import MIMEText
+
+ # color doesn't make any sense for mail
+ nocolor()
+
+ if "PORTAGE_ELOG_MAILURI" in portage.settings:
+ myrecipient = portage.settings["PORTAGE_ELOG_MAILURI"].split()[0]
+ else:
+ myrecipient = "root@localhost"
+
+ if "PORTAGE_ELOG_MAILFROM" in portage.settings:
+ myfrom = portage.settings["PORTAGE_ELOG_MAILFROM"]
+ else:
+ myfrom = "glsa-check"
+
+ mysubject = "[glsa-check] Summary for %s" % socket.getfqdn()
+
+ # need a file object for summarylist()
+ myfd = StringIO()
+ myfd.write("GLSA Summary report for host %s\n" % socket.getfqdn())
+ myfd.write("(Command was: %s)\n\n" % " ".join(sys.argv))
+ summarylist(glsalist, fd1=myfd, fd2=myfd)
+ summary = str(myfd.getvalue())
+ myfd.close()
+
+ myattachments = []
+ for myid in glsalist:
+ try:
+ myglsa = Glsa(myid, portage.settings, vardb, portdb)
+ except (GlsaTypeException, GlsaFormatException) as e:
+ if verbose:
+ sys.stderr.write(("invalid GLSA: %s (error message was: %s)\n" % (myid, e)))
+ continue
+ myfd = StringIO()
+ myglsa.dump(outstream=myfd)
+ myattachments.append(MIMEText(str(myfd.getvalue()), _charset="utf8"))
+ myfd.close()
+
+ mymessage = portage.mail.create_message(myfrom, myrecipient, mysubject, summary, myattachments)
+ portage.mail.send_mail(portage.settings, mymessage)
+
+ sys.exit(0)
+
+# something wrong here, all valid paths are covered with sys.exit()
+sys.stderr.write("nothing more to do\n")
+sys.exit(2)
diff --git a/portage_with_autodep/bin/isolated-functions.sh b/portage_with_autodep/bin/isolated-functions.sh
new file mode 100644
index 0000000..65bb1d5
--- /dev/null
+++ b/portage_with_autodep/bin/isolated-functions.sh
@@ -0,0 +1,630 @@
+#!/bin/bash
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+# We need this next line for "die" and "assert". It expands aliases,
+# so it _must_ precede all the calls to die and assert.
+shopt -s expand_aliases
+alias save_IFS='[ "${IFS:-unset}" != "unset" ] && old_IFS="${IFS}"'
+alias restore_IFS='if [ "${old_IFS:-unset}" != "unset" ]; then IFS="${old_IFS}"; unset old_IFS; else unset IFS; fi'
+
+assert() {
+	# Die with the given message unless every command in the most
+	# recently executed pipeline exited with status 0. PIPESTATUS is
+	# captured immediately, before any other command can overwrite it.
+	local x pipestatus=${PIPESTATUS[*]}
+	for x in $pipestatus ; do
+		[[ $x -eq 0 ]] || die "$@"
+	done
+}
+
+assert_sigpipe_ok() {
+ # When extracting a tar file like this:
+ #
+ # bzip2 -dc foo.tar.bz2 | tar xof -
+ #
+ # For some tar files (see bug #309001), tar will
+ # close its stdin pipe when the decompressor still has
+ # remaining data to be written to its stdout pipe. This
+ # causes the decompressor to be killed by SIGPIPE. In
+ # this case, we want to ignore pipe writers killed by
+ # SIGPIPE, and trust the exit status of tar. We refer
+ # to the bash manual section "3.7.5 Exit Status"
+ # which says, "When a command terminates on a fatal
+ # signal whose number is N, Bash uses the value 128+N
+ # as the exit status."
+
+ local x pipestatus=${PIPESTATUS[*]}
+ for x in $pipestatus ; do
+ # Allow SIGPIPE through (128 + 13)
+ [[ $x -ne 0 && $x -ne ${PORTAGE_SIGPIPE_STATUS:-141} ]] && die "$@"
+ done
+
+ # Require normal success for the last process (tar).
+ [[ $x -eq 0 ]] || die "$@"
+}
+
+shopt -s extdebug
+
+# dump_trace([number of funcs on stack to skip],
+# [whitespacing for filenames],
+# [whitespacing for line numbers])
+dump_trace() {
+ local funcname="" sourcefile="" lineno="" s="yes" n p
+ declare -i strip=${1:-1}
+ local filespacing=$2 linespacing=$3
+
+ # The qa_call() function and anything before it are portage internals
+ # that the user will not be interested in. Therefore, the stack trace
+ # should only show calls that come after qa_call().
+ (( n = ${#FUNCNAME[@]} - 1 ))
+ (( p = ${#BASH_ARGV[@]} ))
+ while (( n > 0 )) ; do
+ [ "${FUNCNAME[${n}]}" == "qa_call" ] && break
+ (( p -= ${BASH_ARGC[${n}]} ))
+ (( n-- ))
+ done
+ if (( n == 0 )) ; then
+ (( n = ${#FUNCNAME[@]} - 1 ))
+ (( p = ${#BASH_ARGV[@]} ))
+ fi
+
+ eerror "Call stack:"
+ while (( n > ${strip} )) ; do
+ funcname=${FUNCNAME[${n} - 1]}
+ sourcefile=$(basename "${BASH_SOURCE[${n}]}")
+ lineno=${BASH_LINENO[${n} - 1]}
+ # Display function arguments
+ args=
+ if [[ -n "${BASH_ARGV[@]}" ]]; then
+ for (( j = 1 ; j <= ${BASH_ARGC[${n} - 1]} ; ++j )); do
+ newarg=${BASH_ARGV[$(( p - j - 1 ))]}
+ args="${args:+${args} }'${newarg}'"
+ done
+ (( p -= ${BASH_ARGC[${n} - 1]} ))
+ fi
+ eerror " $(printf "%${filespacing}s" "${sourcefile}"), line $(printf "%${linespacing}s" "${lineno}"): Called ${funcname}${args:+ ${args}}"
+ (( n-- ))
+ done
+}
+
+nonfatal() {
+ if has "${EAPI:-0}" 0 1 2 3 3_pre2 ; then
+ die "$FUNCNAME() not supported in this EAPI"
+ fi
+ if [[ $# -lt 1 ]]; then
+ die "$FUNCNAME(): Missing argument"
+ fi
+
+ PORTAGE_NONFATAL=1 "$@"
+}
+
+helpers_die() {
+ case "${EAPI:-0}" in
+ 0|1|2|3)
+ echo -e "$@" >&2
+ ;;
+ *)
+ die "$@"
+ ;;
+ esac
+}
+
+die() {
+ if [[ $PORTAGE_NONFATAL -eq 1 ]]; then
+ echo -e " $WARN*$NORMAL ${FUNCNAME[1]}: WARNING: $@" >&2
+ return 1
+ fi
+
+ set +e
+ if [ -n "${QA_INTERCEPTORS}" ] ; then
+ # die was called from inside inherit. We need to clean up
+ # QA_INTERCEPTORS since sed is called below.
+ unset -f ${QA_INTERCEPTORS}
+ unset QA_INTERCEPTORS
+ fi
+ local n filespacing=0 linespacing=0
+ # setup spacing to make output easier to read
+ (( n = ${#FUNCNAME[@]} - 1 ))
+ while (( n > 0 )) ; do
+ [ "${FUNCNAME[${n}]}" == "qa_call" ] && break
+ (( n-- ))
+ done
+ (( n == 0 )) && (( n = ${#FUNCNAME[@]} - 1 ))
+ while (( n > 0 )); do
+ sourcefile=${BASH_SOURCE[${n}]} sourcefile=${sourcefile##*/}
+ lineno=${BASH_LINENO[${n}]}
+ ((filespacing < ${#sourcefile})) && filespacing=${#sourcefile}
+ ((linespacing < ${#lineno})) && linespacing=${#lineno}
+ (( n-- ))
+ done
+
+ # When a helper binary dies automatically in EAPI 4 and later, we don't
+ # get a stack trace, so at least report the phase that failed.
+ local phase_str=
+ [[ -n $EBUILD_PHASE ]] && phase_str=" ($EBUILD_PHASE phase)"
+ eerror "ERROR: $CATEGORY/$PF failed${phase_str}:"
+ eerror " ${*:-(no error message)}"
+ eerror
+ # dump_trace is useless when the main script is a helper binary
+ local main_index
+ (( main_index = ${#BASH_SOURCE[@]} - 1 ))
+ if has ${BASH_SOURCE[$main_index]##*/} ebuild.sh misc-functions.sh ; then
+ dump_trace 2 ${filespacing} ${linespacing}
+ eerror " $(printf "%${filespacing}s" "${BASH_SOURCE[1]##*/}"), line $(printf "%${linespacing}s" "${BASH_LINENO[0]}"): Called die"
+ eerror "The specific snippet of code:"
+ # This scans the file that called die and prints out the logic that
+ # ended in the call to die. This really only handles lines that end
+ # with '|| die' and any preceding lines with line continuations (\).
+ # This tends to be the most common usage though, so let's do it.
+ # Due to the usage of appending to the hold space (even when empty),
+ # we always end up with the first line being a blank (thus the 2nd sed).
+ sed -n \
+ -e "# When we get to the line that failed, append it to the
+ # hold space, move the hold space to the pattern space,
+ # then print out the pattern space and quit immediately
+ ${BASH_LINENO[0]}{H;g;p;q}" \
+ -e '# If this line ends with a line continuation, append it
+ # to the hold space
+ /\\$/H' \
+ -e '# If this line does not end with a line continuation,
+ # erase the line and set the hold buffer to it (thus
+ # erasing the hold buffer in the process)
+ /[^\]$/{s:^.*$::;h}' \
+ "${BASH_SOURCE[1]}" \
+ | sed -e '1d' -e 's:^:RETAIN-LEADING-SPACE:' \
+ | while read -r n ; do eerror " ${n#RETAIN-LEADING-SPACE}" ; done
+ eerror
+ fi
+ eerror "If you need support, post the output of 'emerge --info =$CATEGORY/$PF',"
+ eerror "the complete build log and the output of 'emerge -pqv =$CATEGORY/$PF'."
+ if [[ -n ${EBUILD_OVERLAY_ECLASSES} ]] ; then
+ eerror "This ebuild used the following eclasses from overlays:"
+ local x
+ for x in ${EBUILD_OVERLAY_ECLASSES} ; do
+ eerror " ${x}"
+ done
+ fi
+ if [ "${EMERGE_FROM}" != "binary" ] && \
+ ! has ${EBUILD_PHASE} prerm postrm && \
+ [ "${EBUILD#${PORTDIR}/}" == "${EBUILD}" ] ; then
+ local overlay=${EBUILD%/*}
+ overlay=${overlay%/*}
+ overlay=${overlay%/*}
+ if [[ -n $PORTAGE_REPO_NAME ]] ; then
+ eerror "This ebuild is from an overlay named" \
+ "'$PORTAGE_REPO_NAME': '${overlay}/'"
+ else
+ eerror "This ebuild is from an overlay: '${overlay}/'"
+ fi
+ elif [[ -n $PORTAGE_REPO_NAME && -f "$PORTDIR"/profiles/repo_name ]] ; then
+ local portdir_repo_name=$(<"$PORTDIR"/profiles/repo_name)
+ if [[ -n $portdir_repo_name && \
+ $portdir_repo_name != $PORTAGE_REPO_NAME ]] ; then
+ eerror "This ebuild is from a repository" \
+ "named '$PORTAGE_REPO_NAME'"
+ fi
+ fi
+
+ if [[ "${EBUILD_PHASE/depend}" == "${EBUILD_PHASE}" ]] ; then
+ local x
+ for x in $EBUILD_DEATH_HOOKS; do
+ ${x} "$@" >&2 1>&2
+ done
+ > "$PORTAGE_BUILDDIR/.die_hooks"
+ fi
+
+ [[ -n ${PORTAGE_LOG_FILE} ]] \
+ && eerror "The complete build log is located at '${PORTAGE_LOG_FILE}'."
+ if [ -f "${T}/environment" ] ; then
+ eerror "The ebuild environment file is located at '${T}/environment'."
+ elif [ -d "${T}" ] ; then
+ {
+ set
+ export
+ } > "${T}/die.env"
+ eerror "The ebuild environment file is located at '${T}/die.env'."
+ fi
+ eerror "S: '${S}'"
+
+ [[ -n $PORTAGE_EBUILD_EXIT_FILE ]] && > "$PORTAGE_EBUILD_EXIT_FILE"
+ [[ -n $PORTAGE_IPC_DAEMON ]] && "$PORTAGE_BIN_PATH"/ebuild-ipc exit 1
+
+ # subshell die support
+ [[ $BASHPID = $EBUILD_MASTER_PID ]] || kill -s SIGTERM $EBUILD_MASTER_PID
+ exit 1
+}
+
+# We need to implement diefunc() since environment.bz2 files contain
+# calls to it (due to alias expansion).
+diefunc() {
+ die "${@}"
+}
+
+quiet_mode() {
+ [[ ${PORTAGE_QUIET} -eq 1 ]]
+}
+
+vecho() {
+ quiet_mode || echo "$@"
+}
+
+# Internal logging function, don't use this in ebuilds
+elog_base() {
+ local messagetype
+ [ -z "${1}" -o -z "${T}" -o ! -d "${T}/logging" ] && return 1
+ case "${1}" in
+ INFO|WARN|ERROR|LOG|QA)
+ messagetype="${1}"
+ shift
+ ;;
+ *)
+ vecho -e " ${BAD}*${NORMAL} Invalid use of internal function elog_base(), next message will not be logged"
+ return 1
+ ;;
+ esac
+ echo -e "$@" | while read -r ; do
+ echo "$messagetype $REPLY" >> \
+ "${T}/logging/${EBUILD_PHASE:-other}"
+ done
+ return 0
+}
+
+eqawarn() {
+ elog_base QA "$*"
+ [[ ${RC_ENDCOL} != "yes" && ${LAST_E_CMD} == "ebegin" ]] && echo
+ echo -e "$@" | while read -r ; do
+ vecho " $WARN*$NORMAL $REPLY" >&2
+ done
+ LAST_E_CMD="eqawarn"
+ return 0
+}
+
+elog() {
+ elog_base LOG "$*"
+ [[ ${RC_ENDCOL} != "yes" && ${LAST_E_CMD} == "ebegin" ]] && echo
+ echo -e "$@" | while read -r ; do
+ echo " $GOOD*$NORMAL $REPLY"
+ done
+ LAST_E_CMD="elog"
+ return 0
+}
+
+esyslog() {
+ local pri=
+ local tag=
+
+ if [ -x /usr/bin/logger ]
+ then
+ pri="$1"
+ tag="$2"
+
+ shift 2
+ [ -z "$*" ] && return 0
+
+ /usr/bin/logger -p "${pri}" -t "${tag}" -- "$*"
+ fi
+
+ return 0
+}
+
+einfo() {
+ elog_base INFO "$*"
+ [[ ${RC_ENDCOL} != "yes" && ${LAST_E_CMD} == "ebegin" ]] && echo
+ echo -e "$@" | while read -r ; do
+ echo " $GOOD*$NORMAL $REPLY"
+ done
+ LAST_E_CMD="einfo"
+ return 0
+}
+
+einfon() {
+ elog_base INFO "$*"
+ [[ ${RC_ENDCOL} != "yes" && ${LAST_E_CMD} == "ebegin" ]] && echo
+ echo -ne " ${GOOD}*${NORMAL} $*"
+ LAST_E_CMD="einfon"
+ return 0
+}
+
+ewarn() {
+ elog_base WARN "$*"
+ [[ ${RC_ENDCOL} != "yes" && ${LAST_E_CMD} == "ebegin" ]] && echo
+ echo -e "$@" | while read -r ; do
+ echo " $WARN*$NORMAL $RC_INDENTATION$REPLY" >&2
+ done
+ LAST_E_CMD="ewarn"
+ return 0
+}
+
+eerror() {
+ elog_base ERROR "$*"
+ [[ ${RC_ENDCOL} != "yes" && ${LAST_E_CMD} == "ebegin" ]] && echo
+ echo -e "$@" | while read -r ; do
+ echo " $BAD*$NORMAL $RC_INDENTATION$REPLY" >&2
+ done
+ LAST_E_CMD="eerror"
+ return 0
+}
+
+ebegin() {
+ local msg="$*" dots spaces=${RC_DOT_PATTERN//?/ }
+ if [[ -n ${RC_DOT_PATTERN} ]] ; then
+ dots=$(printf "%$(( COLS - 3 - ${#RC_INDENTATION} - ${#msg} - 7 ))s" '')
+ dots=${dots//${spaces}/${RC_DOT_PATTERN}}
+ msg="${msg}${dots}"
+ else
+ msg="${msg} ..."
+ fi
+ einfon "${msg}"
+ [[ ${RC_ENDCOL} == "yes" ]] && echo
+ LAST_E_LEN=$(( 3 + ${#RC_INDENTATION} + ${#msg} ))
+ LAST_E_CMD="ebegin"
+ return 0
+}
+
+_eend() {
+	# Shared implementation behind eend: print a right-aligned
+	# [ ok ] / [ !! ] status marker.
+	#   $1 - exit status (default 0)
+	#   $2 - function used to print the failure message (default eerror)
+	#   remaining args - failure message text, printed only on failure
+	# Returns the given exit status.
+	local retval=${1:-0} efunc=${2:-eerror} msg
+	shift 2
+
+	if [[ ${retval} == "0" ]] ; then
+		msg="${BRACKET}[ ${GOOD}ok${BRACKET} ]${NORMAL}"
+	else
+		if [[ -n $* ]] ; then
+			${efunc} "$*"
+		fi
+		msg="${BRACKET}[ ${BAD}!!${BRACKET} ]${NORMAL}"
+	fi
+
+	if [[ ${RC_ENDCOL} == "yes" ]] ; then
+		echo -e "${ENDCOL} ${msg}"
+	else
+		# no cursor-movement escapes: pad manually to the right margin,
+		# accounting for the length of the preceding ebegin line
+		[[ ${LAST_E_CMD} == ebegin ]] || LAST_E_LEN=0
+		printf "%$(( COLS - LAST_E_LEN - 7 ))s%b\n" '' "${msg}"
+	fi
+
+	return ${retval}
+}
+
+eend() {
+ local retval=${1:-0}
+ shift
+
+ _eend ${retval} eerror "$*"
+
+ LAST_E_CMD="eend"
+ return ${retval}
+}
+
+KV_major() {
+ [[ -z $1 ]] && return 1
+
+ local KV=$@
+ echo "${KV%%.*}"
+}
+
+KV_minor() {
+ [[ -z $1 ]] && return 1
+
+ local KV=$@
+ KV=${KV#*.}
+ echo "${KV%%.*}"
+}
+
+KV_micro() {
+ [[ -z $1 ]] && return 1
+
+ local KV=$@
+ KV=${KV#*.*.}
+ echo "${KV%%[^[:digit:]]*}"
+}
+
+KV_to_int() {
+	# Convert a kernel version string (e.g. "2.6.39") into a single
+	# comparable integer: major*65536 + minor*256 + micro.
+	# Fails (returns 1, prints nothing) for versions below 2.2.0.
+	[[ -z $1 ]] && return 1
+
+	local KV_MAJOR=$(KV_major "$1")
+	local KV_MINOR=$(KV_minor "$1")
+	local KV_MICRO=$(KV_micro "$1")
+	local KV_int=$(( KV_MAJOR * 65536 + KV_MINOR * 256 + KV_MICRO ))
+
+	# We make version 2.2.0 the minimum version we will handle as
+	# a sanity check ... if its less, we fail ...
+	if [[ ${KV_int} -ge 131584 ]] ; then
+		echo "${KV_int}"
+		return 0
+	fi
+
+	return 1
+}
+
+_RC_GET_KV_CACHE=""
+get_KV() {
+ [[ -z ${_RC_GET_KV_CACHE} ]] \
+ && _RC_GET_KV_CACHE=$(uname -r)
+
+ echo $(KV_to_int "${_RC_GET_KV_CACHE}")
+
+ return $?
+}
+
+unset_colors() {
+ COLS=80
+ ENDCOL=
+
+ GOOD=
+ WARN=
+ BAD=
+ NORMAL=
+ HILITE=
+ BRACKET=
+}
+
+set_colors() {
+ COLS=${COLUMNS:-0} # bash's internal COLUMNS variable
+ (( COLS == 0 )) && COLS=$(set -- $(stty size 2>/dev/null) ; echo $2)
+ (( COLS > 0 )) || (( COLS = 80 ))
+
+	# Now, ${ENDCOL} will move us to the end of the
+	# column; regardless of character width
+ ENDCOL=$'\e[A\e['$(( COLS - 8 ))'C'
+ if [ -n "${PORTAGE_COLORMAP}" ] ; then
+ eval ${PORTAGE_COLORMAP}
+ else
+ GOOD=$'\e[32;01m'
+ WARN=$'\e[33;01m'
+ BAD=$'\e[31;01m'
+ HILITE=$'\e[36;01m'
+ BRACKET=$'\e[34;01m'
+ fi
+ NORMAL=$'\e[0m'
+}
+
+RC_ENDCOL="yes"
+RC_INDENTATION=''
+RC_DEFAULT_INDENT=2
+RC_DOT_PATTERN=''
+
+case "${NOCOLOR:-false}" in
+ yes|true)
+ unset_colors
+ ;;
+ no|false)
+ set_colors
+ ;;
+esac
+
+if [[ -z ${USERLAND} ]] ; then
+ case $(uname -s) in
+ *BSD|DragonFly)
+ export USERLAND="BSD"
+ ;;
+ *)
+ export USERLAND="GNU"
+ ;;
+ esac
+fi
+
+if [[ -z ${XARGS} ]] ; then
+ case ${USERLAND} in
+ BSD)
+ export XARGS="xargs"
+ ;;
+ *)
+ export XARGS="xargs -r"
+ ;;
+ esac
+fi
+
+hasq() {
+	# Deprecated alias for has(); emits a QA warning except during the
+	# prerm/postrm phases.
+	has $EBUILD_PHASE prerm postrm || eqawarn \
+		"QA Notice: The 'hasq' function is deprecated (replaced by 'has')"
+	has "$@"
+}
+
+hasv() {
+	# Verbose variant of has(): on success, also echo the needle ($1).
+	if has "$@" ; then
+		echo "$1"
+		return 0
+	fi
+	return 1
+}
+
+has() {
+	# Return 0 if $1 is equal to one of the remaining arguments,
+	# 1 otherwise.
+	local needle=$1
+	shift
+
+	local x
+	for x in "$@"; do
+		[ "${x}" = "${needle}" ] && return 0
+	done
+	return 1
+}
+
+# @FUNCTION: save_ebuild_env
+# @DESCRIPTION:
+# echo the current environment to stdout, filtering out redundant info.
+#
+# --exclude-init-phases causes pkg_nofetch and src_* phase functions to
+# be excluded from the output. These function are not needed for installation
+# or removal of the packages, and can therefore be safely excluded.
+#
+save_ebuild_env() {
+ (
+
+ if has --exclude-init-phases $* ; then
+ unset S _E_DOCDESTTREE_ _E_EXEDESTTREE_
+ if [[ -n $PYTHONPATH ]] ; then
+ export PYTHONPATH=${PYTHONPATH/${PORTAGE_PYM_PATH}:}
+ [[ -z $PYTHONPATH ]] && unset PYTHONPATH
+ fi
+ fi
+
+ # misc variables inherited from the calling environment
+ unset COLORTERM DISPLAY EDITOR LESS LESSOPEN LOGNAME LS_COLORS PAGER \
+ TERM TERMCAP USER ftp_proxy http_proxy no_proxy
+
+ # other variables inherited from the calling environment
+ unset CVS_RSH ECHANGELOG_USER GPG_AGENT_INFO \
+ SSH_AGENT_PID SSH_AUTH_SOCK STY WINDOW XAUTHORITY
+
+ # CCACHE and DISTCC config
+ unset ${!CCACHE_*} ${!DISTCC_*}
+
+ # There's no need to bloat environment.bz2 with internally defined
+ # functions and variables, so filter them out if possible.
+
+ for x in pkg_setup pkg_nofetch src_unpack src_prepare src_configure \
+ src_compile src_test src_install pkg_preinst pkg_postinst \
+ pkg_prerm pkg_postrm ; do
+ unset -f default_$x _eapi{0,1,2,3,4}_$x
+ done
+ unset x
+
+ unset -f assert assert_sigpipe_ok dump_trace die diefunc \
+ quiet_mode vecho elog_base eqawarn elog \
+ esyslog einfo einfon ewarn eerror ebegin _eend eend KV_major \
+ KV_minor KV_micro KV_to_int get_KV unset_colors set_colors has \
+ has_phase_defined_up_to \
+ hasg hasgq hasv hasq qa_source qa_call \
+ addread addwrite adddeny addpredict _sb_append_var \
+ lchown lchgrp esyslog use usev useq has_version portageq \
+ best_version use_with use_enable register_die_hook \
+ keepdir unpack strip_duplicate_slashes econf einstall \
+ dyn_setup dyn_unpack dyn_clean into insinto exeinto docinto \
+ insopts diropts exeopts libopts docompress \
+ abort_handler abort_prepare abort_configure abort_compile \
+ abort_test abort_install dyn_prepare dyn_configure \
+ dyn_compile dyn_test dyn_install \
+ dyn_preinst dyn_help debug-print debug-print-function \
+ debug-print-section inherit EXPORT_FUNCTIONS remove_path_entry \
+ save_ebuild_env filter_readonly_variables preprocess_ebuild_env \
+ set_unless_changed unset_unless_changed source_all_bashrcs \
+ ebuild_main ebuild_phase ebuild_phase_with_hooks \
+ _ebuild_arg_to_phase _ebuild_phase_funcs default \
+ _pipestatus \
+ ${QA_INTERCEPTORS}
+
+ # portage config variables and variables set directly by portage
+ unset ACCEPT_LICENSE BAD BRACKET BUILD_PREFIX COLS \
+ DISTCC_DIR DISTDIR DOC_SYMLINKS_DIR \
+ EBUILD_FORCE_TEST EBUILD_MASTER_PID \
+ ECLASS_DEPTH ENDCOL FAKEROOTKEY \
+ GOOD HILITE HOME \
+ LAST_E_CMD LAST_E_LEN LD_PRELOAD MISC_FUNCTIONS_ARGS MOPREFIX \
+ NOCOLOR NORMAL PKGDIR PKGUSE PKG_LOGDIR PKG_TMPDIR \
+ PORTAGE_BASHRCS_SOURCED PORTAGE_NONFATAL PORTAGE_QUIET \
+ PORTAGE_SANDBOX_DENY PORTAGE_SANDBOX_PREDICT \
+ PORTAGE_SANDBOX_READ PORTAGE_SANDBOX_WRITE PREROOTPATH \
+ QA_INTERCEPTORS \
+ RC_DEFAULT_INDENT RC_DOT_PATTERN RC_ENDCOL RC_INDENTATION \
+ ROOT ROOTPATH RPMDIR TEMP TMP TMPDIR USE_EXPAND \
+ WARN XARGS _RC_GET_KV_CACHE
+
+ # user config variables
+ unset DOC_SYMLINKS_DIR INSTALL_MASK PKG_INSTALL_MASK
+
+ declare -p
+ declare -fp
+ if [[ ${BASH_VERSINFO[0]} == 3 ]]; then
+ export
+ fi
+ )
+}
+
+true
diff --git a/portage_with_autodep/bin/lock-helper.py b/portage_with_autodep/bin/lock-helper.py
new file mode 100755
index 0000000..5f3ea9f
--- /dev/null
+++ b/portage_with_autodep/bin/lock-helper.py
@@ -0,0 +1,28 @@
+#!/usr/bin/python
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+import sys
+sys.path.insert(0, os.environ['PORTAGE_PYM_PATH'])
+import portage
+
+def main(args):
+
+ if args and sys.hexversion < 0x3000000 and not isinstance(args[0], unicode):
+ for i, x in enumerate(args):
+ args[i] = portage._unicode_decode(x, errors='strict')
+
+ # Make locks quiet since unintended locking messages displayed on
+ # stdout would corrupt the intended output of this program.
+ portage.locks._quiet = True
+ lock_obj = portage.locks.lockfile(args[0], wantnewlockfile=True)
+ sys.stdout.write('\0')
+ sys.stdout.flush()
+ sys.stdin.read(1)
+ portage.locks.unlockfile(lock_obj)
+ return portage.os.EX_OK
+
+if __name__ == "__main__":
+ rval = main(sys.argv[1:])
+ sys.exit(rval)
diff --git a/portage_with_autodep/bin/misc-functions.sh b/portage_with_autodep/bin/misc-functions.sh
new file mode 100755
index 0000000..8c191ff
--- /dev/null
+++ b/portage_with_autodep/bin/misc-functions.sh
@@ -0,0 +1,1002 @@
+#!/bin/bash
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+#
+# Miscellaneous shell functions that make use of the ebuild env but don't need
+# to be included directly in ebuild.sh.
+#
+# We're sourcing ebuild.sh here so that we inherit all of its goodness,
+# including bashrc trickery. This approach allows us to do our miscellaneous
+# shell work within the same env that ebuild.sh has, but without polluting
+# ebuild.sh itself with unneeded logic and shell code.
+#
+# XXX hack: clear the args so ebuild.sh doesn't see them
+MISC_FUNCTIONS_ARGS="$@"
+shift $#
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}/ebuild.sh"
+
+install_symlink_html_docs() {
+ cd "${D}" || die "cd failed"
+ #symlink the html documentation (if DOC_SYMLINKS_DIR is set in make.conf)
+ if [ -n "${DOC_SYMLINKS_DIR}" ] ; then
+ local mydocdir docdir
+ for docdir in "${HTMLDOC_DIR:-does/not/exist}" "${PF}/html" "${PF}/HTML" "${P}/html" "${P}/HTML" ; do
+ if [ -d "usr/share/doc/${docdir}" ] ; then
+ mydocdir="/usr/share/doc/${docdir}"
+ fi
+ done
+ if [ -n "${mydocdir}" ] ; then
+ local mysympath
+ if [ -z "${SLOT}" -o "${SLOT}" = "0" ] ; then
+ mysympath="${DOC_SYMLINKS_DIR}/${CATEGORY}/${PN}"
+ else
+ mysympath="${DOC_SYMLINKS_DIR}/${CATEGORY}/${PN}-${SLOT}"
+ fi
+ einfo "Symlinking ${mysympath} to the HTML documentation"
+ dodir "${DOC_SYMLINKS_DIR}/${CATEGORY}"
+ dosym "${mydocdir}" "${mysympath}"
+ fi
+ fi
+}
+
+# replacement for "readlink -f" or "realpath"
+canonicalize() {
+ local f=$1 b n=10 wd=$(pwd)
+ while (( n-- > 0 )); do
+ while [[ ${f: -1} = / && ${#f} -gt 1 ]]; do
+ f=${f%/}
+ done
+ b=${f##*/}
+ cd "${f%"${b}"}" 2>/dev/null || break
+ if [[ ! -L ${b} ]]; then
+ f=$(pwd -P)
+ echo "${f%/}/${b}"
+ cd "${wd}"
+ return 0
+ fi
+ f=$(readlink "${b}")
+ done
+ cd "${wd}"
+ return 1
+}
+
+prepcompress() {
+ local -a include exclude incl_d incl_f
+ local f g i real_f real_d
+
+ # Canonicalize path names and check for their existence.
+ real_d=$(canonicalize "${D}")
+ for (( i = 0; i < ${#PORTAGE_DOCOMPRESS[@]}; i++ )); do
+ real_f=$(canonicalize "${D}${PORTAGE_DOCOMPRESS[i]}")
+ f=${real_f#"${real_d}"}
+ if [[ ${real_f} != "${f}" ]] && [[ -d ${real_f} || -f ${real_f} ]]
+ then
+ include[${#include[@]}]=${f:-/}
+ elif [[ ${i} -ge 3 ]]; then
+ ewarn "prepcompress:" \
+ "ignoring nonexistent path '${PORTAGE_DOCOMPRESS[i]}'"
+ fi
+ done
+ for (( i = 0; i < ${#PORTAGE_DOCOMPRESS_SKIP[@]}; i++ )); do
+ real_f=$(canonicalize "${D}${PORTAGE_DOCOMPRESS_SKIP[i]}")
+ f=${real_f#"${real_d}"}
+ if [[ ${real_f} != "${f}" ]] && [[ -d ${real_f} || -f ${real_f} ]]
+ then
+ exclude[${#exclude[@]}]=${f:-/}
+ elif [[ ${i} -ge 1 ]]; then
+ ewarn "prepcompress:" \
+ "ignoring nonexistent path '${PORTAGE_DOCOMPRESS_SKIP[i]}'"
+ fi
+ done
+
+ # Remove redundant entries from lists.
+ # For the include list, remove any entries that are:
+ # a) contained in a directory in the include or exclude lists, or
+ # b) identical with an entry in the exclude list.
+ for (( i = ${#include[@]} - 1; i >= 0; i-- )); do
+ f=${include[i]}
+ for g in "${include[@]}"; do
+ if [[ ${f} == "${g%/}"/* ]]; then
+ unset include[i]
+ continue 2
+ fi
+ done
+ for g in "${exclude[@]}"; do
+ if [[ ${f} = "${g}" || ${f} == "${g%/}"/* ]]; then
+ unset include[i]
+ continue 2
+ fi
+ done
+ done
+ # For the exclude list, remove any entries that are:
+ # a) contained in a directory in the exclude list, or
+ # b) _not_ contained in a directory in the include list.
+ for (( i = ${#exclude[@]} - 1; i >= 0; i-- )); do
+ f=${exclude[i]}
+ for g in "${exclude[@]}"; do
+ if [[ ${f} == "${g%/}"/* ]]; then
+ unset exclude[i]
+ continue 2
+ fi
+ done
+ for g in "${include[@]}"; do
+ [[ ${f} == "${g%/}"/* ]] && continue 2
+ done
+ unset exclude[i]
+ done
+
+ # Split the include list into directories and files
+ for f in "${include[@]}"; do
+ if [[ -d ${D}${f} ]]; then
+ incl_d[${#incl_d[@]}]=${f}
+ else
+ incl_f[${#incl_f[@]}]=${f}
+ fi
+ done
+
+ # Queue up for compression.
+ # ecompress{,dir} doesn't like to be called with empty argument lists.
+ [[ ${#incl_d[@]} -gt 0 ]] && ecompressdir --queue "${incl_d[@]}"
+ [[ ${#incl_f[@]} -gt 0 ]] && ecompress --queue "${incl_f[@]/#/${D}}"
+ [[ ${#exclude[@]} -gt 0 ]] && ecompressdir --ignore "${exclude[@]}"
+ return 0
+}
+
+install_qa_check() {
+ local f x
+
+ cd "${D}" || die "cd failed"
+
+ export STRIP_MASK
+ prepall
+ has "${EAPI}" 0 1 2 3 || prepcompress
+ ecompressdir --dequeue
+ ecompress --dequeue
+
+ f=
+ for x in etc/app-defaults usr/man usr/info usr/X11R6 usr/doc usr/locale ; do
+ [[ -d $D/$x ]] && f+=" $x\n"
+ done
+
+ if [[ -n $f ]] ; then
+ eqawarn "QA Notice: This ebuild installs into the following deprecated directories:"
+ eqawarn
+ eqawarn "$f"
+ fi
+
+ # Now we look for all world writable files.
+ local i
+ for i in $(find "${D}/" -type f -perm -2); do
+ vecho "QA Security Notice:"
+ vecho "- ${i:${#D}:${#i}} will be a world writable file."
+ vecho "- This may or may not be a security problem, most of the time it is one."
+ vecho "- Please double check that $PF really needs a world writeable bit and file bugs accordingly."
+ sleep 1
+ done
+
+ if type -P scanelf > /dev/null && ! has binchecks ${RESTRICT}; then
+ local qa_var insecure_rpath=0 tmp_quiet=${PORTAGE_QUIET}
+ local x
+
+ # display warnings when using stricter because we die afterwards
+ if has stricter ${FEATURES} ; then
+ unset PORTAGE_QUIET
+ fi
+
+ # Make sure we disallow insecure RUNPATH/RPATHs.
+ # 1) References to PORTAGE_BUILDDIR are banned because it's a
+ # security risk. We don't want to load files from a
+ # temporary directory.
+ # 2) If ROOT != "/", references to ROOT are banned because
+ # that directory won't exist on the target system.
+ # 3) Null paths are banned because the loader will search $PWD when
+ # it finds null paths.
+ local forbidden_dirs="${PORTAGE_BUILDDIR}"
+ if [[ -n "${ROOT}" && "${ROOT}" != "/" ]]; then
+ forbidden_dirs+=" ${ROOT}"
+ fi
+ local dir l rpath_files=$(scanelf -F '%F:%r' -qBR "${D}")
+ f=""
+ for dir in ${forbidden_dirs}; do
+ for l in $(echo "${rpath_files}" | grep -E ":${dir}|::|: "); do
+ f+=" ${l%%:*}\n"
+ if ! has stricter ${FEATURES}; then
+ vecho "Auto fixing rpaths for ${l%%:*}"
+ TMPDIR="${dir}" scanelf -BXr "${l%%:*}" -o /dev/null
+ fi
+ done
+ done
+
+ # Reject set*id binaries with $ORIGIN in RPATH #260331
+ x=$(
+ find "${D}" -type f \( -perm -u+s -o -perm -g+s \) -print0 | \
+ xargs -0 scanelf -qyRF '%r %p' | grep '$ORIGIN'
+ )
+
+ # Print QA notice.
+ if [[ -n ${f}${x} ]] ; then
+ vecho -ne '\n'
+ eqawarn "QA Notice: The following files contain insecure RUNPATHs"
+ eqawarn " Please file a bug about this at http://bugs.gentoo.org/"
+ eqawarn " with the maintaining herd of the package."
+ eqawarn "${f}${f:+${x:+\n}}${x}"
+ vecho -ne '\n'
+ if [[ -n ${x} ]] || has stricter ${FEATURES} ; then
+ insecure_rpath=1
+ fi
+ fi
+
+ # TEXTRELs are baaaaaaaad
+ # Allow devs to mark things as ignorable ... e.g. things that are
+ # binary-only and upstream isn't cooperating (nvidia-glx) ... we
+ # allow ebuild authors to set QA_TEXTRELS_arch and QA_TEXTRELS ...
+ # the former overrides the latter ... regexes allowed ! :)
+ qa_var="QA_TEXTRELS_${ARCH/-/_}"
+ [[ -n ${!qa_var} ]] && QA_TEXTRELS=${!qa_var}
+ [[ -n ${QA_STRICT_TEXTRELS} ]] && QA_TEXTRELS=""
+ export QA_TEXTRELS="${QA_TEXTRELS} lib*/modules/*.ko"
+ f=$(scanelf -qyRF '%t %p' "${D}" | grep -v 'usr/lib/debug/')
+ if [[ -n ${f} ]] ; then
+ scanelf -qyRAF '%T %p' "${PORTAGE_BUILDDIR}"/ &> "${T}"/scanelf-textrel.log
+ vecho -ne '\n'
+ eqawarn "QA Notice: The following files contain runtime text relocations"
+ eqawarn " Text relocations force the dynamic linker to perform extra"
+ eqawarn " work at startup, waste system resources, and may pose a security"
+ eqawarn " risk. On some architectures, the code may not even function"
+ eqawarn " properly, if at all."
+ eqawarn " For more information, see http://hardened.gentoo.org/pic-fix-guide.xml"
+ eqawarn " Please include the following list of files in your report:"
+ eqawarn "${f}"
+ vecho -ne '\n'
+ die_msg="${die_msg} textrels,"
+ sleep 1
+ fi
+
+ # Also, executable stacks only matter on linux (and just glibc atm ...)
+ f=""
+ case ${CTARGET:-${CHOST}} in
+ *-linux-gnu*)
+ # Check for files with executable stacks, but only on arches which
+ # are supported at the moment. Keep this list in sync with
+ # http://hardened.gentoo.org/gnu-stack.xml (Arch Status)
+ case ${CTARGET:-${CHOST}} in
+ arm*|i?86*|ia64*|m68k*|s390*|sh*|x86_64*)
+ # Allow devs to mark things as ignorable ... e.g. things
+ # that are binary-only and upstream isn't cooperating ...
+ # we allow ebuild authors to set QA_EXECSTACK_arch and
+ # QA_EXECSTACK ... the former overrides the latter ...
+ # regexes allowed ! :)
+
+ qa_var="QA_EXECSTACK_${ARCH/-/_}"
+ [[ -n ${!qa_var} ]] && QA_EXECSTACK=${!qa_var}
+ [[ -n ${QA_STRICT_EXECSTACK} ]] && QA_EXECSTACK=""
+ qa_var="QA_WX_LOAD_${ARCH/-/_}"
+ [[ -n ${!qa_var} ]] && QA_WX_LOAD=${!qa_var}
+ [[ -n ${QA_STRICT_WX_LOAD} ]] && QA_WX_LOAD=""
+ export QA_EXECSTACK="${QA_EXECSTACK} lib*/modules/*.ko"
+ export QA_WX_LOAD="${QA_WX_LOAD} lib*/modules/*.ko"
+ f=$(scanelf -qyRAF '%e %p' "${D}" | grep -v 'usr/lib/debug/')
+ ;;
+ esac
+ ;;
+ esac
+ if [[ -n ${f} ]] ; then
+ # One more pass to help devs track down the source
+ scanelf -qyRAF '%e %p' "${PORTAGE_BUILDDIR}"/ &> "${T}"/scanelf-execstack.log
+ vecho -ne '\n'
+ eqawarn "QA Notice: The following files contain writable and executable sections"
+ eqawarn " Files with such sections will not work properly (or at all!) on some"
+ eqawarn " architectures/operating systems. A bug should be filed at"
+ eqawarn " http://bugs.gentoo.org/ to make sure the issue is fixed."
+ eqawarn " For more information, see http://hardened.gentoo.org/gnu-stack.xml"
+ eqawarn " Please include the following list of files in your report:"
+ eqawarn " Note: Bugs should be filed for the respective maintainers"
+ eqawarn " of the package in question and not hardened@g.o."
+ eqawarn "${f}"
+ vecho -ne '\n'
+ die_msg="${die_msg} execstacks"
+ sleep 1
+ fi
+
+ # Check for files built without respecting LDFLAGS
+ if [[ "${LDFLAGS}" == *,--hash-style=gnu* ]] && [[ "${PN}" != *-bin ]] ; then
+ qa_var="QA_DT_HASH_${ARCH/-/_}"
+ eval "[[ -n \${!qa_var} ]] && QA_DT_HASH=(\"\${${qa_var}[@]}\")"
+ f=$(scanelf -qyRF '%k %p' -k .hash "${D}" | sed -e "s:\.hash ::")
+ if [[ -n ${f} ]] ; then
+ echo "${f}" > "${T}"/scanelf-ignored-LDFLAGS.log
+ if [ "${QA_STRICT_DT_HASH-unset}" == unset ] ; then
+ if [[ ${#QA_DT_HASH[@]} -gt 1 ]] ; then
+ for x in "${QA_DT_HASH[@]}" ; do
+ sed -e "s#^${x#/}\$##" -i "${T}"/scanelf-ignored-LDFLAGS.log
+ done
+ else
+ local shopts=$-
+ set -o noglob
+ for x in ${QA_DT_HASH} ; do
+ sed -e "s#^${x#/}\$##" -i "${T}"/scanelf-ignored-LDFLAGS.log
+ done
+ set +o noglob
+ set -${shopts}
+ fi
+ fi
+ # Filter anything under /usr/lib/debug/ in order to avoid
+ # duplicate warnings for splitdebug files.
+ sed -e "s#^usr/lib/debug/.*##" -e "/^\$/d" -e "s#^#/#" \
+ -i "${T}"/scanelf-ignored-LDFLAGS.log
+ f=$(<"${T}"/scanelf-ignored-LDFLAGS.log)
+ if [[ -n ${f} ]] ; then
+ vecho -ne '\n'
+ eqawarn "${BAD}QA Notice: Files built without respecting LDFLAGS have been detected${NORMAL}"
+ eqawarn " Please include the following list of files in your report:"
+ eqawarn "${f}"
+ vecho -ne '\n'
+ sleep 1
+ else
+ rm -f "${T}"/scanelf-ignored-LDFLAGS.log
+ fi
+ fi
+ fi
+
+ # Save NEEDED information after removing self-contained providers
+ rm -f "$PORTAGE_BUILDDIR"/build-info/NEEDED{,.ELF.2}
+ scanelf -qyRF '%a;%p;%S;%r;%n' "${D}" | { while IFS= read -r l; do
+ arch=${l%%;*}; l=${l#*;}
+ obj="/${l%%;*}"; l=${l#*;}
+ soname=${l%%;*}; l=${l#*;}
+ rpath=${l%%;*}; l=${l#*;}; [ "${rpath}" = " - " ] && rpath=""
+ needed=${l%%;*}; l=${l#*;}
+ if [ -z "${rpath}" -o -n "${rpath//*ORIGIN*}" ]; then
+ # object doesn't contain $ORIGIN in its runpath attribute
+ echo "${obj} ${needed}" >> "${PORTAGE_BUILDDIR}"/build-info/NEEDED
+ echo "${arch:3};${obj};${soname};${rpath};${needed}" >> "${PORTAGE_BUILDDIR}"/build-info/NEEDED.ELF.2
+ else
+ dir=${obj%/*}
+ # replace $ORIGIN with the dirname of the current object for the lookup
+ opath=$(echo :${rpath}: | sed -e "s#.*:\(.*\)\$ORIGIN\(.*\):.*#\1${dir}\2#")
+ sneeded=$(echo ${needed} | tr , ' ')
+ rneeded=""
+ for lib in ${sneeded}; do
+ found=0
+ for path in ${opath//:/ }; do
+ [ -e "${D}/${path}/${lib}" ] && found=1 && break
+ done
+ [ "${found}" -eq 0 ] && rneeded="${rneeded},${lib}"
+ done
+ rneeded=${rneeded:1}
+ if [ -n "${rneeded}" ]; then
+ echo "${obj} ${rneeded}" >> "${PORTAGE_BUILDDIR}"/build-info/NEEDED
+ echo "${arch:3};${obj};${soname};${rpath};${rneeded}" >> "${PORTAGE_BUILDDIR}"/build-info/NEEDED.ELF.2
+ fi
+ fi
+ done }
+
+ if [[ ${insecure_rpath} -eq 1 ]] ; then
+ die "Aborting due to serious QA concerns with RUNPATH/RPATH"
+ elif [[ -n ${die_msg} ]] && has stricter ${FEATURES} ; then
+ die "Aborting due to QA concerns: ${die_msg}"
+ fi
+
+ # Check for shared libraries lacking SONAMEs
+ qa_var="QA_SONAME_${ARCH/-/_}"
+ eval "[[ -n \${!qa_var} ]] && QA_SONAME=(\"\${${qa_var}[@]}\")"
+ f=$(scanelf -ByF '%S %p' "${D}"{,usr/}lib*/lib*.so* | gawk '$2 == "" { print }' | sed -e "s:^[[:space:]]${D}:/:")
+ if [[ -n ${f} ]] ; then
+ echo "${f}" > "${T}"/scanelf-missing-SONAME.log
+ if [[ "${QA_STRICT_SONAME-unset}" == unset ]] ; then
+ if [[ ${#QA_SONAME[@]} -gt 1 ]] ; then
+ for x in "${QA_SONAME[@]}" ; do
+ sed -e "s#^/${x#/}\$##" -i "${T}"/scanelf-missing-SONAME.log
+ done
+ else
+ local shopts=$-
+ set -o noglob
+ for x in ${QA_SONAME} ; do
+ sed -e "s#^/${x#/}\$##" -i "${T}"/scanelf-missing-SONAME.log
+ done
+ set +o noglob
+ set -${shopts}
+ fi
+ fi
+ sed -e "/^\$/d" -i "${T}"/scanelf-missing-SONAME.log
+ f=$(<"${T}"/scanelf-missing-SONAME.log)
+ if [[ -n ${f} ]] ; then
+ vecho -ne '\n'
+ eqawarn "QA Notice: The following shared libraries lack a SONAME"
+ eqawarn "${f}"
+ vecho -ne '\n'
+ sleep 1
+ else
+ rm -f "${T}"/scanelf-missing-SONAME.log
+ fi
+ fi
+
+ # Check for shared libraries lacking NEEDED entries
+ qa_var="QA_DT_NEEDED_${ARCH/-/_}"
+ eval "[[ -n \${!qa_var} ]] && QA_DT_NEEDED=(\"\${${qa_var}[@]}\")"
+ f=$(scanelf -ByF '%n %p' "${D}"{,usr/}lib*/lib*.so* | gawk '$2 == "" { print }' | sed -e "s:^[[:space:]]${D}:/:")
+ if [[ -n ${f} ]] ; then
+ echo "${f}" > "${T}"/scanelf-missing-NEEDED.log
+ if [[ "${QA_STRICT_DT_NEEDED-unset}" == unset ]] ; then
+ if [[ ${#QA_DT_NEEDED[@]} -gt 1 ]] ; then
+ for x in "${QA_DT_NEEDED[@]}" ; do
+ sed -e "s#^/${x#/}\$##" -i "${T}"/scanelf-missing-NEEDED.log
+ done
+ else
+ local shopts=$-
+ set -o noglob
+ for x in ${QA_DT_NEEDED} ; do
+ sed -e "s#^/${x#/}\$##" -i "${T}"/scanelf-missing-NEEDED.log
+ done
+ set +o noglob
+ set -${shopts}
+ fi
+ fi
+ sed -e "/^\$/d" -i "${T}"/scanelf-missing-NEEDED.log
+ f=$(<"${T}"/scanelf-missing-NEEDED.log)
+ if [[ -n ${f} ]] ; then
+ vecho -ne '\n'
+ eqawarn "QA Notice: The following shared libraries lack NEEDED entries"
+ eqawarn "${f}"
+ vecho -ne '\n'
+ sleep 1
+ else
+ rm -f "${T}"/scanelf-missing-NEEDED.log
+ fi
+ fi
+
+ PORTAGE_QUIET=${tmp_quiet}
+ fi
+
+ local unsafe_files=$(find "${D}" -type f '(' -perm -2002 -o -perm -4002 ')')
+ if [[ -n ${unsafe_files} ]] ; then
+ eqawarn "QA Notice: Unsafe files detected (set*id and world writable)"
+ eqawarn "${unsafe_files}"
+ die "Unsafe files found in \${D}. Portage will not install them."
+ fi
+
+ if [[ -d ${D}/${D} ]] ; then
+ declare -i INSTALLTOD=0
+ for i in $(find "${D}/${D}/"); do
+ eqawarn "QA Notice: /${i##${D}/${D}} installed in \${D}/\${D}"
+ ((INSTALLTOD++))
+ done
+ die "Aborting due to QA concerns: ${INSTALLTOD} files installed in ${D}/${D}"
+ unset INSTALLTOD
+ fi
+
+ # Sanity check syntax errors in init.d scripts
+ local d
+ for d in /etc/conf.d /etc/init.d ; do
+ [[ -d ${D}/${d} ]] || continue
+ for i in "${D}"/${d}/* ; do
+ [[ -L ${i} ]] && continue
+ # if empty conf.d/init.d dir exists (baselayout), then i will be "/etc/conf.d/*" and not exist
+ [[ ! -e ${i} ]] && continue
+ bash -n "${i}" || die "The init.d file has syntax errors: ${i}"
+ done
+ done
+
+ # this should help to ensure that all (most?) shared libraries are executable
+ # and that all libtool scripts / static libraries are not executable
+ local j
+ for i in "${D}"opt/*/lib{,32,64} \
+ "${D}"lib{,32,64} \
+ "${D}"usr/lib{,32,64} \
+ "${D}"usr/X11R6/lib{,32,64} ; do
+ [[ ! -d ${i} ]] && continue
+
+ for j in "${i}"/*.so.* "${i}"/*.so ; do
+ [[ ! -e ${j} ]] && continue
+ [[ -L ${j} ]] && continue
+ [[ -x ${j} ]] && continue
+ vecho "making executable: ${j#${D}}"
+ chmod +x "${j}"
+ done
+
+ for j in "${i}"/*.a "${i}"/*.la ; do
+ [[ ! -e ${j} ]] && continue
+ [[ -L ${j} ]] && continue
+ [[ ! -x ${j} ]] && continue
+ vecho "removing executable bit: ${j#${D}}"
+ chmod -x "${j}"
+ done
+
+ for j in "${i}"/*.{a,dll,dylib,sl,so}.* "${i}"/*.{a,dll,dylib,sl,so} ; do
+ [[ ! -e ${j} ]] && continue
+ [[ ! -L ${j} ]] && continue
+ linkdest=$(readlink "${j}")
+ if [[ ${linkdest} == /* ]] ; then
+ vecho -ne '\n'
+ eqawarn "QA Notice: Found an absolute symlink in a library directory:"
+ eqawarn " ${j#${D}} -> ${linkdest}"
+ eqawarn " It should be a relative symlink if in the same directory"
+ eqawarn " or a linker script if it crosses the /usr boundary."
+ fi
+ done
+ done
+
+ # When installing static libraries into /usr/lib and shared libraries into
+ # /lib, we have to make sure we have a linker script in /usr/lib along side
+ # the static library, or gcc will utilize the static lib when linking :(.
+ # http://bugs.gentoo.org/4411
+ abort="no"
+ local a s
+ for a in "${D}"usr/lib*/*.a ; do
+ s=${a%.a}.so
+ if [[ ! -e ${s} ]] ; then
+ s=${s%usr/*}${s##*/usr/}
+ if [[ -e ${s} ]] ; then
+ vecho -ne '\n'
+ eqawarn "QA Notice: Missing gen_usr_ldscript for ${s##*/}"
+ abort="yes"
+ fi
+ fi
+ done
+ [[ ${abort} == "yes" ]] && die "add those ldscripts"
+
+ # Make sure people don't store libtool files or static libs in /lib
+ f=$(ls "${D}"lib*/*.{a,la} 2>/dev/null)
+ if [[ -n ${f} ]] ; then
+ vecho -ne '\n'
+ eqawarn "QA Notice: Excessive files found in the / partition"
+ eqawarn "${f}"
+ vecho -ne '\n'
+ die "static archives (*.a) and libtool library files (*.la) do not belong in /"
+ fi
+
+ # Verify that the libtool files don't contain bogus $D entries.
+ local abort=no gentoo_bug=no always_overflow=no
+ for a in "${D}"usr/lib*/*.la ; do
+ s=${a##*/}
+ if grep -qs "${D}" "${a}" ; then
+ vecho -ne '\n'
+ eqawarn "QA Notice: ${s} appears to contain PORTAGE_TMPDIR paths"
+ abort="yes"
+ fi
+ done
+ [[ ${abort} == "yes" ]] && die "soiled libtool library files found"
+
+ # Evaluate misc gcc warnings
+ if [[ -n ${PORTAGE_LOG_FILE} && -r ${PORTAGE_LOG_FILE} ]] ; then
+ # In debug mode, this variable definition and corresponding grep calls
+ # will produce false positives if they're shown in the trace.
+ local reset_debug=0
+ if [[ ${-/x/} != $- ]] ; then
+ set +x
+ reset_debug=1
+ fi
+ local m msgs=(
+ ": warning: dereferencing type-punned pointer will break strict-aliasing rules"
+ ": warning: dereferencing pointer .* does break strict-aliasing rules"
+ ": warning: implicit declaration of function"
+ ": warning: incompatible implicit declaration of built-in function"
+ ": warning: is used uninitialized in this function" # we'll ignore "may" and "might"
+ ": warning: comparisons like X<=Y<=Z do not have their mathematical meaning"
+ ": warning: null argument where non-null required"
+ ": warning: array subscript is below array bounds"
+ ": warning: array subscript is above array bounds"
+ ": warning: attempt to free a non-heap object"
+ ": warning: .* called with .*bigger.* than .* destination buffer"
+ ": warning: call to .* will always overflow destination buffer"
+ ": warning: assuming pointer wraparound does not occur when comparing"
+ ": warning: hex escape sequence out of range"
+ ": warning: [^ ]*-hand operand of comma .*has no effect"
+ ": warning: converting to non-pointer type .* from NULL"
+ ": warning: NULL used in arithmetic"
+ ": warning: passing NULL to non-pointer argument"
+ ": warning: the address of [^ ]* will always evaluate as"
+ ": warning: the address of [^ ]* will never be NULL"
+ ": warning: too few arguments for format"
+ ": warning: reference to local variable .* returned"
+ ": warning: returning reference to temporary"
+ ": warning: function returns address of local variable"
+ # this may be valid code :/
+ #": warning: multi-character character constant"
+ # need to check these two ...
+ #": warning: assuming signed overflow does not occur when"
+ #": warning: comparison with string literal results in unspecified behav"
+ # yacc/lex likes to trigger this one
+ #": warning: extra tokens at end of .* directive"
+ # only gcc itself triggers this ?
+ #": warning: .*noreturn.* function does return"
+ # these throw false positives when 0 is used instead of NULL
+ #": warning: missing sentinel in function call"
+ #": warning: not enough variable arguments to fit a sentinel"
+ )
+ abort="no"
+ i=0
+ local grep_cmd=grep
+ [[ $PORTAGE_LOG_FILE = *.gz ]] && grep_cmd=zgrep
+ while [[ -n ${msgs[${i}]} ]] ; do
+ m=${msgs[$((i++))]}
+ # force C locale to work around slow unicode locales #160234
+ f=$(LC_ALL=C $grep_cmd "${m}" "${PORTAGE_LOG_FILE}")
+ if [[ -n ${f} ]] ; then
+ abort="yes"
+ # for now, don't make this fatal (see bug #337031)
+ #case "$m" in
+ # ": warning: call to .* will always overflow destination buffer") always_overflow=yes ;;
+ #esac
+ if [[ $always_overflow = yes ]] ; then
+ eerror
+ eerror "QA Notice: Package has poor programming practices which may compile"
+ eerror " fine but exhibit random runtime failures."
+ eerror
+ eerror "${f}"
+ eerror
+ eerror " Please file a bug about this at http://bugs.gentoo.org/"
+ eerror " with the maintaining herd of the package."
+ eerror
+ else
+ vecho -ne '\n'
+ eqawarn "QA Notice: Package has poor programming practices which may compile"
+ eqawarn " fine but exhibit random runtime failures."
+ eqawarn "${f}"
+ vecho -ne '\n'
+ fi
+ fi
+ done
+ local cat_cmd=cat
+ [[ $PORTAGE_LOG_FILE = *.gz ]] && cat_cmd=zcat
+ [[ $reset_debug = 1 ]] && set -x
+ f=$($cat_cmd "${PORTAGE_LOG_FILE}" | \
+ "${PORTAGE_PYTHON:-/usr/bin/python}" "$PORTAGE_BIN_PATH"/check-implicit-pointer-usage.py || die "check-implicit-pointer-usage.py failed")
+ if [[ -n ${f} ]] ; then
+
+ # In the future this will be a forced "die". In preparation,
+ # increase the log level from "qa" to "eerror" so that people
+ # are aware this is a problem that must be fixed asap.
+
+ # just warn on 32bit hosts but bail on 64bit hosts
+ case ${CHOST} in
+ alpha*|hppa64*|ia64*|powerpc64*|mips64*|sparc64*|sparcv9*|x86_64*) gentoo_bug=yes ;;
+ esac
+
+ abort=yes
+
+ if [[ $gentoo_bug = yes ]] ; then
+ eerror
+ eerror "QA Notice: Package has poor programming practices which may compile"
+ eerror " but will almost certainly crash on 64bit architectures."
+ eerror
+ eerror "${f}"
+ eerror
+ eerror " Please file a bug about this at http://bugs.gentoo.org/"
+ eerror " with the maintaining herd of the package."
+ eerror
+ else
+ vecho -ne '\n'
+ eqawarn "QA Notice: Package has poor programming practices which may compile"
+ eqawarn " but will almost certainly crash on 64bit architectures."
+ eqawarn "${f}"
+ vecho -ne '\n'
+ fi
+
+ fi
+ if [[ ${abort} == "yes" ]] ; then
+ if [[ $gentoo_bug = yes || $always_overflow = yes ]] ; then
+ die "install aborted due to" \
+ "poor programming practices shown above"
+ else
+ echo "Please do not file a Gentoo bug and instead" \
+ "report the above QA issues directly to the upstream" \
+ "developers of this software." | fmt -w 70 | \
+ while read -r line ; do eqawarn "${line}" ; done
+ eqawarn "Homepage: ${HOMEPAGE}"
+ has stricter ${FEATURES} && die "install aborted due to" \
+ "poor programming practices shown above"
+ fi
+ fi
+ fi
+
+ # Portage regenerates this on the installed system.
+ rm -f "${D}"/usr/share/info/dir{,.gz,.bz2}
+
+ if has multilib-strict ${FEATURES} && \
+ [[ -x /usr/bin/file && -x /usr/bin/find ]] && \
+ [[ -n ${MULTILIB_STRICT_DIRS} && -n ${MULTILIB_STRICT_DENY} ]]
+ then
+ local abort=no dir file firstrun=yes
+ MULTILIB_STRICT_EXEMPT=$(echo ${MULTILIB_STRICT_EXEMPT} | sed -e 's:\([(|)]\):\\\1:g')
+ for dir in ${MULTILIB_STRICT_DIRS} ; do
+ [[ -d ${D}/${dir} ]] || continue
+ for file in $(find ${D}/${dir} -type f | grep -v "^${D}/${dir}/${MULTILIB_STRICT_EXEMPT}"); do
+ if file ${file} | egrep -q "${MULTILIB_STRICT_DENY}" ; then
+ if [[ ${firstrun} == yes ]] ; then
+ echo "Files matching a file type that is not allowed:"
+ firstrun=no
+ fi
+ abort=yes
+ echo " ${file#${D}//}"
+ fi
+ done
+ done
+ [[ ${abort} == yes ]] && die "multilib-strict check failed!"
+ fi
+
+ # ensure packages don't install systemd units automagically
+ if ! has systemd ${INHERITED} && \
+ [[ -d "${D}"/lib/systemd/system ]]
+ then
+ eqawarn "QA Notice: package installs systemd unit files (/lib/systemd/system)"
+ eqawarn " but does not inherit systemd.eclass."
+ has stricter ${FEATURES} \
+ && die "install aborted due to missing inherit of systemd.eclass"
+ fi
+}
+
+
+install_mask() {
+ local root="$1"
+ shift
+ local install_mask="$*"
+
+ # we don't want globbing for initial expansion, but afterwards, we do
+ local shopts=$-
+ set -o noglob
+ local no_inst
+ for no_inst in ${install_mask}; do
+ set +o noglob
+ quiet_mode || einfo "Removing ${no_inst}"
+ # normal stuff
+ rm -Rf "${root}"/${no_inst} >&/dev/null
+
+ # we also need to handle globs (*.a, *.h, etc)
+ find "${root}" \( -path "${no_inst}" -or -name "${no_inst}" \) \
+ -exec rm -fR {} \; >/dev/null 2>&1
+ done
+ # set everything back the way we found it
+ set +o noglob
+ set -${shopts}
+}
+
+preinst_mask() {
+ if [ -z "${D}" ]; then
+ eerror "${FUNCNAME}: D is unset"
+ return 1
+ fi
+
+ # Make sure $PWD is not ${D} so that we don't leave gmon.out files
+ # in there in case any tools were built with -pg in CFLAGS.
+ cd "${T}"
+
+ # remove man pages, info pages, docs if requested
+ local f
+ for f in man info doc; do
+ if has no${f} $FEATURES; then
+ INSTALL_MASK="${INSTALL_MASK} /usr/share/${f}"
+ fi
+ done
+
+ install_mask "${D}" "${INSTALL_MASK}"
+
+	# remove share dir if unnecessary
+ if has nodoc $FEATURES || has noman $FEATURES || has noinfo $FEATURES; then
+ rmdir "${D}usr/share" &> /dev/null
+ fi
+}
+
+preinst_sfperms() {
+ if [ -z "${D}" ]; then
+ eerror "${FUNCNAME}: D is unset"
+ return 1
+ fi
+ # Smart FileSystem Permissions
+ if has sfperms $FEATURES; then
+ local i
+ find "${D}" -type f -perm -4000 -print0 | \
+ while read -r -d $'\0' i ; do
+ if [ -n "$(find "$i" -perm -2000)" ] ; then
+ ebegin ">>> SetUID and SetGID: [chmod o-r] /${i#${D}}"
+ chmod o-r "$i"
+ eend $?
+ else
+ ebegin ">>> SetUID: [chmod go-r] /${i#${D}}"
+ chmod go-r "$i"
+ eend $?
+ fi
+ done
+ find "${D}" -type f -perm -2000 -print0 | \
+ while read -r -d $'\0' i ; do
+ if [ -n "$(find "$i" -perm -4000)" ] ; then
+ # This case is already handled
+ # by the SetUID check above.
+ true
+ else
+ ebegin ">>> SetGID: [chmod o-r] /${i#${D}}"
+ chmod o-r "$i"
+ eend $?
+ fi
+ done
+ fi
+}
+
+preinst_suid_scan() {
+ if [ -z "${D}" ]; then
+ eerror "${FUNCNAME}: D is unset"
+ return 1
+ fi
+ # total suid control.
+ if has suidctl $FEATURES; then
+ local i sfconf x
+ sfconf=${PORTAGE_CONFIGROOT}etc/portage/suidctl.conf
+ # sandbox prevents us from writing directly
+ # to files outside of the sandbox, but this
+	# can easily be bypassed using the addwrite() function
+ addwrite "${sfconf}"
+ vecho ">>> Performing suid scan in ${D}"
+ for i in $(find "${D}" -type f \( -perm -4000 -o -perm -2000 \) ); do
+ if [ -s "${sfconf}" ]; then
+ install_path=/${i#${D}}
+ if grep -q "^${install_path}\$" "${sfconf}" ; then
+ vecho "- ${install_path} is an approved suid file"
+ else
+ vecho ">>> Removing sbit on non registered ${install_path}"
+ for x in 5 4 3 2 1 0; do sleep 0.25 ; done
+ ls_ret=$(ls -ldh "${i}")
+ chmod ugo-s "${i}"
+ grep "^#${install_path}$" "${sfconf}" > /dev/null || {
+ vecho ">>> Appending commented out entry to ${sfconf} for ${PF}"
+ echo "## ${ls_ret%${D}*}${install_path}" >> "${sfconf}"
+ echo "#${install_path}" >> "${sfconf}"
+ # no delwrite() eh?
+ # delwrite ${sconf}
+ }
+ fi
+ else
+ vecho "suidctl feature set but you are lacking a ${sfconf}"
+ fi
+ done
+ fi
+}
+
+preinst_selinux_labels() {
+ if [ -z "${D}" ]; then
+ eerror "${FUNCNAME}: D is unset"
+ return 1
+ fi
+ if has selinux ${FEATURES}; then
+ # SELinux file labeling (needs to always be last in dyn_preinst)
+ # only attempt to label if setfiles is executable
+ # and 'context' is available on selinuxfs.
+ if [ -f /selinux/context -a -x /usr/sbin/setfiles -a -x /usr/sbin/selinuxconfig ]; then
+ vecho ">>> Setting SELinux security labels"
+ (
+ eval "$(/usr/sbin/selinuxconfig)" || \
+ die "Failed to determine SELinux policy paths.";
+
+ addwrite /selinux/context;
+
+ /usr/sbin/setfiles "${file_contexts_path}" -r "${D}" "${D}"
+ ) || die "Failed to set SELinux security labels."
+ else
+ # nonfatal, since merging can happen outside a SE kernel
+ # like during a recovery situation
+ vecho "!!! Unable to set SELinux security labels"
+ fi
+ fi
+}
+
+dyn_package() {
+ # Make sure $PWD is not ${D} so that we don't leave gmon.out files
+ # in there in case any tools were built with -pg in CFLAGS.
+ cd "${T}"
+ install_mask "${PORTAGE_BUILDDIR}/image" "${PKG_INSTALL_MASK}"
+ local tar_options=""
+ [[ $PORTAGE_VERBOSE = 1 ]] && tar_options+=" -v"
+ # Sandbox is disabled in case the user wants to use a symlink
+ # for $PKGDIR and/or $PKGDIR/All.
+ export SANDBOX_ON="0"
+ [ -z "${PORTAGE_BINPKG_TMPFILE}" ] && \
+ die "PORTAGE_BINPKG_TMPFILE is unset"
+ mkdir -p "${PORTAGE_BINPKG_TMPFILE%/*}" || die "mkdir failed"
+ tar $tar_options -cf - $PORTAGE_BINPKG_TAR_OPTS -C "${D}" . | \
+ $PORTAGE_BZIP2_COMMAND -c > "$PORTAGE_BINPKG_TMPFILE"
+ assert "failed to pack binary package: '$PORTAGE_BINPKG_TMPFILE'"
+ PYTHONPATH=${PORTAGE_PYM_PATH}${PYTHONPATH:+:}${PYTHONPATH} \
+ "${PORTAGE_PYTHON:-/usr/bin/python}" "$PORTAGE_BIN_PATH"/xpak-helper.py recompose \
+ "$PORTAGE_BINPKG_TMPFILE" "$PORTAGE_BUILDDIR/build-info"
+ if [ $? -ne 0 ]; then
+ rm -f "${PORTAGE_BINPKG_TMPFILE}"
+ die "Failed to append metadata to the tbz2 file"
+ fi
+ local md5_hash=""
+ if type md5sum &>/dev/null ; then
+ md5_hash=$(md5sum "${PORTAGE_BINPKG_TMPFILE}")
+ md5_hash=${md5_hash%% *}
+ elif type md5 &>/dev/null ; then
+ md5_hash=$(md5 "${PORTAGE_BINPKG_TMPFILE}")
+ md5_hash=${md5_hash##* }
+ fi
+ [ -n "${md5_hash}" ] && \
+ echo ${md5_hash} > "${PORTAGE_BUILDDIR}"/build-info/BINPKGMD5
+ vecho ">>> Done."
+ cd "${PORTAGE_BUILDDIR}"
+ >> "$PORTAGE_BUILDDIR/.packaged" || \
+ die "Failed to create $PORTAGE_BUILDDIR/.packaged"
+}
+
+dyn_spec() {
+ local sources_dir=/usr/src/rpm/SOURCES
+ mkdir -p "${sources_dir}"
+ declare -a tar_args=("${EBUILD}")
+ [[ -d ${FILESDIR} ]] && tar_args=("${EBUILD}" "${FILESDIR}")
+ tar czf "${sources_dir}/${PF}.tar.gz" \
+ "${tar_args[@]}" || \
+ die "Failed to create base rpm tarball."
+
+ cat <<__END1__ > ${PF}.spec
+Summary: ${DESCRIPTION}
+Name: ${PN}
+Version: ${PV}
+Release: ${PR}
+Copyright: GPL
+Group: portage/${CATEGORY}
+Source: ${PF}.tar.gz
+Buildroot: ${D}
+%description
+${DESCRIPTION}
+
+${HOMEPAGE}
+
+%prep
+%setup -c
+
+%build
+
+%install
+
+%clean
+
+%files
+/
+__END1__
+
+}
+
+dyn_rpm() {
+ cd "${T}" || die "cd failed"
+ local machine_name=$(uname -m)
+ local dest_dir=/usr/src/rpm/RPMS/${machine_name}
+ addwrite /usr/src/rpm
+ addwrite "${RPMDIR}"
+ dyn_spec
+ rpmbuild -bb --clean --rmsource "${PF}.spec" || die "Failed to integrate rpm spec file"
+ install -D "${dest_dir}/${PN}-${PV}-${PR}.${machine_name}.rpm" \
+ "${RPMDIR}/${CATEGORY}/${PN}-${PV}-${PR}.rpm" || \
+ die "Failed to move rpm"
+}
+
+die_hooks() {
+ [[ -f $PORTAGE_BUILDDIR/.die_hooks ]] && return
+ local x
+ for x in $EBUILD_DEATH_HOOKS ; do
+ $x >&2
+ done
+ > "$PORTAGE_BUILDDIR/.die_hooks"
+}
+
+success_hooks() {
+ local x
+ for x in $EBUILD_SUCCESS_HOOKS ; do
+ $x
+ done
+}
+
+if [ -n "${MISC_FUNCTIONS_ARGS}" ]; then
+ source_all_bashrcs
+ [ "$PORTAGE_DEBUG" == "1" ] && set -x
+ for x in ${MISC_FUNCTIONS_ARGS}; do
+ ${x}
+ done
+ unset x
+ [[ -n $PORTAGE_EBUILD_EXIT_FILE ]] && > "$PORTAGE_EBUILD_EXIT_FILE"
+ if [[ -n $PORTAGE_IPC_DAEMON ]] ; then
+ [[ ! -s $SANDBOX_LOG ]]
+ "$PORTAGE_BIN_PATH"/ebuild-ipc exit $?
+ fi
+fi
+
+:
diff --git a/portage_with_autodep/bin/portageq b/portage_with_autodep/bin/portageq
new file mode 100755
index 0000000..57a7c39
--- /dev/null
+++ b/portage_with_autodep/bin/portageq
@@ -0,0 +1,822 @@
+#!/usr/bin/python -O
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import signal
+import sys
+# This block ensures that ^C interrupts are handled quietly.
+try:
+
+	def exithandler(signum, frame):
+		# Ignore further signals while shutting down; exit with the
+		# conventional shell status 128 + signal number.
+		signal.signal(signal.SIGINT, signal.SIG_IGN)
+		signal.signal(signal.SIGTERM, signal.SIG_IGN)
+		sys.exit(128 + signum)
+
+	signal.signal(signal.SIGINT, exithandler)
+	signal.signal(signal.SIGTERM, exithandler)
+
+except KeyboardInterrupt:
+	# A ^C arrived before the handler was installed.
+	sys.exit(128 + signal.SIGINT)
+
+import os
+import types
+
+# Avoid sandbox violations after python upgrade.
+# When running under the portage sandbox, add the "pym" directory next
+# to this script to SANDBOX_WRITE (presumably so compiled bytecode can
+# be written there without triggering a violation -- confirm upstream).
+pym_path = os.path.join(os.path.dirname(
+	os.path.dirname(os.path.realpath(__file__))), "pym")
+if os.environ.get("SANDBOX_ON") == "1":
+	sandbox_write = os.environ.get("SANDBOX_WRITE", "").split(":")
+	if pym_path not in sandbox_write:
+		sandbox_write.append(pym_path)
+		os.environ["SANDBOX_WRITE"] = \
+			":".join(filter(None, sandbox_write))
+	del sandbox_write
+
+try:
+	import portage
+except ImportError:
+	# Fall back to the relative pym directory for an uninstalled copy.
+	sys.path.insert(0, pym_path)
+	import portage
+del pym_path
+
+from portage import os
+from portage.util import writemsg, writemsg_stdout
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'subprocess',
+ '_emerge.Package:Package',
+ '_emerge.RootConfig:RootConfig',
+ 'portage.dbapi._expand_new_virt:expand_new_virt',
+)
+
+def eval_atom_use(atom):
+	"""Evaluate USE-conditional parts of an Atom against $USE, when set.
+
+	Returns the atom unchanged if USE is not in the environment.
+	"""
+	if 'USE' in os.environ:
+		use = frozenset(os.environ['USE'].split())
+		atom = atom.evaluate_conditionals(use)
+	return atom
+
+#-----------------------------------------------------------------------------
+#
+# To add functionality to this tool, add a function below.
+#
+# The format for functions is:
+#
+# def function(argv):
+# """<list of options for this function>
+# <description of the function>
+# """
+# <code>
+#
+# "argv" is an array of the command line parameters provided after the command.
+#
+# Make sure you document the function in the right format. The documentation
+# is used to display help on the function.
+#
+# You do not need to add the function to any lists, this tool is introspective,
+# and will automatically add a command by the same name as the function!
+#
+
+def has_version(argv):
+	"""<root> <category/package>
+	Return code 0 if it's available, 1 otherwise.
+	"""
+	if (len(argv) < 2):
+		print("ERROR: insufficient parameters!")
+		sys.exit(2)
+
+	warnings = []
+
+	# atom_validate_strict, eapi and elog are module-level globals,
+	# configured based on whether we run inside an ebuild phase.
+	try:
+		atom = portage.dep.Atom(argv[1])
+	except portage.exception.InvalidAtom:
+		if atom_validate_strict:
+			portage.writemsg("ERROR: Invalid atom: '%s'\n" % argv[1],
+				noiselevel=-1)
+			return 2
+		else:
+			# Outside strict mode pass the raw string through; the
+			# dbapi match below may still raise InvalidAtom.
+			atom = argv[1]
+	else:
+		if atom_validate_strict:
+			try:
+				# Re-validate against the current EAPI; failure is a
+				# QA notice, not a hard error.
+				atom = portage.dep.Atom(argv[1], eapi=eapi)
+			except portage.exception.InvalidAtom as e:
+				warnings.append(
+					portage._unicode_decode("QA Notice: %s: %s") % \
+					('has_version', e))
+		atom = eval_atom_use(atom)
+
+	if warnings:
+		elog('eqawarn', warnings)
+
+	try:
+		mylist = portage.db[argv[0]]["vartree"].dbapi.match(atom)
+		if mylist:
+			sys.exit(0)
+		else:
+			sys.exit(1)
+	except KeyError:
+		sys.exit(1)
+	except portage.exception.InvalidAtom:
+		portage.writemsg("ERROR: Invalid atom: '%s'\n" % argv[1],
+			noiselevel=-1)
+		return 2
+has_version.uses_root = True
+
+
+def best_version(argv):
+	"""<root> <category/package>
+	Returns category/package-version (without .ebuild).
+	"""
+	# Structured like has_version above: validate (strictly inside an
+	# ebuild phase), then print the best installed match.
+	if (len(argv) < 2):
+		print("ERROR: insufficient parameters!")
+		sys.exit(2)
+
+	warnings = []
+
+	try:
+		atom = portage.dep.Atom(argv[1])
+	except portage.exception.InvalidAtom:
+		if atom_validate_strict:
+			portage.writemsg("ERROR: Invalid atom: '%s'\n" % argv[1],
+				noiselevel=-1)
+			return 2
+		else:
+			atom = argv[1]
+	else:
+		if atom_validate_strict:
+			try:
+				atom = portage.dep.Atom(argv[1], eapi=eapi)
+			except portage.exception.InvalidAtom as e:
+				warnings.append(
+					portage._unicode_decode("QA Notice: %s: %s") % \
+					('best_version', e))
+		atom = eval_atom_use(atom)
+
+	if warnings:
+		elog('eqawarn', warnings)
+
+	try:
+		mylist = portage.db[argv[0]]["vartree"].dbapi.match(atom)
+		# portage.best() picks the highest version from the matches;
+		# prints an empty line when there are no matches.
+		print(portage.best(mylist))
+	except KeyError:
+		sys.exit(1)
+best_version.uses_root = True
+
+
+def mass_best_version(argv):
+	"""<root> [<category/package>]+
+	Returns category/package-version (without .ebuild).
+	"""
+	if (len(argv) < 2):
+		print("ERROR: insufficient parameters!")
+		sys.exit(2)
+	try:
+		# One "<atom>:<best installed match>" line per argument.
+		for pack in argv[1:]:
+			mylist=portage.db[argv[0]]["vartree"].dbapi.match(pack)
+			print(pack+":"+portage.best(mylist))
+	except KeyError:
+		sys.exit(1)
+mass_best_version.uses_root = True
+
+# NOTE: metadata's docstring is assigned after the definition (below),
+# because it embeds the list of available keys from portage.auxdbkeys.
+def metadata(argv):
+	if (len(argv) < 4):
+		print("ERROR: insufficient parameters!", file=sys.stderr)
+		sys.exit(2)
+
+	root, pkgtype, pkgspec = argv[0:3]
+	metakeys = argv[3:]
+	# Map the user-facing package type to the corresponding tree name.
+	type_map = {
+		"ebuild":"porttree",
+		"binary":"bintree",
+		"installed":"vartree"}
+	if pkgtype not in type_map:
+		print("Unrecognized package type: '%s'" % pkgtype, file=sys.stderr)
+		sys.exit(1)
+	trees = portage.db
+	if os.path.realpath(root) == os.path.realpath(portage.settings["ROOT"]):
+		root = portage.settings["ROOT"] # contains the normalized $ROOT
+	try:
+		# One output line per requested key, in the order given.
+		values = trees[root][type_map[pkgtype]].dbapi.aux_get(
+			pkgspec, metakeys)
+		writemsg_stdout(''.join('%s\n' % x for x in values), noiselevel=-1)
+	except KeyError:
+		print("Package not found: '%s'" % pkgspec, file=sys.stderr)
+		sys.exit(1)
+
+metadata.__doc__ = """
+<root> <pkgtype> <category/package> [<key>]+
+Returns metadata values for the specified package.
+Available keys: %s
+""" % ','.join(sorted(x for x in portage.auxdbkeys \
+if not x.startswith('UNUSED_')))
+
+metadata.uses_root = True
+
+def contents(argv):
+	"""<root> <category/package>
+	List the files that are installed for a given package, with
+	one file listed on each line. All file names will begin with
+	<root>.
+	"""
+	if len(argv) != 2:
+		print("ERROR: expected 2 parameters, got %d!" % len(argv))
+		return 2
+
+	root, cpv = argv
+	vartree = portage.db[root]["vartree"]
+	if not vartree.dbapi.cpv_exists(cpv):
+		sys.stderr.write("Package not found: '%s'\n" % cpv)
+		return 1
+	cat, pkg = portage.catsplit(cpv)
+	# dblink gives access to the installed package's CONTENTS data.
+	db = portage.dblink(cat, pkg, root, vartree.settings,
+		treetype="vartree", vartree=vartree)
+	writemsg_stdout(''.join('%s\n' % x for x in sorted(db.getcontents())),
+		noiselevel=-1)
+contents.uses_root = True
+
+def owners(argv):
+	"""<root> [<filename>]+
+	Given a list of files, print the packages that own the files and which
+	files belong to each package. Files owned by a package are listed on
+	the lines below it, indented by a single tab character (\\t). All file
+	paths must either start with <root> or be a basename alone.
+	Returns 1 if no owners could be found, and 0 otherwise.
+	"""
+	if len(argv) < 2:
+		sys.stderr.write("ERROR: insufficient parameters!\n")
+		sys.stderr.flush()
+		return 2
+
+	from portage import catsplit, dblink
+	settings = portage.settings
+	root = settings["ROOT"]
+	vardb = portage.db[root]["vartree"].dbapi
+
+	cwd = None
+	try:
+		cwd = os.getcwd()
+	except OSError:
+		# cwd may have been deleted; only an error if we later need it
+		# to resolve a relative path.
+		pass
+
+	# Normalize arguments; keep track of which inputs remain unclaimed
+	# so we can report orphans at the end.
+	files = []
+	orphan_abs_paths = set()
+	orphan_basenames = set()
+	for f in argv[1:]:
+		f = portage.normalize_path(f)
+		is_basename = os.sep not in f
+		if not is_basename and f[:1] != os.sep:
+			if cwd is None:
+				sys.stderr.write("ERROR: cwd does not exist!\n")
+				sys.stderr.flush()
+				return 2
+			f = os.path.join(cwd, f)
+			f = portage.normalize_path(f)
+		if not is_basename and not f.startswith(root):
+			sys.stderr.write("ERROR: file paths must begin with <root>!\n")
+			sys.stderr.flush()
+			return 2
+		if is_basename:
+			files.append(f)
+			orphan_basenames.add(f)
+		else:
+			# Strip <root> (keeping the leading slash) for the vardb query.
+			files.append(f[len(root)-1:])
+			orphan_abs_paths.add(f)
+
+	owners = vardb._owners.get_owners(files)
+
+	msg = []
+	for pkg, owned_files in owners.items():
+		cpv = pkg.mycpv
+		msg.append("%s\n" % cpv)
+		for f in sorted(owned_files):
+			f_abs = os.path.join(root, f.lstrip(os.path.sep))
+			msg.append("\t%s\n" % (f_abs,))
+			orphan_abs_paths.discard(f_abs)
+			if orphan_basenames:
+				orphan_basenames.discard(os.path.basename(f_abs))
+
+	writemsg_stdout(''.join(msg), noiselevel=-1)
+
+	if orphan_abs_paths or orphan_basenames:
+		orphans = []
+		orphans.extend(orphan_abs_paths)
+		orphans.extend(orphan_basenames)
+		orphans.sort()
+		msg = []
+		msg.append("None of the installed packages claim these files:\n")
+		for f in orphans:
+			msg.append("\t%s\n" % (f,))
+		sys.stderr.write("".join(msg))
+		sys.stderr.flush()
+
+	if owners:
+		return 0
+	return 1
+
+owners.uses_root = True
+
+def is_protected(argv):
+	"""<root> <filename>
+	Given a single filename, return code 0 if it's protected, 1 otherwise.
+	The filename must begin with <root>.
+	"""
+	if len(argv) != 2:
+		sys.stderr.write("ERROR: expected 2 parameters, got %d!\n" % len(argv))
+		sys.stderr.flush()
+		return 2
+
+	root, filename = argv
+
+	err = sys.stderr
+	cwd = None
+	try:
+		cwd = os.getcwd()
+	except OSError:
+		pass
+
+	# Resolve relative paths against cwd, then require the result to
+	# live under <root>.
+	f = portage.normalize_path(filename)
+	if not f.startswith(os.path.sep):
+		if cwd is None:
+			err.write("ERROR: cwd does not exist!\n")
+			err.flush()
+			return 2
+		f = os.path.join(cwd, f)
+		f = portage.normalize_path(f)
+
+	if not f.startswith(root):
+		err.write("ERROR: file paths must begin with <root>!\n")
+		err.flush()
+		return 2
+
+	from portage.util import ConfigProtect
+
+	# Protection is decided by CONFIG_PROTECT minus CONFIG_PROTECT_MASK.
+	settings = portage.settings
+	protect = portage.util.shlex_split(settings.get("CONFIG_PROTECT", ""))
+	protect_mask = portage.util.shlex_split(
+		settings.get("CONFIG_PROTECT_MASK", ""))
+	protect_obj = ConfigProtect(root, protect, protect_mask)
+
+	if protect_obj.isprotected(f):
+		return 0
+	return 1
+
+is_protected.uses_root = True
+
+def filter_protected(argv):
+	"""<root>
+	Read filenames from stdin and write them to stdout if they are protected.
+	All filenames are delimited by \\n and must begin with <root>.
+	"""
+	if len(argv) != 1:
+		sys.stderr.write("ERROR: expected 1 parameter, got %d!\n" % len(argv))
+		sys.stderr.flush()
+		return 2
+
+	root, = argv
+	out = sys.stdout
+	err = sys.stderr
+	cwd = None
+	try:
+		cwd = os.getcwd()
+	except OSError:
+		pass
+
+	from portage.util import ConfigProtect
+
+	settings = portage.settings
+	protect = portage.util.shlex_split(settings.get("CONFIG_PROTECT", ""))
+	protect_mask = portage.util.shlex_split(
+		settings.get("CONFIG_PROTECT_MASK", ""))
+	protect_obj = ConfigProtect(root, protect, protect_mask)
+
+	protected = 0
+	errors = 0
+
+	# Stream mode: bad input lines are reported and counted, but do not
+	# abort processing of the remaining lines.
+	for line in sys.stdin:
+		filename = line.rstrip("\n")
+		f = portage.normalize_path(filename)
+		if not f.startswith(os.path.sep):
+			if cwd is None:
+				err.write("ERROR: cwd does not exist!\n")
+				err.flush()
+				errors += 1
+				continue
+			f = os.path.join(cwd, f)
+			f = portage.normalize_path(f)
+
+		if not f.startswith(root):
+			err.write("ERROR: file paths must begin with <root>!\n")
+			err.flush()
+			errors += 1
+			continue
+
+		if protect_obj.isprotected(f):
+			protected += 1
+			# Echo the original (pre-normalization) filename.
+			out.write("%s\n" % filename)
+	out.flush()
+
+	if errors:
+		return 2
+
+	return 0
+
+filter_protected.uses_root = True
+
+def best_visible(argv):
+	"""<root> [pkgtype] <atom>
+	Returns category/package-version (without .ebuild).
+	The pkgtype argument defaults to "ebuild" if unspecified,
+	otherwise it must be one of ebuild, binary, or installed.
+	"""
+	if (len(argv) < 2):
+		writemsg("ERROR: insufficient parameters!\n", noiselevel=-1)
+		return 2
+
+	pkgtype = "ebuild"
+	if len(argv) > 2:
+		pkgtype = argv[1]
+		atom = argv[2]
+	else:
+		atom = argv[1]
+
+	type_map = {
+		"ebuild":"porttree",
+		"binary":"bintree",
+		"installed":"vartree"}
+
+	if pkgtype not in type_map:
+		writemsg("Unrecognized package type: '%s'\n" % pkgtype,
+			noiselevel=-1)
+		return 2
+
+	db = portage.db[portage.settings["ROOT"]][type_map[pkgtype]].dbapi
+
+	try:
+		atom = portage.dep_expand(atom, mydb=db, settings=portage.settings)
+	except portage.exception.InvalidAtom:
+		writemsg("ERROR: Invalid atom: '%s'\n" % atom,
+			noiselevel=-1)
+		return 2
+
+	root_config = RootConfig(portage.settings,
+		portage.db[portage.settings["ROOT"]], None)
+
+	try:
+		# reversed, for descending order
+		# Walk matches highest-version-first and print the first one
+		# whose Package is visible (not masked/keyworded away).
+		for cpv in reversed(db.match(atom)):
+			metadata = dict(zip(Package.metadata_keys,
+				db.aux_get(cpv, Package.metadata_keys, myrepo=atom.repo)))
+			pkg = Package(built=(pkgtype != "ebuild"), cpv=cpv,
+				installed=(pkgtype=="installed"), metadata=metadata,
+				root_config=root_config, type_name=pkgtype)
+			if pkg.visible:
+				writemsg_stdout("%s\n" % (pkg.cpv,), noiselevel=-1)
+				return os.EX_OK
+	except KeyError:
+		pass
+	return 1
+best_visible.uses_root = True
+
+
+def mass_best_visible(argv):
+	"""<root> [<category/package>]+
+	Returns category/package-version (without .ebuild).
+	"""
+	# Like mass_best_version, but queries the ebuild tree (porttree)
+	# instead of the installed-package tree.
+	if (len(argv) < 2):
+		print("ERROR: insufficient parameters!")
+		sys.exit(2)
+	try:
+		for pack in argv[1:]:
+			mylist=portage.db[argv[0]]["porttree"].dbapi.match(pack)
+			print(pack+":"+portage.best(mylist))
+	except KeyError:
+		sys.exit(1)
+mass_best_visible.uses_root = True
+
+
+def all_best_visible(argv):
+	"""<root>
+	Returns all best_visible packages (without .ebuild).
+	"""
+	if len(argv) < 1:
+		sys.stderr.write("ERROR: insufficient parameters!\n")
+		sys.stderr.flush()
+		return 2
+
+	#print portage.db[argv[0]]["porttree"].dbapi.cp_all()
+	# For every category/package in the tree, print the best match
+	# (skipping packages with no matches at all).
+	for pkg in portage.db[argv[0]]["porttree"].dbapi.cp_all():
+		mybest=portage.best(portage.db[argv[0]]["porttree"].dbapi.match(pkg))
+		if mybest:
+			print(mybest)
+all_best_visible.uses_root = True
+
+
+def match(argv):
+	"""<root> <atom>
+	Returns a \\n separated list of category/package-version.
+	When given an empty string, all installed packages will
+	be listed.
+	"""
+	if len(argv) != 2:
+		print("ERROR: expected 2 parameters, got %d!" % len(argv))
+		sys.exit(2)
+	root, atom = argv
+	if atom:
+		# Strict validation only applies inside an ebuild phase.
+		if atom_validate_strict and not portage.isvalidatom(atom):
+			portage.writemsg("ERROR: Invalid atom: '%s'\n" % atom,
+				noiselevel=-1)
+			return 2
+		results = portage.db[root]["vartree"].dbapi.match(atom)
+	else:
+		# Empty atom: list every installed package.
+		results = portage.db[root]["vartree"].dbapi.cpv_all()
+		results.sort()
+	for cpv in results:
+		print(cpv)
+match.uses_root = True
+
+def expand_virtual(argv):
+	"""<root> <atom>
+	Returns a \\n separated list of atoms expanded from a
+	given virtual atom (GLEP 37 virtuals only),
+	excluding blocker atoms. Satisfied
+	virtual atoms are not included in the output, since
+	they are expanded to real atoms which are displayed.
+	Unsatisfied virtual atoms are displayed without
+	any expansion. The "match" command can be used to
+	resolve the returned atoms to specific installed
+	packages.
+	"""
+	if len(argv) != 2:
+		writemsg("ERROR: expected 2 parameters, got %d!\n" % len(argv),
+			noiselevel=-1)
+		return 2
+
+	root, atom = argv
+
+	try:
+		# expand_new_virt is lazily imported at module level.
+		results = list(expand_new_virt(
+			portage.db[root]["vartree"].dbapi, atom))
+	except portage.exception.InvalidAtom:
+		writemsg("ERROR: Invalid atom: '%s'\n" % atom,
+			noiselevel=-1)
+		return 2
+
+	results.sort()
+	for x in results:
+		if not x.blocker:
+			writemsg_stdout("%s\n" % (x,))
+
+	return os.EX_OK
+
+expand_virtual.uses_root = True
+
+# --- Simple configuration queries: each prints one settings value. ---
+
+def vdb_path(argv):
+	"""
+	Returns the path used for the var(installed) package database for the
+	set environment/configuration options.
+	"""
+	out = sys.stdout
+	out.write(os.path.join(portage.settings["EROOT"], portage.VDB_PATH) + "\n")
+	out.flush()
+	return os.EX_OK
+
+def gentoo_mirrors(argv):
+	"""
+	Returns the mirrors set to use in the portage configuration.
+	"""
+	print(portage.settings["GENTOO_MIRRORS"])
+
+
+def portdir(argv):
+	"""
+	Returns the PORTDIR path.
+	"""
+	print(portage.settings["PORTDIR"])
+
+
+def config_protect(argv):
+	"""
+	Returns the CONFIG_PROTECT paths.
+	"""
+	print(portage.settings["CONFIG_PROTECT"])
+
+
+def config_protect_mask(argv):
+	"""
+	Returns the CONFIG_PROTECT_MASK paths.
+	"""
+	print(portage.settings["CONFIG_PROTECT_MASK"])
+
+
+def portdir_overlay(argv):
+	"""
+	Returns the PORTDIR_OVERLAY path.
+	"""
+	print(portage.settings["PORTDIR_OVERLAY"])
+
+
+def pkgdir(argv):
+	"""
+	Returns the PKGDIR path.
+	"""
+	print(portage.settings["PKGDIR"])
+
+
+def distdir(argv):
+	"""
+	Returns the DISTDIR path.
+	"""
+	print(portage.settings["DISTDIR"])
+
+
+def envvar(argv):
+	"""<variable>+
+	Returns a specific environment variable as exists prior to ebuild.sh.
+	Similar to: emerge --verbose --info | egrep '^<variable>='
+	"""
+	# -v switches to NAME='value' output format.
+	verbose = "-v" in argv
+	if verbose:
+		argv.pop(argv.index("-v"))
+
+	if len(argv) == 0:
+		print("ERROR: insufficient parameters!")
+		sys.exit(2)
+
+	for arg in argv:
+		if verbose:
+			print(arg +"='"+ portage.settings[arg] +"'")
+		else:
+			print(portage.settings[arg])
+
+def get_repos(argv):
+	"""<root>
+	Returns all repos with names (repo_name file) argv[0] = $ROOT
+	"""
+	if len(argv) < 1:
+		print("ERROR: insufficient parameters!")
+		sys.exit(2)
+	# Space-separated list of repository names on one line.
+	print(" ".join(portage.db[argv[0]]["porttree"].dbapi.getRepositories()))
+
+def get_repo_path(argv):
+	"""<root> <repo_id>+
+	Returns the path to the repo named argv[1], argv[0] = $ROOT
+	"""
+	if len(argv) < 2:
+		print("ERROR: insufficient parameters!")
+		sys.exit(2)
+	for arg in argv[1:]:
+		path = portage.db[argv[0]]["porttree"].dbapi.getRepositoryPath(arg)
+		# Unknown repos produce an empty line rather than an error.
+		if path is None:
+			path = ""
+		print(path)
+
+def list_preserved_libs(argv):
+	"""<root>
+	Print a list of libraries preserved during a package update in the form
+	package: path. Returns 1 if no preserved libraries could be found,
+	0 otherwise.
+	"""
+
+	if len(argv) != 1:
+		print("ERROR: wrong number of arguments")
+		sys.exit(2)
+	mylibs = portage.db[argv[0]]["vartree"].dbapi._plib_registry.getPreservedLibs()
+	rValue = 1
+	msg = []
+	# Format: one line per cpv, followed by its preserved lib paths on
+	# the same line (space-separated), newline-terminated.
+	for cpv in sorted(mylibs):
+		msg.append(cpv)
+		for path in mylibs[cpv]:
+			msg.append(' ' + path)
+			rValue = 0
+		msg.append('\n')
+	writemsg_stdout(''.join(msg), noiselevel=-1)
+	return rValue
+list_preserved_libs.uses_root = True
+
+#-----------------------------------------------------------------------------
+#
+# DO NOT CHANGE CODE BEYOND THIS POINT - IT'S NOT NEEDED!
+#
+
+if not portage.const._ENABLE_PRESERVE_LIBS:
+	del list_preserved_libs
+
+# The command set is discovered by introspection: every module-level
+# function not listed in non_commands becomes a CLI command.
+non_commands = frozenset(['elog', 'eval_atom_use',
+	'exithandler', 'expand_new_virt', 'main',
+	'usage', 'writemsg', 'writemsg_stdout'])
+commands = sorted(k for k, v in globals().items() \
+	if k not in non_commands and isinstance(v, types.FunctionType))
+
+def usage(argv):
+	"""Print the tool banner and the list of commands, formatting each
+	command's docstring (full text with --help, summary otherwise)."""
+	print(">>> Portage information query tool")
+	print(">>> %s" % portage.VERSION)
+	print(">>> Usage: portageq <command> [<option> ...]")
+	print("")
+	print("Available commands:")
+
+	#
+	# Show our commands -- we do this by scanning the functions in this
+	# file, and formatting each functions documentation.
+	#
+	help_mode = '--help' in sys.argv
+	for name in commands:
+		# Drop non-functions
+		obj = globals()[name]
+
+		doc = obj.__doc__
+		if (doc == None):
+			print("   " + name)
+			print("      MISSING DOCUMENTATION!")
+			print("")
+			continue
+
+		lines = doc.lstrip("\n").split("\n")
+		print("   " + name + " " + lines[0].strip())
+		if (len(sys.argv) > 1):
+			if (not help_mode):
+				# Without --help, trim the trailing line of the doc.
+				lines = lines[:-1]
+			for line in lines[1:]:
+				print("      " + line.strip())
+	if (len(sys.argv) == 1):
+		print("\nRun portageq with --help for info")
+
+# Strict atom validation (and QA elog output) applies only when running
+# inside an ebuild phase, indicated by EBUILD_PHASE in the environment.
+atom_validate_strict = "EBUILD_PHASE" in os.environ
+eapi = None
+if atom_validate_strict:
+	eapi = os.environ.get('EAPI')
+
+	def elog(elog_funcname, lines):
+		# Route messages through the shell elog helpers (eqawarn etc.)
+		# from isolated-functions.sh, one call per line.
+		cmd = "source '%s/isolated-functions.sh' ; " % \
+			os.environ["PORTAGE_BIN_PATH"]
+		for line in lines:
+			cmd += "%s %s ; " % (elog_funcname, portage._shell_quote(line))
+		subprocess.call([portage.const.BASH_BINARY, "-c", cmd])
+
+else:
+	# Outside of an ebuild phase, elog messages are silently dropped.
+	def elog(elog_funcname, lines):
+		pass
+
+def main():
+	"""Dispatch sys.argv[1] to the matching command function and exit
+	with its return value (or a usage error)."""
+
+	nocolor = os.environ.get('NOCOLOR')
+	if nocolor in ('yes', 'true'):
+		portage.output.nocolor()
+
+	if len(sys.argv) < 2:
+		usage(sys.argv)
+		sys.exit(os.EX_USAGE)
+
+	for x in sys.argv:
+		if x in ("-h", "--help"):
+			usage(sys.argv)
+			sys.exit(os.EX_OK)
+		elif x == "--version":
+			print("Portage", portage.VERSION)
+			sys.exit(os.EX_OK)
+
+	cmd = sys.argv[1]
+	function = globals().get(cmd)
+	if function is None or cmd not in commands:
+		usage(sys.argv)
+		sys.exit(os.EX_USAGE)
+	function = globals()[cmd]
+	# Commands flagged uses_root take <root> as their first argument;
+	# validate it and export it so portage picks it up at import time.
+	uses_root = getattr(function, "uses_root", False) and len(sys.argv) > 2
+	if uses_root:
+		if not os.path.isdir(sys.argv[2]):
+			sys.stderr.write("Not a directory: '%s'\n" % sys.argv[2])
+			sys.stderr.write("Run portageq with --help for info\n")
+			sys.stderr.flush()
+			sys.exit(os.EX_USAGE)
+		os.environ["ROOT"] = sys.argv[2]
+
+	args = sys.argv[2:]
+	# On Python 2, decode argv bytes into unicode for portage's APIs.
+	if args and sys.hexversion < 0x3000000 and not isinstance(args[0], unicode):
+		for i in range(len(args)):
+			args[i] = portage._unicode_decode(args[i])
+
+	try:
+		if uses_root:
+			# Replace the raw <root> argument with the normalized value.
+			args[0] = portage.settings["ROOT"]
+		retval = function(args)
+		if retval:
+			sys.exit(retval)
+	except portage.exception.PermissionDenied as e:
+		sys.stderr.write("Permission denied: '%s'\n" % str(e))
+		sys.exit(e.errno)
+	except portage.exception.ParseError as e:
+		sys.stderr.write("%s\n" % str(e))
+		sys.exit(1)
+	except portage.exception.AmbiguousPackageName as e:
+		# Multiple matches thrown from cpv_expand
+		pkgs = e.args[0]
+		# An error has occurred so we writemsg to stderr and exit nonzero.
+		portage.writemsg("You specified an unqualified atom that matched multiple packages:\n", noiselevel=-1)
+		for pkg in pkgs:
+			portage.writemsg("* %s\n" % pkg, noiselevel=-1)
+		portage.writemsg("\nPlease use a more specific atom.\n", noiselevel=-1)
+		sys.exit(1)
+
+main()
+
+#-----------------------------------------------------------------------------
diff --git a/portage_with_autodep/bin/quickpkg b/portage_with_autodep/bin/quickpkg
new file mode 100755
index 0000000..09723f5
--- /dev/null
+++ b/portage_with_autodep/bin/quickpkg
@@ -0,0 +1,291 @@
+#!/usr/bin/python
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import errno
+import math
+import optparse
+import signal
+import sys
+import tarfile
+
+try:
+ import portage
+except ImportError:
+ from os import path as osp
+ sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+ import portage
+
+from portage import os
+from portage import xpak
+from portage.dbapi.dep_expand import dep_expand
+from portage.dep import use_reduce
+from portage.exception import InvalidAtom, InvalidData, InvalidDependString, \
+ PackageSetNotFound, PermissionDenied
+from portage.util import ConfigProtect, ensure_dirs, shlex_split
+from portage.dbapi.vartree import dblink, tar_contents
+from portage.checksum import perform_md5
+from portage._sets import load_default_config, SETPREFIX
+
+def quickpkg_atom(options, infos, arg, eout):
+	"""Build a binary package (.tbz2) for every installed package matching
+	the atom *arg*, recording results in the *infos* dict
+	(keys: "successes", "missing", "config_files_excluded").
+	"""
+	settings = portage.settings
+	root = portage.settings["ROOT"]
+	trees = portage.db[root]
+	vartree = trees["vartree"]
+	vardb = vartree.dbapi
+	bintree = trees["bintree"]
+
+	include_config = options.include_config == "y"
+	include_unmodified_config = options.include_unmodified_config == "y"
+	fix_metadata_keys = ["PF", "CATEGORY"]
+
+	try:
+		atom = dep_expand(arg, mydb=vardb, settings=vartree.settings)
+	except ValueError as e:
+		# Multiple matches thrown from cpv_expand
+		eout.eerror("Please use a more specific atom: %s" % \
+			" ".join(e.args[0]))
+		del e
+		infos["missing"].append(arg)
+		return
+	except (InvalidAtom, InvalidData):
+		eout.eerror("Invalid atom: %s" % (arg,))
+		infos["missing"].append(arg)
+		return
+	if atom[:1] == '=' and arg[:1] != '=':
+		# dep_expand() allows missing '=' but it's really invalid
+		eout.eerror("Invalid atom: %s" % (arg,))
+		infos["missing"].append(arg)
+		return
+
+	matches = vardb.match(atom)
+	pkgs_for_arg = 0
+	for cpv in matches:
+		excluded_config_files = []
+		bintree.prevent_collision(cpv)
+		cat, pkg = portage.catsplit(cpv)
+		dblnk = dblink(cat, pkg, root,
+			vartree.settings, treetype="vartree",
+			vartree=vartree)
+		# Hold the vdb lock while reading contents/metadata, so the
+		# package cannot be unmerged underneath us; tolerate running
+		# unprivileged (PermissionDenied -> proceed without the lock).
+		have_lock = False
+		try:
+			dblnk.lockdb()
+			have_lock = True
+		except PermissionDenied:
+			pass
+		try:
+			if not dblnk.exists():
+				# unmerged by a concurrent process
+				continue
+			iuse, use, restrict = vardb.aux_get(cpv,
+				["IUSE","USE","RESTRICT"])
+			iuse = [ x.lstrip("+-") for x in iuse.split() ]
+			use = use.split()
+			try:
+				restrict = use_reduce(restrict, uselist=use, flat=True)
+			except InvalidDependString as e:
+				eout.eerror("Invalid RESTRICT metadata " + \
+					"for '%s': %s; skipping" % (cpv, str(e)))
+				del e
+				continue
+			# Warn about packages that may not be redistributable.
+			if "bindist" in iuse and "bindist" not in use:
+				eout.ewarn("%s: package was emerged with USE=-bindist!" % cpv)
+				eout.ewarn("%s: it might not be legal to redistribute this." % cpv)
+			elif "bindist" in restrict:
+				eout.ewarn("%s: package has RESTRICT=bindist!" % cpv)
+				eout.ewarn("%s: it might not be legal to redistribute this." % cpv)
+			eout.ebegin("Building package for %s" % cpv)
+			pkgs_for_arg += 1
+			contents = dblnk.getcontents()
+			protect = None
+			if not include_config:
+				confprot = ConfigProtect(root,
+					shlex_split(settings.get("CONFIG_PROTECT", "")),
+					shlex_split(settings.get("CONFIG_PROTECT_MASK", "")))
+				# Predicate passed to tar_contents(): True means the
+				# file is excluded from the archive.
+				def protect(filename):
+					if not confprot.isprotected(filename):
+						return False
+					if include_unmodified_config:
+						file_data = contents[filename]
+						if file_data[0] == "obj":
+							orig_md5 = file_data[2].lower()
+							cur_md5 = perform_md5(filename, calc_prelink=1)
+							if orig_md5 == cur_md5:
+								# Unmodified since install: keep it.
+								return False
+					excluded_config_files.append(filename)
+					return True
+			# Repair stale PF/CATEGORY in the vdb before packaging.
+			existing_metadata = dict(zip(fix_metadata_keys,
+				vardb.aux_get(cpv, fix_metadata_keys)))
+			category, pf = portage.catsplit(cpv)
+			required_metadata = {}
+			required_metadata["CATEGORY"] = category
+			required_metadata["PF"] = pf
+			update_metadata = {}
+			for k, v in required_metadata.items():
+				if v != existing_metadata[k]:
+					update_metadata[k] = v
+			if update_metadata:
+				vardb.aux_update(cpv, update_metadata)
+			# Write the tbz2: bzip2 tarball of the files, with the xpak
+			# metadata blob appended afterwards.
+			xpdata = xpak.xpak(dblnk.dbdir)
+			binpkg_tmpfile = os.path.join(bintree.pkgdir,
+				cpv + ".tbz2." + str(os.getpid()))
+			ensure_dirs(os.path.dirname(binpkg_tmpfile))
+			tar = tarfile.open(binpkg_tmpfile, "w:bz2")
+			tar_contents(contents, root, tar, protect=protect)
+			tar.close()
+			xpak.tbz2(binpkg_tmpfile).recompose_mem(xpdata)
+		finally:
+			if have_lock:
+				dblnk.unlockdb()
+		bintree.inject(cpv, filename=binpkg_tmpfile)
+		binpkg_path = bintree.getname(cpv)
+		try:
+			s = os.stat(binpkg_path)
+		except OSError as e:
+			# Sanity check, shouldn't happen normally.
+			eout.eend(1)
+			eout.eerror(str(e))
+			del e
+			eout.eerror("Failed to create package: '%s'" % binpkg_path)
+		else:
+			eout.eend(0)
+			infos["successes"].append((cpv, s.st_size))
+			infos["config_files_excluded"] += len(excluded_config_files)
+			for filename in excluded_config_files:
+				eout.ewarn("Excluded config: '%s'" % filename)
+	if not pkgs_for_arg:
+		eout.eerror("Could not find anything " + \
+			"to match '%s'; skipping" % arg)
+		infos["missing"].append(arg)
+
+def quickpkg_set(options, infos, arg, eout):
+	"""Expand a package-set argument (e.g. "@world", *arg* starts with
+	SETPREFIX) into atoms and build each one via quickpkg_atom()."""
+	root = portage.settings["ROOT"]
+	trees = portage.db[root]
+	vartree = trees["vartree"]
+
+	settings = vartree.settings
+	settings._init_dirs()
+	setconfig = load_default_config(settings, trees)
+	sets = setconfig.getSets()
+
+	# Strip the leading set-prefix character to get the set name.
+	set = arg[1:]
+	if not set in sets:
+		eout.eerror("Package set not found: '%s'; skipping" % (arg,))
+		infos["missing"].append(arg)
+		return
+
+	try:
+		atoms = setconfig.getSetAtoms(set)
+	except PackageSetNotFound as e:
+		# A nested (referenced) set is missing.
+		eout.eerror("Failed to process package set '%s' because " % set +
+			"it contains the non-existent package set '%s'; skipping" % e)
+		infos["missing"].append(arg)
+		return
+
+	for atom in atoms:
+		quickpkg_atom(options, infos, atom, eout)
+
+def quickpkg_main(options, args, eout):
+	"""Process all atom/set arguments and print a summary.
+
+	Returns os.EX_OK on full success, 1 when nothing was built, 2 when
+	some arguments could not be resolved, errno.EACCES when PKGDIR is
+	not writable.
+	"""
+	root = portage.settings["ROOT"]
+	trees = portage.db[root]
+	bintree = trees["bintree"]
+
+	try:
+		ensure_dirs(bintree.pkgdir)
+	except portage.exception.PortageException:
+		# Failure here is surfaced by the os.access() check below.
+		pass
+	if not os.access(bintree.pkgdir, os.W_OK):
+		eout.eerror("No write access to '%s'" % bintree.pkgdir)
+		return errno.EACCES
+
+	infos = {}
+	infos["successes"] = []
+	infos["missing"] = []
+	infos["config_files_excluded"] = 0
+	for arg in args:
+		# Set arguments start with the set prefix ("@"); everything
+		# else is treated as a package atom.
+		if arg[0] == SETPREFIX:
+			quickpkg_set(options, infos, arg, eout)
+		else:
+			quickpkg_atom(options, infos, arg, eout)
+
+	if not infos["successes"]:
+		eout.eerror("No packages found")
+		return 1
+	print()
+	eout.einfo("Packages now in '%s':" % bintree.pkgdir)
+	# Human-readable sizes, emulating `du -h`: pick the largest power
+	# of 1024 that fits and print one decimal place.
+	units = {10:'K', 20:'M', 30:'G', 40:'T',
+		50:'P', 60:'E', 70:'Z', 80:'Y'}
+	for cpv, size in infos["successes"]:
+		if not size:
+			# avoid OverflowError in math.log()
+			size_str = "0"
+		else:
+			power_of_2 = math.log(size, 2)
+			power_of_2 = 10*int(power_of_2/10)
+			unit = units.get(power_of_2)
+			if unit:
+				size = float(size)/(2**power_of_2)
+				size_str = "%.1f" % size
+				if len(size_str) > 4:
+					# emulate `du -h`, don't show too many sig figs
+					size_str = str(int(size))
+				size_str += unit
+			else:
+				size_str = str(size)
+		eout.einfo("%s: %s" % (cpv, size_str))
+	if infos["config_files_excluded"]:
+		print()
+		eout.ewarn("Excluded config files: %d" % infos["config_files_excluded"])
+		eout.ewarn("See --help if you would like to include config files.")
+	if infos["missing"]:
+		print()
+		eout.ewarn("The following packages could not be found:")
+		eout.ewarn(" ".join(infos["missing"]))
+		return 2
+	return os.EX_OK
+
+if __name__ == "__main__":
+	usage = "quickpkg [options] <list of package atoms or package sets>"
+	parser = optparse.OptionParser(usage=usage)
+	parser.add_option("--umask",
+		default="0077",
+		help="umask used during package creation (default is 0077)")
+	parser.add_option("--ignore-default-opts",
+		action="store_true",
+		help="do not use the QUICKPKG_DEFAULT_OPTS environment variable")
+	parser.add_option("--include-config",
+		type="choice",
+		choices=["y","n"],
+		default="n",
+		metavar="<y|n>",
+		help="include all files protected by CONFIG_PROTECT (as a security precaution, default is 'n')")
+	parser.add_option("--include-unmodified-config",
+		type="choice",
+		choices=["y","n"],
+		default="n",
+		metavar="<y|n>",
+		help="include files protected by CONFIG_PROTECT that have not been modified since installation (as a security precaution, default is 'n')")
+	options, args = parser.parse_args(sys.argv[1:])
+	if not options.ignore_default_opts:
+		# Re-parse with QUICKPKG_DEFAULT_OPTS prepended, so explicit
+		# command-line options take precedence over the defaults.
+		default_opts = portage.settings.get("QUICKPKG_DEFAULT_OPTS","").split()
+		options, args = parser.parse_args(default_opts + sys.argv[1:])
+	if not args:
+		parser.error("no packages atoms given")
+	try:
+		# umask is given in octal.
+		umask = int(options.umask, 8)
+	except ValueError:
+		parser.error("invalid umask: %s" % options.umask)
+	# We need to ensure a sane umask for the packages that will be created.
+	old_umask = os.umask(umask)
+	eout = portage.output.EOutput()
+	def sigwinch_handler(signum, frame):
+		# Keep EOutput's line width in sync with the terminal size.
+		lines, eout.term_columns = portage.output.get_term_size()
+	signal.signal(signal.SIGWINCH, sigwinch_handler)
+	try:
+		retval = quickpkg_main(options, args, eout)
+	finally:
+		# Restore the previous umask and default SIGWINCH handling.
+		os.umask(old_umask)
+		signal.signal(signal.SIGWINCH, signal.SIG_DFL)
+	sys.exit(retval)
diff --git a/portage_with_autodep/bin/regenworld b/portage_with_autodep/bin/regenworld
new file mode 100755
index 0000000..6b5af4c
--- /dev/null
+++ b/portage_with_autodep/bin/regenworld
@@ -0,0 +1,144 @@
+#!/usr/bin/python
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import sys
+try:
+ import portage
+except ImportError:
+ from os import path as osp
+ sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+ import portage
+
+from portage import os
+from portage._sets.files import StaticFileSet, WorldSelectedSet
+
+import re
+import tempfile
+import textwrap
+
+__candidatematcher__ = re.compile("^[0-9]+: \\*\\*\\* emerge ")
+__noncandidatematcher__ = re.compile(" sync( |$)| clean( |$)| search( |$)|--oneshot|--fetchonly| unmerge( |$)")
+
+def issyspkg(pkgline):
+ return (pkgline[0] == "*")
+
+def iscandidate(logline):
+ return (__candidatematcher__.match(logline) \
+ and not __noncandidatematcher__.search(logline))
+
+def getpkginfo(logline):
+ logline = re.sub("^[0-9]+: \\*\\*\\* emerge ", "", logline)
+ logline = logline.strip()
+ logline = re.sub("(\\S+\\.(ebuild|tbz2))|(--\\S+)|inject ", "", logline)
+ return logline.strip()
+
+__uniqlist__ = []
+def isunwanted(pkgline):
+ if pkgline in ["world", "system", "depclean", "info", "regen", ""]:
+ return False
+ elif pkgline in __uniqlist__:
+ return False
+ elif not re.search("^[a-zA-Z<>=~]", pkgline):
+ return False
+ else:
+ __uniqlist__.append(pkgline)
+ return True
+
+root = portage.settings['ROOT']
+eroot = portage.settings['EROOT']
+world_file = os.path.join(eroot, portage.WORLD_FILE)
+
+# show a little description if we have arguments
+if len(sys.argv) >= 2 and sys.argv[1] in ["-h", "--help"]:
+ print("This script regenerates the portage world file by checking the portage")
+ print("logfile for all actions that you've done in the past. It ignores any")
+ print("arguments except --help. It is recommended that you make a backup of")
+ print("your existing world file (%s) before using this tool." % world_file)
+ sys.exit(0)
+
+worldlist = portage.grabfile(world_file)
+syslist = [x for x in portage.settings.packages if issyspkg(x)]
+
+logfile = portage.grabfile(os.path.join(eroot, "var/log/emerge.log"))
+biglist = [getpkginfo(x) for x in logfile if iscandidate(x)]
+tmplist = []
+for l in biglist:
+ tmplist += l.split()
+biglist = [x for x in tmplist if isunwanted(x)]
+#for p in biglist:
+# print(p)
+#sys.exit(0)
+
+# resolving virtuals
+realsyslist = []
+for mykey in syslist:
+	# drop the asterisk
+ mykey = mykey[1:]
+ #print("candidate:",mykey)
+ mylist = portage.db[root]["vartree"].dbapi.match(mykey)
+ if mylist:
+ mykey=portage.cpv_getkey(mylist[0])
+ if mykey not in realsyslist:
+ realsyslist.append(mykey)
+
+for mykey in biglist:
+ #print("checking:",mykey)
+ try:
+ mylist = portage.db[root]["vartree"].dbapi.match(mykey)
+ except (portage.exception.InvalidAtom, KeyError):
+ if "--debug" in sys.argv:
+ print("* ignoring broken log entry for %s (likely injected)" % mykey)
+ except ValueError as e:
+ try:
+ print("* %s is an ambiguous package name, candidates are:\n%s" % (mykey, e))
+ except AttributeError:
+ # FIXME: Find out what causes this (bug #344845).
+ print("* %s is an ambiguous package name" % (mykey,))
+ continue
+ if mylist:
+ #print "mylist:",mylist
+ myfavkey=portage.cpv_getkey(mylist[0])
+ if (myfavkey not in realsyslist) and (myfavkey not in worldlist):
+ print("add to world:",myfavkey)
+ worldlist.append(myfavkey)
+
+if not worldlist:
+ pass
+else:
+ existing_set = WorldSelectedSet(eroot)
+ existing_set.load()
+
+ if not existing_set:
+ existing_set.replace(worldlist)
+ else:
+ old_world = existing_set._filename
+ fd, tmp_filename = tempfile.mkstemp(suffix=".tmp",
+ prefix=os.path.basename(old_world) + ".",
+ dir=os.path.dirname(old_world))
+ os.close(fd)
+
+ new_set = StaticFileSet(tmp_filename)
+ new_set.update(worldlist)
+
+ if existing_set.getAtoms() == new_set.getAtoms():
+ os.unlink(tmp_filename)
+ else:
+ new_set.write()
+
+ msg = "Please review differences between old and new files, " + \
+ "and replace the old file if desired."
+
+ portage.util.writemsg_stdout("\n",
+ noiselevel=-1)
+ for line in textwrap.wrap(msg, 65):
+ portage.util.writemsg_stdout("%s\n" % line,
+ noiselevel=-1)
+ portage.util.writemsg_stdout("\n",
+ noiselevel=-1)
+ portage.util.writemsg_stdout(" old: %s\n\n" % old_world,
+ noiselevel=-1)
+ portage.util.writemsg_stdout(" new: %s\n\n" % tmp_filename,
+ noiselevel=-1)
diff --git a/portage_with_autodep/bin/repoman b/portage_with_autodep/bin/repoman
new file mode 100755
index 0000000..f1fbc24
--- /dev/null
+++ b/portage_with_autodep/bin/repoman
@@ -0,0 +1,2672 @@
+#!/usr/bin/python -O
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+# Next to do: dep syntax checking in mask files
+# Then, check to make sure deps are satisfiable (to avoid "can't find match for" problems)
+# that last one is tricky because multiple profiles need to be checked.
+
+from __future__ import print_function
+
+import calendar
+import copy
+import errno
+import formatter
+import io
+import logging
+import optparse
+import re
+import signal
+import stat
+import sys
+import tempfile
+import time
+import platform
+
+try:
+ from urllib.request import urlopen as urllib_request_urlopen
+except ImportError:
+ from urllib import urlopen as urllib_request_urlopen
+
+from itertools import chain
+from stat import S_ISDIR
+
+try:
+ import portage
+except ImportError:
+ from os import path as osp
+ sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+ import portage
+portage._disable_legacy_globals()
+portage.dep._internal_warnings = True
+
+try:
+ import xml.etree.ElementTree
+ from xml.parsers.expat import ExpatError
+except ImportError:
+ msg = ["Please enable python's \"xml\" USE flag in order to use repoman."]
+ from portage.output import EOutput
+ out = EOutput()
+ for line in msg:
+ out.eerror(line)
+ sys.exit(1)
+
+from portage import os
+from portage import subprocess_getstatusoutput
+from portage import _encodings
+from portage import _unicode_encode
+from repoman.checks import run_checks
+from repoman import utilities
+from repoman.herdbase import make_herd_base
+from _emerge.Package import Package
+from _emerge.RootConfig import RootConfig
+from _emerge.userquery import userquery
+import portage.checksum
+import portage.const
+from portage import cvstree, normalize_path
+from portage import util
+from portage.exception import (FileNotFound, MissingParameter,
+ ParseError, PermissionDenied)
+from portage.manifest import Manifest
+from portage.process import find_binary, spawn
+from portage.output import bold, create_color_func, \
+ green, nocolor, red
+from portage.output import ConsoleStyleFile, StyleWriter
+from portage.util import cmp_sort_key, writemsg_level
+from portage.package.ebuild.digestgen import digestgen
+from portage.eapi import eapi_has_slot_deps, \
+ eapi_has_use_deps, eapi_has_strong_blocks, eapi_has_iuse_defaults, \
+ eapi_has_required_use, eapi_has_use_dep_defaults
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+util.initialize_logger()
+
+# 14 is the length of DESCRIPTION=""
+max_desc_len = 100
+allowed_filename_chars="a-zA-Z0-9._-+:"
+disallowed_filename_chars_re = re.compile(r'[^a-zA-Z0-9._\-+:]')
+pv_toolong_re = re.compile(r'[0-9]{19,}')
+bad = create_color_func("BAD")
+
+# A sane umask is needed for files that portage creates.
+os.umask(0o22)
+# Repoman sets its own ACCEPT_KEYWORDS and we don't want it to
+# behave incrementally.
+repoman_incrementals = tuple(x for x in \
+ portage.const.INCREMENTALS if x != 'ACCEPT_KEYWORDS')
+repoman_settings = portage.config(local_config=False)
+repoman_settings.lock()
+
+if repoman_settings.get("NOCOLOR", "").lower() in ("yes", "true") or \
+ repoman_settings.get('TERM') == 'dumb' or \
+ not sys.stdout.isatty():
+ nocolor()
+
+def warn(txt):
+ print("repoman: " + txt)
+
+def err(txt):
+ warn(txt)
+ sys.exit(1)
+
+def exithandler(signum=None, frame=None):
+ logging.fatal("Interrupted; exiting...")
+ if signum is None:
+ sys.exit(1)
+ else:
+ sys.exit(128 + signum)
+
+signal.signal(signal.SIGINT,exithandler)
+
+class RepomanHelpFormatter(optparse.IndentedHelpFormatter):
+	"""Repoman needs its own HelpFormatter for now, because the default ones
+ murder the help text."""
+
+ def __init__(self, indent_increment=1, max_help_position=24, width=150, short_first=1):
+ optparse.HelpFormatter.__init__(self, indent_increment, max_help_position, width, short_first)
+
+ def format_description(self, description):
+ return description
+
+class RepomanOptionParser(optparse.OptionParser):
+ """Add the on_tail function, ruby has it, optionParser should too
+ """
+
+ def __init__(self, *args, **kwargs):
+ optparse.OptionParser.__init__(self, *args, **kwargs)
+ self.tail = ""
+
+ def on_tail(self, description):
+ self.tail += description
+
+ def format_help(self, formatter=None):
+ result = optparse.OptionParser.format_help(self, formatter)
+ result += self.tail
+ return result
+
+
+def ParseArgs(argv, qahelp):
+ """This function uses a customized optionParser to parse command line arguments for repoman
+ Args:
+ argv - a sequence of command line arguments
+ qahelp - a dict of qa warning to help message
+ Returns:
+ (opts, args), just like a call to parser.parse_args()
+ """
+
+ if argv and sys.hexversion < 0x3000000 and not isinstance(argv[0], unicode):
+ argv = [portage._unicode_decode(x) for x in argv]
+
+ modes = {
+ 'commit' : 'Run a scan then commit changes',
+ 'ci' : 'Run a scan then commit changes',
+ 'fix' : 'Fix simple QA issues (stray digests, missing digests)',
+ 'full' : 'Scan directory tree and print all issues (not a summary)',
+ 'help' : 'Show this screen',
+ 'manifest' : 'Generate a Manifest (fetches files if necessary)',
+ 'manifest-check' : 'Check Manifests for missing or incorrect digests',
+ 'scan' : 'Scan directory tree for QA issues'
+ }
+
+ mode_keys = list(modes)
+ mode_keys.sort()
+
+ parser = RepomanOptionParser(formatter=RepomanHelpFormatter(), usage="%prog [options] [mode]")
+ parser.description = green(" ".join((os.path.basename(argv[0]), "1.2")))
+ parser.description += "\nCopyright 1999-2007 Gentoo Foundation"
+ parser.description += "\nDistributed under the terms of the GNU General Public License v2"
+ parser.description += "\nmodes: " + " | ".join(map(green,mode_keys))
+
+ parser.add_option('-a', '--ask', dest='ask', action='store_true', default=False,
+ help='Request a confirmation before commiting')
+
+ parser.add_option('-m', '--commitmsg', dest='commitmsg',
+ help='specify a commit message on the command line')
+
+ parser.add_option('-M', '--commitmsgfile', dest='commitmsgfile',
+ help='specify a path to a file that contains a commit message')
+
+ parser.add_option('-p', '--pretend', dest='pretend', default=False,
+ action='store_true', help='don\'t commit or fix anything; just show what would be done')
+
+ parser.add_option('-q', '--quiet', dest="quiet", action="count", default=0,
+ help='do not print unnecessary messages')
+
+ parser.add_option('-f', '--force', dest='force', default=False, action='store_true',
+ help='Commit with QA violations')
+
+ parser.add_option('--vcs', dest='vcs',
+ help='Force using specific VCS instead of autodetection')
+
+ parser.add_option('-v', '--verbose', dest="verbosity", action='count',
+ help='be very verbose in output', default=0)
+
+ parser.add_option('-V', '--version', dest='version', action='store_true',
+ help='show version info')
+
+ parser.add_option('-x', '--xmlparse', dest='xml_parse', action='store_true',
+ default=False, help='forces the metadata.xml parse check to be carried out')
+
+ parser.add_option('-i', '--ignore-arches', dest='ignore_arches', action='store_true',
+ default=False, help='ignore arch-specific failures (where arch != host)')
+
+ parser.add_option('-I', '--ignore-masked', dest='ignore_masked', action='store_true',
+ default=False, help='ignore masked packages (not allowed with commit mode)')
+
+ parser.add_option('-d', '--include-dev', dest='include_dev', action='store_true',
+ default=False, help='include dev profiles in dependency checks')
+
+ parser.add_option('--unmatched-removal', dest='unmatched_removal', action='store_true',
+ default=False, help='enable strict checking of package.mask and package.unmask files for unmatched removal atoms')
+
+ parser.add_option('--without-mask', dest='without_mask', action='store_true',
+ default=False, help='behave as if no package.mask entries exist (not allowed with commit mode)')
+
+ parser.add_option('--mode', type='choice', dest='mode', choices=list(modes),
+ help='specify which mode repoman will run in (default=full)')
+
+ parser.on_tail("\n " + green("Modes".ljust(20) + " Description\n"))
+
+ for k in mode_keys:
+ parser.on_tail(" %s %s\n" % (k.ljust(20), modes[k]))
+
+ parser.on_tail("\n " + green("QA keyword".ljust(20) + " Description\n"))
+
+ sorted_qa = list(qahelp)
+ sorted_qa.sort()
+ for k in sorted_qa:
+ parser.on_tail(" %s %s\n" % (k.ljust(20), qahelp[k]))
+
+ opts, args = parser.parse_args(argv[1:])
+
+ if opts.mode == 'help':
+ parser.print_help(short=False)
+
+ for arg in args:
+ if arg in modes:
+ if not opts.mode:
+ opts.mode = arg
+ break
+ else:
+ parser.error("invalid mode: %s" % arg)
+
+ if not opts.mode:
+ opts.mode = 'full'
+
+ if opts.mode == 'ci':
+ opts.mode = 'commit' # backwards compat shortcut
+
+ if opts.mode == 'commit' and not (opts.force or opts.pretend):
+ if opts.ignore_masked:
+ parser.error('Commit mode and --ignore-masked are not compatible')
+ if opts.without_mask:
+ parser.error('Commit mode and --without-mask are not compatible')
+
+ # Use the verbosity and quiet options to fiddle with the loglevel appropriately
+ for val in range(opts.verbosity):
+ logger = logging.getLogger()
+ logger.setLevel(logger.getEffectiveLevel() - 10)
+
+ for val in range(opts.quiet):
+ logger = logging.getLogger()
+ logger.setLevel(logger.getEffectiveLevel() + 10)
+
+ return (opts, args)
+
+qahelp={
+	"CVS/Entries.IO_error":"Attempting to commit, and an IO error was encountered while accessing the Entries file",
+ "desktop.invalid":"desktop-file-validate reports errors in a *.desktop file",
+ "ebuild.invalidname":"Ebuild files with a non-parseable or syntactically incorrect name (or using 2.1 versioning extensions)",
+ "ebuild.namenomatch":"Ebuild files that do not have the same name as their parent directory",
+ "changelog.ebuildadded":"An ebuild was added but the ChangeLog was not modified",
+ "changelog.missing":"Missing ChangeLog files",
+ "ebuild.notadded":"Ebuilds that exist but have not been added to cvs",
+ "ebuild.patches":"PATCHES variable should be a bash array to ensure white space safety",
+ "changelog.notadded":"ChangeLogs that exist but have not been added to cvs",
+ "dependency.unknown" : "Ebuild has a dependency that refers to an unknown package (which may be provided by an overlay)",
+	"file.executable":"Ebuilds, digests, metadata.xml, Manifest, and ChangeLog do not need the executable bit",
+ "file.size":"Files in the files directory must be under 20 KiB",
+ "file.size.fatal":"Files in the files directory must be under 60 KiB",
+ "file.name":"File/dir name must be composed of only the following chars: %s " % allowed_filename_chars,
+ "file.UTF8":"File is not UTF8 compliant",
+ "inherit.autotools":"Ebuild inherits autotools but does not call eautomake, eautoconf or eautoreconf",
+ "inherit.deprecated":"Ebuild inherits a deprecated eclass",
+ "java.eclassesnotused":"With virtual/jdk in DEPEND you must inherit a java eclass",
+ "wxwidgets.eclassnotused":"Ebuild DEPENDs on x11-libs/wxGTK without inheriting wxwidgets.eclass",
+ "KEYWORDS.dropped":"Ebuilds that appear to have dropped KEYWORDS for some arch",
+ "KEYWORDS.missing":"Ebuilds that have a missing or empty KEYWORDS variable",
+ "KEYWORDS.stable":"Ebuilds that have been added directly with stable KEYWORDS",
+ "KEYWORDS.stupid":"Ebuilds that use KEYWORDS=-* instead of package.mask",
+ "LICENSE.missing":"Ebuilds that have a missing or empty LICENSE variable",
+ "LICENSE.virtual":"Virtuals that have a non-empty LICENSE variable",
+ "DESCRIPTION.missing":"Ebuilds that have a missing or empty DESCRIPTION variable",
+ "DESCRIPTION.toolong":"DESCRIPTION is over %d characters" % max_desc_len,
+ "EAPI.definition":"EAPI is defined after an inherit call (must be defined before)",
+ "EAPI.deprecated":"Ebuilds that use features that are deprecated in the current EAPI",
+ "EAPI.incompatible":"Ebuilds that use features that are only available with a different EAPI",
+ "EAPI.unsupported":"Ebuilds that have an unsupported EAPI version (you must upgrade portage)",
+ "SLOT.invalid":"Ebuilds that have a missing or invalid SLOT variable value",
+ "HOMEPAGE.missing":"Ebuilds that have a missing or empty HOMEPAGE variable",
+ "HOMEPAGE.virtual":"Virtuals that have a non-empty HOMEPAGE variable",
+ "DEPEND.bad":"User-visible ebuilds with bad DEPEND settings (matched against *visible* ebuilds)",
+ "RDEPEND.bad":"User-visible ebuilds with bad RDEPEND settings (matched against *visible* ebuilds)",
+ "PDEPEND.bad":"User-visible ebuilds with bad PDEPEND settings (matched against *visible* ebuilds)",
+ "DEPEND.badmasked":"Masked ebuilds with bad DEPEND settings (matched against *all* ebuilds)",
+ "RDEPEND.badmasked":"Masked ebuilds with RDEPEND settings (matched against *all* ebuilds)",
+ "PDEPEND.badmasked":"Masked ebuilds with PDEPEND settings (matched against *all* ebuilds)",
+ "DEPEND.badindev":"User-visible ebuilds with bad DEPEND settings (matched against *visible* ebuilds) in developing arch",
+ "RDEPEND.badindev":"User-visible ebuilds with bad RDEPEND settings (matched against *visible* ebuilds) in developing arch",
+ "PDEPEND.badindev":"User-visible ebuilds with bad PDEPEND settings (matched against *visible* ebuilds) in developing arch",
+ "DEPEND.badmaskedindev":"Masked ebuilds with bad DEPEND settings (matched against *all* ebuilds) in developing arch",
+ "RDEPEND.badmaskedindev":"Masked ebuilds with RDEPEND settings (matched against *all* ebuilds) in developing arch",
+ "PDEPEND.badmaskedindev":"Masked ebuilds with PDEPEND settings (matched against *all* ebuilds) in developing arch",
+ "PDEPEND.suspect":"PDEPEND contains a package that usually only belongs in DEPEND.",
+ "DEPEND.syntax":"Syntax error in DEPEND (usually an extra/missing space/parenthesis)",
+ "RDEPEND.syntax":"Syntax error in RDEPEND (usually an extra/missing space/parenthesis)",
+ "PDEPEND.syntax":"Syntax error in PDEPEND (usually an extra/missing space/parenthesis)",
+ "DEPEND.badtilde":"DEPEND uses the ~ dep operator with a non-zero revision part, which is useless (the revision is ignored)",
+ "RDEPEND.badtilde":"RDEPEND uses the ~ dep operator with a non-zero revision part, which is useless (the revision is ignored)",
+ "PDEPEND.badtilde":"PDEPEND uses the ~ dep operator with a non-zero revision part, which is useless (the revision is ignored)",
+ "LICENSE.syntax":"Syntax error in LICENSE (usually an extra/missing space/parenthesis)",
+ "PROVIDE.syntax":"Syntax error in PROVIDE (usually an extra/missing space/parenthesis)",
+ "PROPERTIES.syntax":"Syntax error in PROPERTIES (usually an extra/missing space/parenthesis)",
+ "RESTRICT.syntax":"Syntax error in RESTRICT (usually an extra/missing space/parenthesis)",
+ "REQUIRED_USE.syntax":"Syntax error in REQUIRED_USE (usually an extra/missing space/parenthesis)",
+ "SRC_URI.syntax":"Syntax error in SRC_URI (usually an extra/missing space/parenthesis)",
+ "SRC_URI.mirror":"A uri listed in profiles/thirdpartymirrors is found in SRC_URI",
+ "ebuild.syntax":"Error generating cache entry for ebuild; typically caused by ebuild syntax error or digest verification failure",
+ "ebuild.output":"A simple sourcing of the ebuild produces output; this breaks ebuild policy.",
+ "ebuild.nesteddie":"Placing 'die' inside ( ) prints an error, but doesn't stop the ebuild.",
+ "variable.invalidchar":"A variable contains an invalid character that is not part of the ASCII character set",
+ "variable.readonly":"Assigning a readonly variable",
+ "variable.usedwithhelpers":"Ebuild uses D, ROOT, ED, EROOT or EPREFIX with helpers",
+ "LIVEVCS.stable":"This ebuild is a live checkout from a VCS but has stable keywords.",
+ "LIVEVCS.unmasked":"This ebuild is a live checkout from a VCS but has keywords and is not masked in the global package.mask.",
+ "IUSE.invalid":"This ebuild has a variable in IUSE that is not in the use.desc or its metadata.xml file",
+ "IUSE.missing":"This ebuild has a USE conditional which references a flag that is not listed in IUSE",
+ "IUSE.undefined":"This ebuild does not define IUSE (style guideline says to define IUSE even when empty)",
+	"LICENSE.invalid":"This ebuild is listing a license that doesn't exist in portage's license/ dir.",
+ "KEYWORDS.invalid":"This ebuild contains KEYWORDS that are not listed in profiles/arch.list or for which no valid profile was found",
+ "RDEPEND.implicit":"RDEPEND is unset in the ebuild which triggers implicit RDEPEND=$DEPEND assignment (prior to EAPI 4)",
+ "RDEPEND.suspect":"RDEPEND contains a package that usually only belongs in DEPEND.",
+ "RESTRICT.invalid":"This ebuild contains invalid RESTRICT values.",
+ "digest.assumed":"Existing digest must be assumed correct (Package level only)",
+ "digest.missing":"Some files listed in SRC_URI aren't referenced in the Manifest",
+ "digest.unused":"Some files listed in the Manifest aren't referenced in SRC_URI",
+ "ebuild.nostable":"There are no ebuilds that are marked as stable for your ARCH",
+ "ebuild.allmasked":"All ebuilds are masked for this package (Package level only)",
+ "ebuild.majorsyn":"This ebuild has a major syntax error that may cause the ebuild to fail partially or fully",
+ "ebuild.minorsyn":"This ebuild has a minor syntax error that contravenes gentoo coding style",
+ "ebuild.badheader":"This ebuild has a malformed header",
+ "eprefixify.defined":"The ebuild uses eprefixify, but does not inherit the prefix eclass",
+ "manifest.bad":"Manifest has missing or incorrect digests",
+ "metadata.missing":"Missing metadata.xml files",
+ "metadata.bad":"Bad metadata.xml files",
+ "metadata.warning":"Warnings in metadata.xml files",
+ "portage.internal":"The ebuild uses an internal Portage function",
+ "virtual.oldstyle":"The ebuild PROVIDEs an old-style virtual (see GLEP 37)",
+ "usage.obsolete":"The ebuild makes use of an obsolete construct",
+ "upstream.workaround":"The ebuild works around an upstream bug, an upstream bug should be filed and tracked in bugs.gentoo.org"
+}
+
+qacats = list(qahelp)
+qacats.sort()
+
+qawarnings = set((
+"changelog.missing",
+"changelog.notadded",
+"dependency.unknown",
+"digest.assumed",
+"digest.unused",
+"ebuild.notadded",
+"ebuild.nostable",
+"ebuild.allmasked",
+"ebuild.nesteddie",
+"desktop.invalid",
+"DEPEND.badmasked","RDEPEND.badmasked","PDEPEND.badmasked",
+"DEPEND.badindev","RDEPEND.badindev","PDEPEND.badindev",
+"DEPEND.badmaskedindev","RDEPEND.badmaskedindev","PDEPEND.badmaskedindev",
+"DEPEND.badtilde", "RDEPEND.badtilde", "PDEPEND.badtilde",
+"DESCRIPTION.toolong",
+"EAPI.deprecated",
+"HOMEPAGE.virtual",
+"LICENSE.virtual",
+"KEYWORDS.dropped",
+"KEYWORDS.stupid",
+"KEYWORDS.missing",
+"IUSE.undefined",
+"PDEPEND.suspect",
+"RDEPEND.implicit",
+"RDEPEND.suspect",
+"RESTRICT.invalid",
+"SRC_URI.mirror",
+"ebuild.minorsyn",
+"ebuild.badheader",
+"ebuild.patches",
+"file.size",
+"inherit.autotools",
+"inherit.deprecated",
+"java.eclassesnotused",
+"wxwidgets.eclassnotused",
+"metadata.warning",
+"portage.internal",
+"usage.obsolete",
+"upstream.workaround",
+"virtual.oldstyle",
+"LIVEVCS.stable",
+"LIVEVCS.unmasked",
+))
+
+non_ascii_re = re.compile(r'[^\x00-\x7f]')
+
+missingvars = ["KEYWORDS", "LICENSE", "DESCRIPTION", "HOMEPAGE"]
+allvars = set(x for x in portage.auxdbkeys if not x.startswith("UNUSED_"))
+allvars.update(Package.metadata_keys)
+allvars = sorted(allvars)
+commitmessage=None
+for x in missingvars:
+ x += ".missing"
+ if x not in qacats:
+ logging.warn('* missingvars values need to be added to qahelp ("%s")' % x)
+ qacats.append(x)
+ qawarnings.add(x)
+
+valid_restrict = frozenset(["binchecks", "bindist",
+ "fetch", "installsources", "mirror",
+ "primaryuri", "strip", "test", "userpriv"])
+
+live_eclasses = frozenset([
+ "bzr",
+ "cvs",
+ "darcs",
+ "git",
+ "git-2",
+ "mercurial",
+ "subversion",
+ "tla",
+])
+
+suspect_rdepend = frozenset([
+ "app-arch/cabextract",
+ "app-arch/rpm2targz",
+ "app-doc/doxygen",
+ "dev-lang/nasm",
+ "dev-lang/swig",
+ "dev-lang/yasm",
+ "dev-perl/extutils-pkgconfig",
+ "dev-util/byacc",
+ "dev-util/cmake",
+ "dev-util/ftjam",
+ "dev-util/gperf",
+ "dev-util/gtk-doc",
+ "dev-util/gtk-doc-am",
+ "dev-util/intltool",
+ "dev-util/jam",
+ "dev-util/pkgconfig",
+ "dev-util/scons",
+ "dev-util/unifdef",
+ "dev-util/yacc",
+ "media-gfx/ebdftopcf",
+ "sys-apps/help2man",
+ "sys-devel/autoconf",
+ "sys-devel/automake",
+ "sys-devel/bin86",
+ "sys-devel/bison",
+ "sys-devel/dev86",
+ "sys-devel/flex",
+ "sys-devel/m4",
+ "sys-devel/pmake",
+ "virtual/linux-sources",
+ "x11-misc/bdftopcf",
+ "x11-misc/imake",
+])
+
+metadata_dtd_uri = 'http://www.gentoo.org/dtd/metadata.dtd'
+# force refetch if the local copy creation time is older than this
+metadata_dtd_ctime_interval = 60 * 60 * 24 * 7 # 7 days
+
+# file.executable
+no_exec = frozenset(["Manifest","ChangeLog","metadata.xml"])
+
+options, arguments = ParseArgs(sys.argv, qahelp)
+
+if options.version:
+ print("Portage", portage.VERSION)
+ sys.exit(0)
+
+# Set this to False when an extraordinary issue (generally
+# something other than a QA issue) makes it impossible to
+# commit (like if Manifest generation fails).
+can_force = True
+
+portdir, portdir_overlay, mydir = utilities.FindPortdir(repoman_settings)
+if portdir is None:
+ sys.exit(1)
+
+myreporoot = os.path.basename(portdir_overlay)
+myreporoot += mydir[len(portdir_overlay):]
+
+if options.vcs:
+ if options.vcs in ('cvs', 'svn', 'git', 'bzr', 'hg'):
+ vcs = options.vcs
+ else:
+ vcs = None
+else:
+ vcses = utilities.FindVCS()
+ if len(vcses) > 1:
+ print(red('*** Ambiguous workdir -- more than one VCS found at the same depth: %s.' % ', '.join(vcses)))
+ print(red('*** Please either clean up your workdir or specify --vcs option.'))
+ sys.exit(1)
+ elif vcses:
+ vcs = vcses[0]
+ else:
+ vcs = None
+
+# Note: We don't use ChangeLogs in distributed SCMs.
+# It will be generated on server side from scm log,
+# before package moves to the rsync server.
+# This is needed because we try to avoid merge collisions.
+check_changelog = vcs in ('cvs', 'svn')
+
+# Disable copyright/mtime check if vcs does not preserve mtime (bug #324075).
+vcs_preserves_mtime = vcs not in ('git',)
+
+vcs_local_opts = repoman_settings.get("REPOMAN_VCS_LOCAL_OPTS", "").split()
+vcs_global_opts = repoman_settings.get("REPOMAN_VCS_GLOBAL_OPTS")
+if vcs_global_opts is None:
+ if vcs in ('cvs', 'svn'):
+ vcs_global_opts = "-q"
+ else:
+ vcs_global_opts = ""
+vcs_global_opts = vcs_global_opts.split()
+
+if vcs == "cvs" and \
+ "commit" == options.mode and \
+ "RMD160" not in portage.checksum.hashorigin_map:
+ from portage.util import grablines
+ repo_lines = grablines("./CVS/Repository")
+ if repo_lines and \
+ "gentoo-x86" == repo_lines[0].strip().split(os.path.sep)[0]:
+ msg = "Please install " \
+ "pycrypto or enable python's ssl USE flag in order " \
+ "to enable RMD160 hash support. See bug #198398 for " \
+ "more information."
+ prefix = bad(" * ")
+ from textwrap import wrap
+ for line in wrap(msg, 70):
+ print(prefix + line)
+ sys.exit(1)
+ del repo_lines
+
+if options.mode == 'commit' and not options.pretend and not vcs:
+ logging.info("Not in a version controlled repository; enabling pretend mode.")
+ options.pretend = True
+
+# Ensure that PORTDIR_OVERLAY contains the repository corresponding to $PWD.
+repoman_settings = portage.config(local_config=False)
+repoman_settings['PORTDIR_OVERLAY'] = "%s %s" % \
+ (repoman_settings.get('PORTDIR_OVERLAY', ''), portdir_overlay)
+# We have to call the config constructor again so
+# that config.repositories is initialized correctly.
+repoman_settings = portage.config(local_config=False, env=dict(os.environ,
+ PORTDIR_OVERLAY=repoman_settings['PORTDIR_OVERLAY']))
+
+root = '/'
+trees = {
+ root : {'porttree' : portage.portagetree(root, settings=repoman_settings)}
+}
+portdb = trees[root]['porttree'].dbapi
+
+# Constrain dependency resolution to the master(s)
+# that are specified in layout.conf.
+portdir_overlay = os.path.realpath(portdir_overlay)
+repo_info = portdb._repo_info[portdir_overlay]
+portdb.porttrees = list(repo_info.eclass_db.porttrees)
+portdir = portdb.porttrees[0]
+
+# Generate an appropriate PORTDIR_OVERLAY value for passing into the
+# profile-specific config constructor calls.
+env = os.environ.copy()
+env['PORTDIR'] = portdir
+env['PORTDIR_OVERLAY'] = ' '.join(portdb.porttrees[1:])
+
+logging.info('Setting paths:')
+logging.info('PORTDIR = "' + portdir + '"')
+logging.info('PORTDIR_OVERLAY = "%s"' % env['PORTDIR_OVERLAY'])
+
+# It's confusing if these warnings are displayed without the user
+# being told which profile they come from, so disable them.
+env['FEATURES'] = env.get('FEATURES', '') + ' -unknown-features-warn'
+
+categories = []
+for path in set([portdir, portdir_overlay]):
+ categories.extend(portage.util.grabfile(
+ os.path.join(path, 'profiles', 'categories')))
+repoman_settings.categories = tuple(sorted(
+ portage.util.stack_lists([categories], incremental=1)))
+del categories
+
+portdb.settings = repoman_settings
+root_config = RootConfig(repoman_settings, trees[root], None)
+# We really only need to cache the metadata that's necessary for visibility
+# filtering. Anything else can be discarded to reduce memory consumption.
+portdb._aux_cache_keys.clear()
+portdb._aux_cache_keys.update(["EAPI", "KEYWORDS", "SLOT"])
+
+reposplit = myreporoot.split(os.path.sep)
+repolevel = len(reposplit)
+
+# check if it's in $PORTDIR/$CATEGORY/$PN , otherwise bail if committing.
+# Reason for this is if they're trying to commit in just $FILESDIR/*, the Manifest needs updating.
+# this check ensures that repoman knows where it is, and the manifest recommit is at least possible.
+if options.mode == 'commit' and repolevel not in [1,2,3]:
+ print(red("***")+" Commit attempts *must* be from within a vcs co, category, or package directory.")
+ print(red("***")+" Attempting to commit from a packages files directory will be blocked for instance.")
+ print(red("***")+" This is intended behaviour, to ensure the manifest is recommitted for a package.")
+ print(red("***"))
+ err("Unable to identify level we're commiting from for %s" % '/'.join(reposplit))
+
+startdir = normalize_path(mydir)
+repodir = startdir
+for x in range(0, repolevel - 1):
+ repodir = os.path.dirname(repodir)
+repodir = os.path.realpath(repodir)
+
+def caterror(mycat):
+ err(mycat+" is not an official category. Skipping QA checks in this directory.\nPlease ensure that you add "+catdir+" to "+repodir+"/profiles/categories\nif it is a new category.")
+
+class ProfileDesc(object):
+ __slots__ = ('abs_path', 'arch', 'status', 'sub_path', 'tree_path',)
+ def __init__(self, arch, status, sub_path, tree_path):
+ self.arch = arch
+ self.status = status
+ if sub_path:
+ sub_path = normalize_path(sub_path.lstrip(os.sep))
+ self.sub_path = sub_path
+ self.tree_path = tree_path
+ if tree_path:
+ self.abs_path = os.path.join(tree_path, 'profiles', self.sub_path)
+ else:
+ self.abs_path = tree_path
+
+ def __str__(self):
+ if self.sub_path:
+ return self.sub_path
+ return 'empty profile'
+
+# Profiles collected from each tree's profiles.desc.
+profile_list = []
+valid_profile_types = frozenset(['dev', 'exp', 'stable'])
+
+# get lists of valid keywords, licenses, and use
+kwlist = set()
+liclist = set()
+uselist = set()
+global_pmasklines = []
+
+for path in portdb.porttrees:
+	# Any file under licenses/ is a valid license name.
+	try:
+		liclist.update(os.listdir(os.path.join(path, "licenses")))
+	except OSError:
+		pass
+	# profiles/arch.list supplies the set of valid KEYWORDS arches.
+	kwlist.update(portage.grabfile(os.path.join(path,
+		"profiles", "arch.list")))
+
+	# Global USE flags: first field of each use.desc line.
+	use_desc = portage.grabfile(os.path.join(path, 'profiles', 'use.desc'))
+	for x in use_desc:
+		x = x.split()
+		if x:
+			uselist.add(x[0])
+
+	# USE_EXPAND flags: each profiles/desc/<name>.desc file lists values
+	# that are valid in IUSE as "<name>_<value>".
+	expand_desc_dir = os.path.join(path, 'profiles', 'desc')
+	try:
+		expand_list = os.listdir(expand_desc_dir)
+	except OSError:
+		pass
+	else:
+		for fn in expand_list:
+			if not fn[-5:] == '.desc':
+				continue
+			use_prefix = fn[:-5].lower() + '_'
+			for x in portage.grabfile(os.path.join(expand_desc_dir, fn)):
+				x = x.split()
+				if x:
+					uselist.add(use_prefix + x[0])
+
+	# Accumulate package.mask atoms from every tree; stacked into one
+	# incremental list further below.
+	global_pmasklines.append(portage.util.grabfile_package(
+		os.path.join(path, 'profiles', 'package.mask'), recursive=1, verify_eapi=True))
+
+	# profiles.desc: one "<arch> <profile-subpath> <status>" entry per line.
+	desc_path = os.path.join(path, 'profiles', 'profiles.desc')
+	try:
+		desc_file = io.open(_unicode_encode(desc_path,
+			encoding=_encodings['fs'], errors='strict'),
+			mode='r', encoding=_encodings['repo.content'], errors='replace')
+	except EnvironmentError:
+		pass
+	else:
+		for i, x in enumerate(desc_file):
+			if x[0] == "#":
+				continue
+			arch = x.split()
+			if len(arch) == 0:
+				continue
+			# NOTE(review): err() is assumed to terminate the program here;
+			# if it ever returned, the malformed entry would still be used
+			# by the ProfileDesc construction below -- TODO confirm.
+			if len(arch) != 3:
+				err("wrong format: \"" + bad(x.strip()) + "\" in " + \
+					desc_path + " line %d" % (i+1, ))
+			elif arch[0] not in kwlist:
+				err("invalid arch: \"" + bad(arch[0]) + "\" in " + \
+					desc_path + " line %d" % (i+1, ))
+			elif arch[2] not in valid_profile_types:
+				err("invalid profile type: \"" + bad(arch[2]) + "\" in " + \
+					desc_path + " line %d" % (i+1, ))
+			profile_desc = ProfileDesc(arch[0], arch[2], arch[1], path)
+			if not os.path.isdir(profile_desc.abs_path):
+				logging.error(
+					"Invalid %s profile (%s) for arch %s in %s line %d",
+					arch[2], arch[1], arch[0], desc_path, i+1)
+				continue
+			# Profiles explicitly marked deprecated are skipped entirely.
+			if os.path.exists(
+				os.path.join(profile_desc.abs_path, 'deprecated')):
+				continue
+			profile_list.append(profile_desc)
+		desc_file.close()
+
+# Expose the full arch list to the config via PORTAGE_ARCHLIST.
+repoman_settings['PORTAGE_ARCHLIST'] = ' '.join(sorted(kwlist))
+repoman_settings.backup_changes('PORTAGE_ARCHLIST')
+
+# Collapse the per-tree package.mask line lists (incremental stacking
+# honors "-atom" removals), then index the atoms by category/package
+# name for O(1) lookup in has_global_mask().
+global_pmasklines = portage.util.stack_lists(global_pmasklines, incremental=1)
+global_pmaskdict = {}
+for x in global_pmasklines:
+	global_pmaskdict.setdefault(x.cp, []).append(x)
+del global_pmasklines
+
def has_global_mask(pkg):
	"""Return the first global package.mask atom matching *pkg*, or None."""
	candidates = [pkg]
	for atom in global_pmaskdict.get(pkg.cp) or []:
		if portage.dep.match_from_list(atom, candidates):
			return atom
	return None
+
+# Ensure that profile sub_path attributes are unique. Process in reverse order
+# so that profiles with duplicate sub_path from overlays will override
+# profiles with the same sub_path from parent repos.
+profiles = {}
+profile_list.reverse()
+profile_sub_paths = set()
+for prof in profile_list:
+	if prof.sub_path in profile_sub_paths:
+		continue
+	profile_sub_paths.add(prof.sub_path)
+	# Group surviving profiles by arch for the per-arch checks below.
+	profiles.setdefault(prof.arch, []).append(prof)
+
+# Use an empty profile for checking dependencies of
+# packages that have empty KEYWORDS.
+prof = ProfileDesc('**', 'stable', '', '')
+profiles.setdefault(prof.arch, []).append(prof)
+
+# Warn about any stable arch (unprefixed keyword) with no profile listed
+# in profiles.desc; ~testing keywords are skipped.
+for x in repoman_settings.archlist():
+	if x[0] == "~":
+		continue
+	if x not in profiles:
+		print(red("\""+x+"\" doesn't have a valid profile listed in profiles.desc."))
+		print(red("You need to either \"cvs update\" your profiles dir or follow this"))
+		print(red("up with the "+x+" team."))
+		print()
+
+# Sanity checks: empty license/keyword/USE sets mean the tree scan above
+# failed, so bail out rather than emit bogus QA results.
+if not liclist:
+	logging.fatal("Couldn't find licenses?")
+	sys.exit(1)
+
+if not kwlist:
+	logging.fatal("Couldn't read KEYWORDS from arch.list")
+	sys.exit(1)
+
+if not uselist:
+	logging.fatal("Couldn't find use.desc?")
+	sys.exit(1)
+
+# Build the list of "category/package" entries to scan, based on how
+# deep inside the repository repoman was started (repolevel).
+scanlist=[]
+if repolevel==2:
+	#we are inside a category directory
+	catdir=reposplit[-1]
+	if catdir not in repoman_settings.categories:
+		caterror(catdir)
+	mydirlist=os.listdir(startdir)
+	for x in mydirlist:
+		if x == "CVS" or x.startswith("."):
+			continue
+		if os.path.isdir(startdir+"/"+x):
+			scanlist.append(catdir+"/"+x)
+	repo_subdir = catdir + os.sep
+elif repolevel==1:
+	# repository root: scan every package of every known category
+	for x in repoman_settings.categories:
+		if not os.path.isdir(startdir+"/"+x):
+			continue
+		for y in os.listdir(startdir+"/"+x):
+			if y == "CVS" or y.startswith("."):
+				continue
+			if os.path.isdir(startdir+"/"+x+"/"+y):
+				scanlist.append(x+"/"+y)
+	repo_subdir = ""
+elif repolevel==3:
+	# package directory: scan just this one package
+	catdir = reposplit[-2]
+	if catdir not in repoman_settings.categories:
+		caterror(catdir)
+	scanlist.append(catdir+"/"+reposplit[-1])
+	repo_subdir = scanlist[-1] + os.sep
+else:
+	msg = 'Repoman is unable to determine PORTDIR or PORTDIR_OVERLAY' + \
+		' from the current working directory'
+	logging.critical(msg)
+	sys.exit(1)
+
+# repo_subdir is the path prefix from the repository root to startdir.
+repo_subdir_len = len(repo_subdir)
+scanlist.sort()
+
+logging.debug("Found the following packages to scan:\n%s" % '\n'.join(scanlist))
+
def dev_keywords(profiles):
	"""
	Build the frozenset of KEYWORDS values (both stable "arch" and
	testing "~arch" forms) whose arch has at least one 'dev' profile.
	These are used to trigger a message notifying the user when they
	might want to add the --include-dev option.
	"""
	status_to_arches = {}
	for arch, prof_list in profiles.items():
		for prof in prof_list:
			status_to_arches.setdefault(prof.status, set()).add(arch)

	dev_arches = status_to_arches.get('dev', set())
	testing = set('~' + arch for arch in dev_arches)
	return frozenset(dev_arches | testing)
+
+# NOTE: rebinds the name -- from here on dev_keywords is the frozenset
+# of keywords with 'dev' profiles, not the function defined above.
+dev_keywords = dev_keywords(profiles)
+
+# Per-QA-category counters and failure-message lists.
+stats={}
+fails={}
+
+# provided by the desktop-file-utils package
+desktop_file_validate = find_binary("desktop-file-validate")
+desktop_pattern = re.compile(r'.*\.desktop$')
+
+for x in qacats:
+	stats[x]=0
+	fails[x]=[]
+
+# Flipped to True below once xmllint is found and metadata.dtd is fetched.
+xmllint_capable = False
+metadata_dtd = os.path.join(repoman_settings["DISTDIR"], 'metadata.dtd')
+
def parsedate(s):
	"""Parse an RFC 822 date and time string into a time.struct_time.

	Returns None when the string does not split into the expected six
	fields or the month name is unknown.  This is required for python3
	compatibility, since the rfc822.parsedate() function is not
	available.
	"""
	tokens = [tok
		for chunk in s.upper().split()
		for tok in chunk.split(',') if tok]
	if len(tokens) != 6:
		return None

	# Fields: weekday, day, month-name, year, H:M:S, timezone name.
	_wday, day, mon_name, year, hms, _zone = tokens

	# Resolve the month name ourselves, since strptime's month-name
	# handling is locale-dependent.
	months = {'JAN':1, 'FEB':2, 'MAR':3, 'APR':4, 'MAY':5, 'JUN':6,
		'JUL':7, 'AUG':8, 'SEP':9, 'OCT':10, 'NOV':11, 'DEC':12}
	mon = months.get(mon_name)
	if mon is None:
		return None

	return time.strptime('%s:%02d:%s:%s' % (year, mon, day, hms),
		'%Y:%m:%d:%H:%M:%S')
+
+def fetch_metadata_dtd():
+	"""
+	Fetch metadata.dtd if it doesn't exist or the ctime is older than
+	metadata_dtd_ctime_interval (a module-level setting).
+	@rtype: bool
+	@returns: True if successful, otherwise False
+	"""
+
+	must_fetch = True
+	metadata_dtd_st = None
+	current_time = int(time.time())
+	try:
+		metadata_dtd_st = os.stat(metadata_dtd)
+	except EnvironmentError as e:
+		# A missing file (or stale NFS handle) simply means we must fetch.
+		if e.errno not in (errno.ENOENT, errno.ESTALE):
+			raise
+		del e
+	else:
+		# Trigger fetch if metadata.dtd mtime is old or clock is wrong.
+		if abs(current_time - metadata_dtd_st.st_ctime) \
+			< metadata_dtd_ctime_interval:
+			must_fetch = False
+
+	if must_fetch:
+		print()
+		print(green("***") + " the local copy of metadata.dtd " + \
+			"needs to be refetched, doing that now")
+		print()
+		try:
+			# urllib_request_urlopen is presumably a py2/py3 compat alias
+			# imported at the top of the file -- not visible here.
+			url_f = urllib_request_urlopen(metadata_dtd_uri)
+			msg_info = url_f.info()
+			last_modified = msg_info.get('last-modified')
+			if last_modified is not None:
+				last_modified = parsedate(last_modified)
+				if last_modified is not None:
+					last_modified = calendar.timegm(last_modified)
+
+			# Download to a pid-suffixed temp file, then rename into
+			# place so a partial download never clobbers a good copy.
+			metadata_dtd_tmp = "%s.%s" % (metadata_dtd, os.getpid())
+			try:
+				local_f = open(metadata_dtd_tmp, mode='wb')
+				local_f.write(url_f.read())
+				local_f.close()
+				if last_modified is not None:
+					try:
+						os.utime(metadata_dtd_tmp,
+							(int(last_modified), int(last_modified)))
+					except OSError:
+						# This fails on some odd non-unix-like filesystems.
+						# We don't really need the mtime to be preserved
+						# anyway here (currently we use ctime to trigger
+						# fetch), so just ignore it.
+						pass
+				os.rename(metadata_dtd_tmp, metadata_dtd)
+			finally:
+				# After a successful rename this unlink fails with ENOENT,
+				# which is swallowed; it only cleans up aborted downloads.
+				try:
+					os.unlink(metadata_dtd_tmp)
+				except OSError:
+					pass
+
+			url_f.close()
+
+		except EnvironmentError as e:
+			print()
+			print(red("!!!")+" attempting to fetch '%s', caught" % metadata_dtd_uri)
+			print(red("!!!")+" exception '%s' though." % (e,))
+			print(red("!!!")+" fetching new metadata.dtd failed, aborting")
+			return False
+
+	return True
+
+if options.mode == "manifest":
+	# Manifest-only mode needs no metadata.xml validation at all.
+	pass
+elif not find_binary('xmllint'):
+	print(red("!!! xmllint not found. Can't check metadata.xml.\n"))
+	# xmllint is mandatory when explicitly requested or when committing
+	# from a package directory (repolevel 3).
+	if options.xml_parse or repolevel==3:
+		print(red("!!!")+" sorry, xmllint is needed. failing\n")
+		sys.exit(1)
+else:
+	if not fetch_metadata_dtd():
+		sys.exit(1)
+	#this can be problematic if xmllint changes their output
+	xmllint_capable=True
+
+# Refuse to commit over unresolved VCS conflicts.
+if options.mode == 'commit' and vcs:
+	utilities.detect_vcs_conflicts(options, vcs)
+
+if options.mode == "manifest":
+	pass
+elif options.pretend:
+	print(green("\nRepoMan does a once-over of the neighborhood..."))
+else:
+	print(green("\nRepoMan scours the neighborhood..."))
+
# Collect, per VCS, the files modified and newly added in the working
# copy, then derive the ebuild/ChangeLog sets used by the commit-time
# checks below.
new_ebuilds = set()
modified_ebuilds = set()
modified_changelogs = set()
mychanged = []
mynew = []
myremoved = []

if vcs == "cvs":
	mycvstree = cvstree.getentries("./", recursive=1)
	mychanged = cvstree.findchanged(mycvstree, recursive=1, basedir="./")
	mynew = cvstree.findnew(mycvstree, recursive=1, basedir="./")
if vcs == "svn":
	svnstatus = os.popen("svn status").readlines()
	mychanged = [ "./" + elem.split()[-1:][0] for elem in svnstatus if elem and elem[:1] in "MR" ]
	mynew = [ "./" + elem.split()[-1:][0] for elem in svnstatus if elem.startswith("A") ]
elif vcs == "git":
	# git reports paths relative to the cwd, one per line.
	mychanged = os.popen("git diff-index --name-only --relative --diff-filter=M HEAD").readlines()
	mychanged = ["./" + elem[:-1] for elem in mychanged]

	mynew = os.popen("git diff-index --name-only --relative --diff-filter=A HEAD").readlines()
	mynew = ["./" + elem[:-1] for elem in mynew]
elif vcs == "bzr":
	# `bzr status -S` lines are "XY path": first column R = renamed,
	# second column N = newly added, K = kind changed, M = modified.
	bzrstatus = os.popen("bzr status -S .").readlines()
	mychanged = [ "./" + elem.split()[-1:][0].split('/')[-1:][0] for elem in bzrstatus if elem and elem[1:2] == "M" ]
	# BUG FIX: elem[1:2] is at most one character, so the original test
	# `elem[1:2] == "NK"` was always False and newly added files were
	# never detected; test membership against the individual codes.
	mynew = [ "./" + elem.split()[-1:][0].split('/')[-1:][0] for elem in bzrstatus if elem and ( elem[1:2] in ("N", "K") or elem[0:1] == "R" ) ]
elif vcs == "hg":
	mychanged = os.popen("hg status --no-status --modified .").readlines()
	mychanged = ["./" + elem.rstrip() for elem in mychanged]
	mynew = os.popen("hg status --no-status --added .").readlines()
	mynew = ["./" + elem.rstrip() for elem in mynew]

if vcs:
	new_ebuilds.update(x for x in mynew if x.endswith(".ebuild"))
	modified_ebuilds.update(x for x in mychanged if x.endswith(".ebuild"))
	modified_changelogs.update(x for x in chain(mychanged, mynew) \
		if os.path.basename(x) == "ChangeLog")
+
+have_pmasked = False
+have_dev_keywords = False
+dofail = 0
+# Per-arch match caches shared across packages to speed up the
+# dependency checks in the main scan loop.
+arch_caches={}
+arch_xmatch_caches = {}
+shared_xmatch_caches = {"cp-list":{}}
+
+# Disable the "ebuild.notadded" check when not in commit mode and
+# running `svn status` in every package dir will be too expensive.
+
+check_ebuild_notadded = not \
+	(vcs == "svn" and repolevel < 3 and options.mode != "commit")
+
+# Build a regex from thirdpartymirrors for the SRC_URI.mirror check.
+# NOTE(review): despite the comment above, this is a flat list of mirror
+# URL prefixes (matched with startswith() later), not a compiled regex.
+thirdpartymirrors = []
+for v in repoman_settings.thirdpartymirrors().values():
+	thirdpartymirrors.extend(v)
+
+class _MetadataTreeBuilder(xml.etree.ElementTree.TreeBuilder):
+ """
+ Implements doctype() as required to avoid deprecation warnings with
+ >=python-2.7.
+ """
+ def doctype(self, name, pubid, system):
+ pass
+
+# Load herds.xml so metadata.xml <herd> entries can be validated; a
+# missing file is tolerated (herd_base = None disables that check).
+try:
+	herd_base = make_herd_base(os.path.join(repoman_settings["PORTDIR"], "metadata/herds.xml"))
+except (EnvironmentError, ParseError, PermissionDenied) as e:
+	err(str(e))
+except FileNotFound:
+	# TODO: Download as we do for metadata.dtd, but add a way to
+	# disable for non-gentoo repoman users who may not have herds.
+	herd_base = None
+
+for x in scanlist:
+ #ebuilds and digests added to cvs respectively.
+ logging.info("checking package %s" % x)
+ eadded=[]
+ catdir,pkgdir=x.split("/")
+ checkdir=repodir+"/"+x
+ checkdir_relative = ""
+ if repolevel < 3:
+ checkdir_relative = os.path.join(pkgdir, checkdir_relative)
+ if repolevel < 2:
+ checkdir_relative = os.path.join(catdir, checkdir_relative)
+ checkdir_relative = os.path.join(".", checkdir_relative)
+ generated_manifest = False
+
+ if options.mode == "manifest" or \
+ (options.mode != 'manifest-check' and \
+ 'digest' in repoman_settings.features) or \
+ options.mode in ('commit', 'fix') and not options.pretend:
+ auto_assumed = set()
+ fetchlist_dict = portage.FetchlistDict(checkdir,
+ repoman_settings, portdb)
+ if options.mode == 'manifest' and options.force:
+ portage._doebuild_manifest_exempt_depend += 1
+ try:
+ distdir = repoman_settings['DISTDIR']
+ mf = portage.manifest.Manifest(checkdir, distdir,
+ fetchlist_dict=fetchlist_dict)
+ mf.create(requiredDistfiles=None,
+ assumeDistHashesAlways=True)
+ for distfiles in fetchlist_dict.values():
+ for distfile in distfiles:
+ if os.path.isfile(os.path.join(distdir, distfile)):
+ mf.fhashdict['DIST'].pop(distfile, None)
+ else:
+ auto_assumed.add(distfile)
+ mf.write()
+ finally:
+ portage._doebuild_manifest_exempt_depend -= 1
+
+ repoman_settings["O"] = checkdir
+ try:
+ generated_manifest = digestgen(
+ mysettings=repoman_settings, myportdb=portdb)
+ except portage.exception.PermissionDenied as e:
+ generated_manifest = False
+ writemsg_level("!!! Permission denied: '%s'\n" % (e,),
+ level=logging.ERROR, noiselevel=-1)
+
+ if not generated_manifest:
+ print("Unable to generate manifest.")
+ dofail = 1
+
+ if options.mode == "manifest":
+ if not dofail and options.force and auto_assumed and \
+ 'assume-digests' in repoman_settings.features:
+ # Show which digests were assumed despite the --force option
+ # being given. This output will already have been shown by
+ # digestgen() if assume-digests is not enabled, so only show
+ # it here if assume-digests is enabled.
+ pkgs = list(fetchlist_dict)
+ pkgs.sort()
+ portage.writemsg_stdout(" digest.assumed" + \
+ portage.output.colorize("WARN",
+ str(len(auto_assumed)).rjust(18)) + "\n")
+ for cpv in pkgs:
+ fetchmap = fetchlist_dict[cpv]
+ pf = portage.catsplit(cpv)[1]
+ for distfile in sorted(fetchmap):
+ if distfile in auto_assumed:
+ portage.writemsg_stdout(
+ " %s::%s\n" % (pf, distfile))
+ continue
+ elif dofail:
+ sys.exit(1)
+
+ if not generated_manifest:
+ repoman_settings['O'] = checkdir
+ repoman_settings['PORTAGE_QUIET'] = '1'
+ if not portage.digestcheck([], repoman_settings, strict=1):
+ stats["manifest.bad"] += 1
+ fails["manifest.bad"].append(os.path.join(x, 'Manifest'))
+ repoman_settings.pop('PORTAGE_QUIET', None)
+
+ if options.mode == 'manifest-check':
+ continue
+
+ checkdirlist=os.listdir(checkdir)
+ ebuildlist=[]
+ pkgs = {}
+ allvalid = True
+ for y in checkdirlist:
+ if (y in no_exec or y.endswith(".ebuild")) and \
+ stat.S_IMODE(os.stat(os.path.join(checkdir, y)).st_mode) & 0o111:
+ stats["file.executable"] += 1
+ fails["file.executable"].append(os.path.join(checkdir, y))
+ if y.endswith(".ebuild"):
+ pf = y[:-7]
+ ebuildlist.append(pf)
+ cpv = "%s/%s" % (catdir, pf)
+ try:
+ myaux = dict(zip(allvars, portdb.aux_get(cpv, allvars)))
+ except KeyError:
+ allvalid = False
+ stats["ebuild.syntax"] += 1
+ fails["ebuild.syntax"].append(os.path.join(x, y))
+ continue
+ except IOError:
+ allvalid = False
+ stats["ebuild.output"] += 1
+ fails["ebuild.output"].append(os.path.join(x, y))
+ continue
+ if not portage.eapi_is_supported(myaux["EAPI"]):
+ allvalid = False
+ stats["EAPI.unsupported"] += 1
+ fails["EAPI.unsupported"].append(os.path.join(x, y))
+ continue
+ pkgs[pf] = Package(cpv=cpv, metadata=myaux,
+ root_config=root_config, type_name="ebuild")
+
+ # Sort ebuilds in ascending order for the KEYWORDS.dropped check.
+ pkgsplits = {}
+ for i in range(len(ebuildlist)):
+ ebuild_split = portage.pkgsplit(ebuildlist[i])
+ pkgsplits[ebuild_split] = ebuildlist[i]
+ ebuildlist[i] = ebuild_split
+ ebuildlist.sort(key=cmp_sort_key(portage.pkgcmp))
+ for i in range(len(ebuildlist)):
+ ebuildlist[i] = pkgsplits[ebuildlist[i]]
+ del pkgsplits
+
+ slot_keywords = {}
+
+ if len(pkgs) != len(ebuildlist):
+ # If we can't access all the metadata then it's totally unsafe to
+ # commit since there's no way to generate a correct Manifest.
+ # Do not try to do any more QA checks on this package since missing
+ # metadata leads to false positives for several checks, and false
+ # positives confuse users.
+ can_force = False
+ continue
+
+ for y in checkdirlist:
+ m = disallowed_filename_chars_re.search(y.strip(os.sep))
+ if m is not None:
+ stats["file.name"] += 1
+ fails["file.name"].append("%s/%s: char '%s'" % \
+ (checkdir, y, m.group(0)))
+
+ if not (y in ("ChangeLog", "metadata.xml") or y.endswith(".ebuild")):
+ continue
+ try:
+ line = 1
+ for l in io.open(_unicode_encode(os.path.join(checkdir, y),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content']):
+ line +=1
+ except UnicodeDecodeError as ue:
+ stats["file.UTF8"] += 1
+ s = ue.object[:ue.start]
+ l2 = s.count("\n")
+ line += l2
+ if l2 != 0:
+ s = s[s.rfind("\n") + 1:]
+ fails["file.UTF8"].append("%s/%s: line %i, just after: '%s'" % (checkdir, y, line, s))
+
+ if vcs in ("git", "hg") and check_ebuild_notadded:
+ if vcs == "git":
+ myf = os.popen("git ls-files --others %s" % \
+ (portage._shell_quote(checkdir_relative),))
+ if vcs == "hg":
+ myf = os.popen("hg status --no-status --unknown %s" % \
+ (portage._shell_quote(checkdir_relative),))
+ for l in myf:
+ if l[:-1][-7:] == ".ebuild":
+ stats["ebuild.notadded"] += 1
+ fails["ebuild.notadded"].append(
+ os.path.join(x, os.path.basename(l[:-1])))
+ myf.close()
+
+ if vcs in ("cvs", "svn", "bzr") and check_ebuild_notadded:
+ try:
+ if vcs == "cvs":
+ myf=open(checkdir+"/CVS/Entries","r")
+ if vcs == "svn":
+ myf = os.popen("svn status --depth=files --verbose " + checkdir)
+ if vcs == "bzr":
+ myf = os.popen("bzr ls -v --kind=file " + checkdir)
+ myl = myf.readlines()
+ myf.close()
+ for l in myl:
+ if vcs == "cvs":
+ if l[0]!="/":
+ continue
+ splitl=l[1:].split("/")
+ if not len(splitl):
+ continue
+ if splitl[0][-7:]==".ebuild":
+ eadded.append(splitl[0][:-7])
+ if vcs == "svn":
+ if l[:1] == "?":
+ continue
+ if l[:7] == ' >':
+ # tree conflict, new in subversion 1.6
+ continue
+ l = l.split()[-1]
+ if l[-7:] == ".ebuild":
+ eadded.append(os.path.basename(l[:-7]))
+ if vcs == "bzr":
+ if l[1:2] == "?":
+ continue
+ l = l.split()[-1]
+ if l[-7:] == ".ebuild":
+ eadded.append(os.path.basename(l[:-7]))
+ if vcs == "svn":
+ myf = os.popen("svn status " + checkdir)
+ myl=myf.readlines()
+ myf.close()
+ for l in myl:
+ if l[0] == "A":
+ l = l.rstrip().split(' ')[-1]
+ if l[-7:] == ".ebuild":
+ eadded.append(os.path.basename(l[:-7]))
+ except IOError:
+ if vcs == "cvs":
+ stats["CVS/Entries.IO_error"] += 1
+ fails["CVS/Entries.IO_error"].append(checkdir+"/CVS/Entries")
+ else:
+ raise
+ continue
+
+ mf = Manifest(checkdir, repoman_settings["DISTDIR"])
+ mydigests=mf.getTypeDigests("DIST")
+
+ fetchlist_dict = portage.FetchlistDict(checkdir, repoman_settings, portdb)
+ myfiles_all = []
+ src_uri_error = False
+ for mykey in fetchlist_dict:
+ try:
+ myfiles_all.extend(fetchlist_dict[mykey])
+ except portage.exception.InvalidDependString as e:
+ src_uri_error = True
+ try:
+ portdb.aux_get(mykey, ["SRC_URI"])
+ except KeyError:
+ # This will be reported as an "ebuild.syntax" error.
+ pass
+ else:
+ stats["SRC_URI.syntax"] = stats["SRC_URI.syntax"] + 1
+ fails["SRC_URI.syntax"].append(
+ "%s.ebuild SRC_URI: %s" % (mykey, e))
+ del fetchlist_dict
+ if not src_uri_error:
+ # This test can produce false positives if SRC_URI could not
+ # be parsed for one or more ebuilds. There's no point in
+ # producing a false error here since the root cause will
+ # produce a valid error elsewhere, such as "SRC_URI.syntax"
+ # or "ebuild.sytax".
+ myfiles_all = set(myfiles_all)
+ for entry in mydigests:
+ if entry not in myfiles_all:
+ stats["digest.unused"] += 1
+ fails["digest.unused"].append(checkdir+"::"+entry)
+ for entry in myfiles_all:
+ if entry not in mydigests:
+ stats["digest.missing"] += 1
+ fails["digest.missing"].append(checkdir+"::"+entry)
+ del myfiles_all
+
+ if os.path.exists(checkdir+"/files"):
+ filesdirlist=os.listdir(checkdir+"/files")
+
+ # recurse through files directory
+ # use filesdirlist as a stack, appending directories as needed so people can't hide > 20k files in a subdirectory.
+ while filesdirlist:
+ y = filesdirlist.pop(0)
+ relative_path = os.path.join(x, "files", y)
+ full_path = os.path.join(repodir, relative_path)
+ try:
+ mystat = os.stat(full_path)
+ except OSError as oe:
+ if oe.errno == 2:
+ # don't worry about it. it likely was removed via fix above.
+ continue
+ else:
+ raise oe
+ if S_ISDIR(mystat.st_mode):
+ # !!! VCS "portability" alert! Need some function isVcsDir() or alike !!!
+ if y == "CVS" or y == ".svn":
+ continue
+ for z in os.listdir(checkdir+"/files/"+y):
+ if z == "CVS" or z == ".svn":
+ continue
+ filesdirlist.append(y+"/"+z)
+ # Current policy is no files over 20 KiB, these are the checks. File size between
+ # 20 KiB and 60 KiB causes a warning, while file size over 60 KiB causes an error.
+ elif mystat.st_size > 61440:
+ stats["file.size.fatal"] += 1
+ fails["file.size.fatal"].append("("+ str(mystat.st_size//1024) + " KiB) "+x+"/files/"+y)
+ elif mystat.st_size > 20480:
+ stats["file.size"] += 1
+ fails["file.size"].append("("+ str(mystat.st_size//1024) + " KiB) "+x+"/files/"+y)
+
+ m = disallowed_filename_chars_re.search(
+ os.path.basename(y.rstrip(os.sep)))
+ if m is not None:
+ stats["file.name"] += 1
+ fails["file.name"].append("%s/files/%s: char '%s'" % \
+ (checkdir, y, m.group(0)))
+
+ if desktop_file_validate and desktop_pattern.match(y):
+ status, cmd_output = subprocess_getstatusoutput(
+ "'%s' '%s'" % (desktop_file_validate, full_path))
+ if os.WIFEXITED(status) and os.WEXITSTATUS(status) != os.EX_OK:
+ # Note: in the future we may want to grab the
+ # warnings in addition to the errors. We're
+ # just doing errors now since we don't want
+ # to generate too much noise at first.
+ error_re = re.compile(r'.*\s*error:\s*(.*)')
+ for line in cmd_output.splitlines():
+ error_match = error_re.match(line)
+ if error_match is None:
+ continue
+ stats["desktop.invalid"] += 1
+ fails["desktop.invalid"].append(
+ relative_path + ': %s' % error_match.group(1))
+
+ del mydigests
+
+ if check_changelog and "ChangeLog" not in checkdirlist:
+ stats["changelog.missing"]+=1
+ fails["changelog.missing"].append(x+"/ChangeLog")
+
+ musedict = {}
+ #metadata.xml file check
+ if "metadata.xml" not in checkdirlist:
+ stats["metadata.missing"]+=1
+ fails["metadata.missing"].append(x+"/metadata.xml")
+ #metadata.xml parse check
+ else:
+ metadata_bad = False
+
+ # read metadata.xml into memory
+ try:
+ _metadata_xml = xml.etree.ElementTree.parse(
+ os.path.join(checkdir, "metadata.xml"),
+ parser=xml.etree.ElementTree.XMLParser(
+ target=_MetadataTreeBuilder()))
+ except (ExpatError, SyntaxError, EnvironmentError) as e:
+ metadata_bad = True
+ stats["metadata.bad"] += 1
+ fails["metadata.bad"].append("%s/metadata.xml: %s" % (x, e))
+ del e
+ else:
+ # load USE flags from metadata.xml
+ try:
+ musedict = utilities.parse_metadata_use(_metadata_xml)
+ except portage.exception.ParseError as e:
+ metadata_bad = True
+ stats["metadata.bad"] += 1
+ fails["metadata.bad"].append("%s/metadata.xml: %s" % (x, e))
+
+ # Run other metadata.xml checkers
+ try:
+ utilities.check_metadata(_metadata_xml, herd_base)
+ except (utilities.UnknownHerdsError, ) as e:
+ metadata_bad = True
+ stats["metadata.bad"] += 1
+ fails["metadata.bad"].append("%s/metadata.xml: %s" % (x, e))
+ del e
+
+ #Only carry out if in package directory or check forced
+ if xmllint_capable and not metadata_bad:
+ # xmlint can produce garbage output even on success, so only dump
+ # the ouput when it fails.
+ st, out = subprocess_getstatusoutput(
+ "xmllint --nonet --noout --dtdvalid '%s' '%s'" % \
+ (metadata_dtd, os.path.join(checkdir, "metadata.xml")))
+ if st != os.EX_OK:
+ print(red("!!!") + " metadata.xml is invalid:")
+ for z in out.splitlines():
+ print(red("!!! ")+z)
+ stats["metadata.bad"]+=1
+ fails["metadata.bad"].append(x+"/metadata.xml")
+
+ del metadata_bad
+ muselist = frozenset(musedict)
+
+ changelog_path = os.path.join(checkdir_relative, "ChangeLog")
+ changelog_modified = changelog_path in modified_changelogs
+
+ allmasked = True
+ # detect unused local USE-descriptions
+ used_useflags = set()
+
+ for y in ebuildlist:
+ relative_path = os.path.join(x, y + ".ebuild")
+ full_path = os.path.join(repodir, relative_path)
+ ebuild_path = y + ".ebuild"
+ if repolevel < 3:
+ ebuild_path = os.path.join(pkgdir, ebuild_path)
+ if repolevel < 2:
+ ebuild_path = os.path.join(catdir, ebuild_path)
+ ebuild_path = os.path.join(".", ebuild_path)
+ if check_changelog and not changelog_modified \
+ and ebuild_path in new_ebuilds:
+ stats['changelog.ebuildadded'] += 1
+ fails['changelog.ebuildadded'].append(relative_path)
+
+ if vcs in ("cvs", "svn", "bzr") and check_ebuild_notadded and y not in eadded:
+ #ebuild not added to vcs
+ stats["ebuild.notadded"]=stats["ebuild.notadded"]+1
+ fails["ebuild.notadded"].append(x+"/"+y+".ebuild")
+ myesplit=portage.pkgsplit(y)
+ if myesplit is None or myesplit[0] != x.split("/")[-1] \
+ or pv_toolong_re.search(myesplit[1]) \
+ or pv_toolong_re.search(myesplit[2]):
+ stats["ebuild.invalidname"]=stats["ebuild.invalidname"]+1
+ fails["ebuild.invalidname"].append(x+"/"+y+".ebuild")
+ continue
+ elif myesplit[0]!=pkgdir:
+ print(pkgdir,myesplit[0])
+ stats["ebuild.namenomatch"]=stats["ebuild.namenomatch"]+1
+ fails["ebuild.namenomatch"].append(x+"/"+y+".ebuild")
+ continue
+
+ pkg = pkgs[y]
+
+ if pkg.invalid:
+ allvalid = False
+ for k, msgs in pkg.invalid.items():
+ for msg in msgs:
+ stats[k] = stats[k] + 1
+ fails[k].append("%s %s" % (relative_path, msg))
+ continue
+
+ myaux = pkg.metadata
+ eapi = myaux["EAPI"]
+ inherited = pkg.inherited
+ live_ebuild = live_eclasses.intersection(inherited)
+
+ for k, v in myaux.items():
+ if not isinstance(v, basestring):
+ continue
+ m = non_ascii_re.search(v)
+ if m is not None:
+ stats["variable.invalidchar"] += 1
+ fails["variable.invalidchar"].append(
+ ("%s: %s variable contains non-ASCII " + \
+ "character at position %s") % \
+ (relative_path, k, m.start() + 1))
+
+ if not src_uri_error:
+ # Check that URIs don't reference a server from thirdpartymirrors.
+ for uri in portage.dep.use_reduce( \
+ myaux["SRC_URI"], matchall=True, is_src_uri=True, eapi=eapi, flat=True):
+ contains_mirror = False
+ for mirror in thirdpartymirrors:
+ if uri.startswith(mirror):
+ contains_mirror = True
+ break
+ if not contains_mirror:
+ continue
+
+ stats["SRC_URI.mirror"] += 1
+ fails["SRC_URI.mirror"].append(
+ "%s: '%s' found in thirdpartymirrors" % \
+ (relative_path, mirror))
+
+ if myaux.get("PROVIDE"):
+ stats["virtual.oldstyle"]+=1
+ fails["virtual.oldstyle"].append(relative_path)
+
+ for pos, missing_var in enumerate(missingvars):
+ if not myaux.get(missing_var):
+ if catdir == "virtual" and \
+ missing_var in ("HOMEPAGE", "LICENSE"):
+ continue
+ if live_ebuild and missing_var == "KEYWORDS":
+ continue
+ myqakey=missingvars[pos]+".missing"
+ stats[myqakey]=stats[myqakey]+1
+ fails[myqakey].append(x+"/"+y+".ebuild")
+
+ if catdir == "virtual":
+ for var in ("HOMEPAGE", "LICENSE"):
+ if myaux.get(var):
+ myqakey = var + ".virtual"
+ stats[myqakey] = stats[myqakey] + 1
+ fails[myqakey].append(relative_path)
+
+ # 14 is the length of DESCRIPTION=""
+ if len(myaux['DESCRIPTION']) > max_desc_len:
+ stats['DESCRIPTION.toolong'] += 1
+ fails['DESCRIPTION.toolong'].append(
+ "%s: DESCRIPTION is %d characters (max %d)" % \
+ (relative_path, len(myaux['DESCRIPTION']), max_desc_len))
+
+ keywords = myaux["KEYWORDS"].split()
+ stable_keywords = []
+ for keyword in keywords:
+ if not keyword.startswith("~") and \
+ not keyword.startswith("-"):
+ stable_keywords.append(keyword)
+ if stable_keywords:
+ if ebuild_path in new_ebuilds:
+ stable_keywords.sort()
+ stats["KEYWORDS.stable"] += 1
+ fails["KEYWORDS.stable"].append(
+ x + "/" + y + ".ebuild added with stable keywords: %s" % \
+ " ".join(stable_keywords))
+
+ ebuild_archs = set(kw.lstrip("~") for kw in keywords \
+ if not kw.startswith("-"))
+
+ previous_keywords = slot_keywords.get(myaux["SLOT"])
+ if previous_keywords is None:
+ slot_keywords[myaux["SLOT"]] = set()
+ elif ebuild_archs and not live_ebuild:
+ dropped_keywords = previous_keywords.difference(ebuild_archs)
+ if dropped_keywords:
+ stats["KEYWORDS.dropped"] += 1
+ fails["KEYWORDS.dropped"].append(
+ relative_path + ": %s" % \
+ " ".join(sorted(dropped_keywords)))
+
+ slot_keywords[myaux["SLOT"]].update(ebuild_archs)
+
+ # KEYWORDS="-*" is a stupid replacement for package.mask and screws general KEYWORDS semantics
+ if "-*" in keywords:
+ haskeyword = False
+ for kw in keywords:
+ if kw[0] == "~":
+ kw = kw[1:]
+ if kw in kwlist:
+ haskeyword = True
+ if not haskeyword:
+ stats["KEYWORDS.stupid"] += 1
+ fails["KEYWORDS.stupid"].append(x+"/"+y+".ebuild")
+
+ """
+ Ebuilds that inherit a "Live" eclass (darcs,subversion,git,cvs,etc..) should
+ not be allowed to be marked stable
+ """
+ if live_ebuild:
+ bad_stable_keywords = []
+ for keyword in keywords:
+ if not keyword.startswith("~") and \
+ not keyword.startswith("-"):
+ bad_stable_keywords.append(keyword)
+ del keyword
+ if bad_stable_keywords:
+ stats["LIVEVCS.stable"] += 1
+ fails["LIVEVCS.stable"].append(
+ x + "/" + y + ".ebuild with stable keywords:%s " % \
+ bad_stable_keywords)
+ del bad_stable_keywords
+
+ if keywords and not has_global_mask(pkg):
+ stats["LIVEVCS.unmasked"] += 1
+ fails["LIVEVCS.unmasked"].append(relative_path)
+
+ if options.ignore_arches:
+ arches = [[repoman_settings["ARCH"], repoman_settings["ARCH"],
+ repoman_settings["ACCEPT_KEYWORDS"].split()]]
+ else:
+ arches=[]
+ for keyword in myaux["KEYWORDS"].split():
+ if (keyword[0]=="-"):
+ continue
+ elif (keyword[0]=="~"):
+ arches.append([keyword, keyword[1:], [keyword[1:], keyword]])
+ else:
+ arches.append([keyword, keyword, [keyword]])
+ allmasked = False
+ if not arches:
+ # Use an empty profile for checking dependencies of
+ # packages that have empty KEYWORDS.
+ arches.append(['**', '**', ['**']])
+
+ unknown_pkgs = {}
+ baddepsyntax = False
+ badlicsyntax = False
+ badprovsyntax = False
+ catpkg = catdir+"/"+y
+
+ inherited_java_eclass = "java-pkg-2" in inherited or \
+ "java-pkg-opt-2" in inherited
+ inherited_wxwidgets_eclass = "wxwidgets" in inherited
+ operator_tokens = set(["||", "(", ")"])
+ type_list, badsyntax = [], []
+ for mytype in ("DEPEND", "RDEPEND", "PDEPEND",
+ "LICENSE", "PROPERTIES", "PROVIDE"):
+ mydepstr = myaux[mytype]
+
+ token_class = None
+ if mytype in ("DEPEND", "RDEPEND", "PDEPEND"):
+ token_class=portage.dep.Atom
+
+ try:
+ atoms = portage.dep.use_reduce(mydepstr, matchall=1, flat=True, \
+ is_valid_flag=pkg.iuse.is_valid_flag, token_class=token_class)
+ except portage.exception.InvalidDependString as e:
+ atoms = None
+ badsyntax.append(str(e))
+
+ if atoms and mytype in ("DEPEND", "RDEPEND", "PDEPEND"):
+ if mytype in ("RDEPEND", "PDEPEND") and \
+ "test?" in mydepstr.split():
+ stats[mytype + '.suspect'] += 1
+ fails[mytype + '.suspect'].append(relative_path + \
+ ": 'test?' USE conditional in %s" % mytype)
+
+ for atom in atoms:
+ if atom == "||":
+ continue
+
+ if not atom.blocker and \
+ not portdb.cp_list(atom.cp) and \
+ not atom.cp.startswith("virtual/"):
+ unknown_pkgs.setdefault(atom.cp, set()).add(
+ (mytype, atom.unevaluated_atom))
+
+ is_blocker = atom.blocker
+
+ if mytype == "DEPEND" and \
+ not is_blocker and \
+ not inherited_java_eclass and \
+ atom.cp == "virtual/jdk":
+ stats['java.eclassesnotused'] += 1
+ fails['java.eclassesnotused'].append(relative_path)
+ elif mytype == "DEPEND" and \
+ not is_blocker and \
+ not inherited_wxwidgets_eclass and \
+ atom.cp == "x11-libs/wxGTK":
+ stats['wxwidgets.eclassnotused'] += 1
+ fails['wxwidgets.eclassnotused'].append(
+ relative_path + ": DEPENDs on x11-libs/wxGTK"
+ " without inheriting wxwidgets.eclass")
+ elif mytype in ("PDEPEND", "RDEPEND"):
+ if not is_blocker and \
+ atom.cp in suspect_rdepend:
+ stats[mytype + '.suspect'] += 1
+ fails[mytype + '.suspect'].append(
+ relative_path + ": '%s'" % atom)
+
+ if atom.operator == "~" and \
+ portage.versions.catpkgsplit(atom.cpv)[3] != "r0":
+ stats[mytype + '.badtilde'] += 1
+ fails[mytype + '.badtilde'].append(
+ (relative_path + ": %s uses the ~ operator"
+ " with a non-zero revision:" + \
+ " '%s'") % (mytype, atom))
+
+ type_list.extend([mytype] * (len(badsyntax) - len(type_list)))
+
+ for m,b in zip(type_list, badsyntax):
+ stats[m+".syntax"] += 1
+ fails[m+".syntax"].append(catpkg+".ebuild "+m+": "+b)
+
+ badlicsyntax = len([z for z in type_list if z == "LICENSE"])
+ badprovsyntax = len([z for z in type_list if z == "PROVIDE"])
+ baddepsyntax = len(type_list) != badlicsyntax + badprovsyntax
+ badlicsyntax = badlicsyntax > 0
+ badprovsyntax = badprovsyntax > 0
+
+ # uselist checks - global
+ myuse = []
+ default_use = []
+ for myflag in myaux["IUSE"].split():
+ flag_name = myflag.lstrip("+-")
+ used_useflags.add(flag_name)
+ if myflag != flag_name:
+ default_use.append(myflag)
+ if flag_name not in uselist:
+ myuse.append(flag_name)
+
+ # uselist checks - metadata
+ for mypos in range(len(myuse)-1,-1,-1):
+ if myuse[mypos] and (myuse[mypos] in muselist):
+ del myuse[mypos]
+
+ if default_use and not eapi_has_iuse_defaults(eapi):
+ for myflag in default_use:
+ stats['EAPI.incompatible'] += 1
+ fails['EAPI.incompatible'].append(
+ (relative_path + ": IUSE defaults" + \
+ " not supported with EAPI='%s':" + \
+ " '%s'") % (eapi, myflag))
+
+ for mypos in range(len(myuse)):
+ stats["IUSE.invalid"]=stats["IUSE.invalid"]+1
+ fails["IUSE.invalid"].append(x+"/"+y+".ebuild: %s" % myuse[mypos])
+
+ # license checks
+ if not badlicsyntax:
+ # Parse the LICENSE variable, remove USE conditions and
+ # flatten it.
+ licenses = portage.dep.use_reduce(myaux["LICENSE"], matchall=1, flat=True)
+ # Check each entry to ensure that it exists in PORTDIR's
+ # license directory.
+ for lic in licenses:
+ # Need to check for "||" manually as no portage
+ # function will remove it without removing values.
+ if lic not in liclist and lic != "||":
+ stats["LICENSE.invalid"]=stats["LICENSE.invalid"]+1
+ fails["LICENSE.invalid"].append(x+"/"+y+".ebuild: %s" % lic)
+
+ #keyword checks
+ myuse = myaux["KEYWORDS"].split()
+ for mykey in myuse:
+ myskey=mykey[:]
+ if myskey[0]=="-":
+ myskey=myskey[1:]
+ if myskey[0]=="~":
+ myskey=myskey[1:]
+ if mykey!="-*":
+ if myskey not in kwlist:
+ stats["KEYWORDS.invalid"] += 1
+ fails["KEYWORDS.invalid"].append(x+"/"+y+".ebuild: %s" % mykey)
+ elif myskey not in profiles:
+ stats["KEYWORDS.invalid"] += 1
+ fails["KEYWORDS.invalid"].append(x+"/"+y+".ebuild: %s (profile invalid)" % mykey)
+
+ #restrict checks
+ myrestrict = None
+ try:
+ myrestrict = portage.dep.use_reduce(myaux["RESTRICT"], matchall=1, flat=True)
+ except portage.exception.InvalidDependString as e:
+ stats["RESTRICT.syntax"] = stats["RESTRICT.syntax"] + 1
+ fails["RESTRICT.syntax"].append(
+ "%s: RESTRICT: %s" % (relative_path, e))
+ del e
+ if myrestrict:
+ myrestrict = set(myrestrict)
+ mybadrestrict = myrestrict.difference(valid_restrict)
+ if mybadrestrict:
+ stats["RESTRICT.invalid"] += len(mybadrestrict)
+ for mybad in mybadrestrict:
+ fails["RESTRICT.invalid"].append(x+"/"+y+".ebuild: %s" % mybad)
+ #REQUIRED_USE check
+ required_use = myaux["REQUIRED_USE"]
+ if required_use:
+ if not eapi_has_required_use(eapi):
+ stats['EAPI.incompatible'] += 1
+ fails['EAPI.incompatible'].append(
+ relative_path + ": REQUIRED_USE" + \
+ " not supported with EAPI='%s'" % (eapi,))
+ try:
+ portage.dep.check_required_use(required_use, (),
+ pkg.iuse.is_valid_flag)
+ except portage.exception.InvalidDependString as e:
+ stats["REQUIRED_USE.syntax"] = stats["REQUIRED_USE.syntax"] + 1
+ fails["REQUIRED_USE.syntax"].append(
+ "%s: REQUIRED_USE: %s" % (relative_path, e))
+ del e
+
+ # Syntax Checks
+ relative_path = os.path.join(x, y + ".ebuild")
+ full_path = os.path.join(repodir, relative_path)
+ if not vcs_preserves_mtime:
+ if ebuild_path not in new_ebuilds and \
+ ebuild_path not in modified_ebuilds:
+ pkg.mtime = None
+ try:
+ # All ebuilds should have utf_8 encoding.
+ f = io.open(_unicode_encode(full_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'])
+ try:
+ for check_name, e in run_checks(f, pkg):
+ stats[check_name] += 1
+ fails[check_name].append(relative_path + ': %s' % e)
+ finally:
+ f.close()
+ except UnicodeDecodeError:
+ # A file.UTF8 failure will have already been recorded above.
+ pass
+
+ if options.force:
+ # The dep_check() calls are the most expensive QA test. If --force
+ # is enabled, there's no point in wasting time on these since the
+ # user is intent on forcing the commit anyway.
+ continue
+
+ for keyword,arch,groups in arches:
+
+ if arch not in profiles:
+ # A missing profile will create an error further down
+ # during the KEYWORDS verification.
+ continue
+
+ for prof in profiles[arch]:
+
+ if prof.status not in ("stable", "dev") or \
+ prof.status == "dev" and not options.include_dev:
+ continue
+
+ dep_settings = arch_caches.get(prof.sub_path)
+ if dep_settings is None:
+ dep_settings = portage.config(
+ config_profile_path=prof.abs_path,
+ config_incrementals=repoman_incrementals,
+ local_config=False,
+ _unmatched_removal=options.unmatched_removal,
+ env=env)
+ if options.without_mask:
+ dep_settings._mask_manager = \
+ copy.deepcopy(dep_settings._mask_manager)
+ dep_settings._mask_manager._pmaskdict.clear()
+ arch_caches[prof.sub_path] = dep_settings
+
+ xmatch_cache_key = (prof.sub_path, tuple(groups))
+ xcache = arch_xmatch_caches.get(xmatch_cache_key)
+ if xcache is None:
+ portdb.melt()
+ portdb.freeze()
+ xcache = portdb.xcache
+ xcache.update(shared_xmatch_caches)
+ arch_xmatch_caches[xmatch_cache_key] = xcache
+
+ trees["/"]["porttree"].settings = dep_settings
+ portdb.settings = dep_settings
+ portdb.xcache = xcache
+ # for package.use.mask support inside dep_check
+ dep_settings.setcpv(pkg)
+ dep_settings["ACCEPT_KEYWORDS"] = " ".join(groups)
+ # just in case, prevent config.reset() from nuking these.
+ dep_settings.backup_changes("ACCEPT_KEYWORDS")
+
+ if not baddepsyntax:
+ ismasked = not ebuild_archs or \
+ pkg.cpv not in portdb.xmatch("list-visible", pkg.cp)
+ if ismasked:
+ if not have_pmasked:
+ have_pmasked = bool(dep_settings._getMaskAtom(
+ pkg.cpv, pkg.metadata))
+ if options.ignore_masked:
+ continue
+ #we are testing deps for a masked package; give it some lee-way
+ suffix="masked"
+ matchmode = "minimum-all"
+ else:
+ suffix=""
+ matchmode = "minimum-visible"
+
+ if not have_dev_keywords:
+ have_dev_keywords = \
+ bool(dev_keywords.intersection(keywords))
+
+ if prof.status == "dev":
+ suffix=suffix+"indev"
+
+ for mytype,mypos in [["DEPEND",len(missingvars)],["RDEPEND",len(missingvars)+1],["PDEPEND",len(missingvars)+2]]:
+
+ mykey=mytype+".bad"+suffix
+ myvalue = myaux[mytype]
+ if not myvalue:
+ continue
+
+ success, atoms = portage.dep_check(myvalue, portdb,
+ dep_settings, use="all", mode=matchmode,
+ trees=trees)
+
+ if success:
+ if atoms:
+ for atom in atoms:
+ if not atom.blocker:
+ # Don't bother with dependency.unknown
+ # for cases in which *DEPEND.bad is
+ # triggered.
+ unknown_pkgs.pop(atom.cp, None)
+
+ if not prof.sub_path:
+ # old-style virtuals currently aren't
+ # resolvable with empty profile, since
+ # 'virtuals' mappings are unavailable
+ # (it would be expensive to search
+ # for PROVIDE in all ebuilds)
+ atoms = [atom for atom in atoms if not \
+ (atom.cp.startswith('virtual/') and \
+ not portdb.cp_list(atom.cp))]
+
+ #we have some unsolvable deps
+ #remove ! deps, which always show up as unsatisfiable
+ atoms = [str(atom.unevaluated_atom) \
+ for atom in atoms if not atom.blocker]
+
+ #if we emptied out our list, continue:
+ if not atoms:
+ continue
+ stats[mykey]=stats[mykey]+1
+ fails[mykey].append("%s: %s(%s) %s" % \
+ (relative_path, keyword,
+ prof, repr(atoms)))
+ else:
+ stats[mykey]=stats[mykey]+1
+ fails[mykey].append("%s: %s(%s) %s" % \
+ (relative_path, keyword,
+ prof, repr(atoms)))
+
+ if not baddepsyntax and unknown_pkgs:
+ all_unknown = set()
+ all_unknown.update(*unknown_pkgs.values())
+ type_map = {}
+ for mytype, atom in all_unknown:
+ type_map.setdefault(mytype, set()).add(atom)
+ for mytype, atoms in type_map.items():
+ stats["dependency.unknown"] += 1
+ fails["dependency.unknown"].append("%s: %s: %s" %
+ (relative_path, mytype, ", ".join(sorted(atoms))))
+
+ # Check for 'all unstable' or 'all masked' -- ACCEPT_KEYWORDS is stripped
+ # XXX -- Needs to be implemented in dep code. Can't determine ~arch nicely.
+ #if not portage.portdb.xmatch("bestmatch-visible",x):
+ # stats["ebuild.nostable"]+=1
+ # fails["ebuild.nostable"].append(x)
+ if ebuildlist and allmasked and repolevel == 3:
+ stats["ebuild.allmasked"]+=1
+ fails["ebuild.allmasked"].append(x)
+
+ # check if there are unused local USE-descriptions in metadata.xml
+ # (unless there are any invalids, to avoid noise)
+ if allvalid:
+ for myflag in muselist.difference(used_useflags):
+ stats["metadata.warning"] += 1
+ fails["metadata.warning"].append(
+ "%s/metadata.xml: unused local USE-description: '%s'" % \
+ (x, myflag))
+
# Manifest-only mode has nothing further to report; exit with the failure
# status accumulated so far.
if options.mode == "manifest":
	sys.exit(dofail)

#dofail will be set to 1 if we have failed in at least one non-warning category
dofail=0
#dowarn will be set to 1 if we tripped any warnings
dowarn=0
#dofull will be set if we should print a "repoman full" informational message
dofull = options.mode != 'full'

# Scan the QA counters: any non-zero category is at least a warning, and any
# category not whitelisted in qawarnings is a hard failure.
for x in qacats:
	if not stats[x]:
		continue
	dowarn = 1
	if x not in qawarnings:
		dofail = 1

if dofail or \
	(dowarn and not (options.quiet or options.mode == "scan")):
	dofull = 0

# Save QA output so that it can be conveniently displayed
# in $EDITOR while the user creates a commit message.
# Otherwise, the user would not be able to see this output
# once the editor has taken over the screen.
qa_output = io.StringIO()
style_file = ConsoleStyleFile(sys.stdout)
if options.mode == 'commit' and \
	(not commitmessage or not commitmessage.strip()):
	style_file.write_listener = qa_output
console_writer = StyleWriter(file=style_file, maxcol=9999)
console_writer.style_listener = style_file.new_styles

f = formatter.AbstractFormatter(console_writer)

utilities.format_qa_output(f, stats, fails, dofull, dofail, options, qawarnings)

style_file.flush()
del console_writer, f, style_file
# Replace the StringIO object with its captured text, split into lines with
# line endings kept (keepends=True) for later use as an editor template.
qa_output = qa_output.getvalue()
qa_output = qa_output.splitlines(True)
def grouplist(mylist,seperator="/"):
	"""Group a list of separator-delimited strings by their first component.

	A leading "." component is ignored.  Returns a dict mapping each base
	component to the list of remaining joined sub-paths, e.g.
	From: ["blah/foo","spork/spatula","blah/weee/splat"]
	To:   {"blah":["foo","weee/splat"], "spork":["spatula"]}
	"""
	grouped = {}
	for entry in mylist:
		parts = entry.split(seperator)
		if parts[0] == ".":
			# Drop a leading "." path component.
			parts = parts[1:]
		grouped.setdefault(parts[0], []).append(seperator.join(parts[1:]))
	return grouped
+
# Advisory notes: tell the user when masked-package dependency checks or
# 'dev' profile checks were skipped due to the current option set.
suggest_ignore_masked = False
suggest_include_dev = False

if have_pmasked and not (options.without_mask or options.ignore_masked):
	suggest_ignore_masked = True
if have_dev_keywords and not options.include_dev:
	suggest_include_dev = True

if suggest_ignore_masked or suggest_include_dev:
	print()
	if suggest_ignore_masked:
		print(bold("Note: use --without-mask to check " + \
			"KEYWORDS on dependencies of masked packages"))

	if suggest_include_dev:
		print(bold("Note: use --include-dev (-d) to check " + \
			"dependencies for 'dev' profiles"))
	print()
+
# Non-commit modes just report the verdict and exit; commit mode falls
# through into the VCS interaction below.
if options.mode != 'commit':
	if dofull:
		print(bold("Note: type \"repoman full\" for a complete listing."))
	if dowarn and not dofail:
		print(green("RepoMan sez:"),"\"You're only giving me a partial QA payment?\n I'll take it this time, but I'm not happy.\"")
	elif not dofail:
		print(green("RepoMan sez:"),"\"If everyone were like you, I'd be out of business!\"")
	elif dofail:
		print(bad("Please fix these important QA issues first."))
		print(green("RepoMan sez:"),"\"Make your QA payment on time and you'll never see the likes of me.\"\n")
		sys.exit(1)
else:
	# Commit mode: a hard failure aborts unless --force is allowed.
	if dofail and can_force and options.force and not options.pretend:
		print(green("RepoMan sez:") + \
			" \"You want to commit even with these QA issues?\n" + \
			" I'll take it this time, but I'm not happy.\"\n")
	elif dofail:
		if options.force and not can_force:
			print(bad("The --force option has been disabled due to extraordinary issues."))
		print(bad("Please fix these important QA issues first."))
		print(green("RepoMan sez:"),"\"Make your QA payment on time and you'll never see the likes of me.\"\n")
		sys.exit(1)

	if options.pretend:
		print(green("RepoMan sez:"), "\"So, you want to play it safe. Good call.\"\n")
+
	# Build the list of files present in the working copy but unknown to the
	# VCS; each supported backend reports these differently.
	myunadded = []
	if vcs == "cvs":
		try:
			myvcstree=portage.cvstree.getentries("./",recursive=1)
			myunadded=portage.cvstree.findunadded(myvcstree,recursive=1,basedir="./")
		except SystemExit as e:
			raise # TODO propagate this
		except:
			# NOTE(review): bare except deliberately turns any failure into
			# a fatal err() message.
			err("Error retrieving CVS tree; exiting.")
	if vcs == "svn":
		try:
			# '?' = unversioned, 'I' = ignored.
			svnstatus=os.popen("svn status --no-ignore").readlines()
			myunadded = [ "./"+elem.rstrip().split()[1] for elem in svnstatus if elem.startswith("?") or elem.startswith("I") ]
		except SystemExit as e:
			raise # TODO propagate this
		except:
			err("Error retrieving SVN info; exiting.")
	if vcs == "git":
		# get list of files not under version control or missing
		myf = os.popen("git ls-files --others")
		# elem[:-1] strips the trailing newline from each line.
		myunadded = [ "./" + elem[:-1] for elem in myf ]
		myf.close()
	if vcs == "bzr":
		try:
			bzrstatus=os.popen("bzr status -S .").readlines()
			myunadded = [ "./"+elem.rstrip().split()[1].split('/')[-1:][0] for elem in bzrstatus if elem.startswith("?") or elem[0:2] == " D" ]
		except SystemExit as e:
			raise # TODO propagate this
		except:
			err("Error retrieving bzr info; exiting.")
	if vcs == "hg":
		myunadded = os.popen("hg status --no-status --unknown .").readlines()
		myunadded = ["./" + elem.rstrip() for elem in myunadded]

		# Mercurial doesn't handle manually deleted files as removed from
		# the repository, so the user need to remove them before commit,
		# using "hg remove [FILES]"
		mydeleted = os.popen("hg status --no-status --deleted .").readlines()
		mydeleted = ["./" + elem.rstrip() for elem in mydeleted]
+
+
	# Auto-add unadded Manifest files; an unadded files/ directory is a
	# broken checkout and aborts immediately.
	myautoadd=[]
	if myunadded:
		# Iterate backwards so entries can be deleted in place.
		for x in range(len(myunadded)-1,-1,-1):
			xs=myunadded[x].split("/")
			if xs[-1]=="files":
				print("!!! files dir is not added! Please correct this.")
				sys.exit(-1)
			elif xs[-1]=="Manifest":
				# It's a manifest... auto add
				myautoadd+=[myunadded[x]]
				del myunadded[x]

	if myautoadd:
		print(">>> Auto-Adding missing Manifest(s)...")
		if options.pretend:
			if vcs == "cvs":
				print("(cvs add "+" ".join(myautoadd)+")")
			elif vcs == "svn":
				print("(svn add "+" ".join(myautoadd)+")")
			elif vcs == "git":
				print("(git add "+" ".join(myautoadd)+")")
			elif vcs == "bzr":
				print("(bzr add "+" ".join(myautoadd)+")")
			elif vcs == "hg":
				print("(hg add "+" ".join(myautoadd)+")")
			retval=0
		else:
			# NOTE(review): retval is only bound inside the recognized-vcs
			# branches; an unrecognized vcs value would hit a NameError at
			# the check below — presumably vcs is validated earlier.
			if vcs == "cvs":
				retval=os.system("cvs add "+" ".join(myautoadd))
			elif vcs == "svn":
				retval=os.system("svn add "+" ".join(myautoadd))
			elif vcs == "git":
				retval=os.system("git add "+" ".join(myautoadd))
			elif vcs == "bzr":
				retval=os.system("bzr add "+" ".join(myautoadd))
			elif vcs == "hg":
				retval=os.system("hg add "+" ".join(myautoadd))
			if retval:
				writemsg_level("!!! Exiting on %s (shell) error code: %s\n" % \
					(vcs, retval), level=logging.ERROR, noiselevel=-1)
				sys.exit(retval)

	# Any remaining unadded files (not Manifests) abort the commit.
	if myunadded:
		print(red("!!! The following files are in your local tree but are not added to the master"))
		print(red("!!! tree. Please remove them from the local tree or add them to the master tree."))
		for x in myunadded:
			print(" ",x)
		print()
		print()
		sys.exit(1)
+
	# Abort if Mercurial reports manually deleted files that were never
	# "hg remove"d — the commit would otherwise silently omit them.
	if vcs == "hg" and mydeleted:
		print(red("!!! The following files are removed manually from your local tree but are not"))
		print(red("!!! removed from the repository. Please remove them, using \"hg remove [FILES]\"."))
		for x in mydeleted:
			print(" ",x)
		print()
		print()
		sys.exit(1)
+
	# Gather per-VCS lists of changed, new, and removed files.
	if vcs == "cvs":
		mycvstree = cvstree.getentries("./", recursive=1)
		mychanged = cvstree.findchanged(mycvstree, recursive=1, basedir="./")
		mynew = cvstree.findnew(mycvstree, recursive=1, basedir="./")
		myremoved=portage.cvstree.findremoved(mycvstree,recursive=1,basedir="./")
		# Files with the -kb (binary) keyword option are excluded from
		# keyword expansion.
		bin_blob_pattern = re.compile("^-kb$")
		no_expansion = set(portage.cvstree.findoption(mycvstree, bin_blob_pattern,
			recursive=1, basedir="./"))


	if vcs == "svn":
		svnstatus = os.popen("svn status").readlines()
		mychanged = [ "./" + elem.split()[-1:][0] for elem in svnstatus if (elem[:1] in "MR" or elem[1:2] in "M")]
		mynew = [ "./" + elem.split()[-1:][0] for elem in svnstatus if elem.startswith("A")]
		myremoved = [ "./" + elem.split()[-1:][0] for elem in svnstatus if elem.startswith("D")]

		# Subversion expands keywords specified in svn:keywords properties.
		props = os.popen("svn propget -R svn:keywords").readlines()
		expansion = dict(("./" + prop.split(" - ")[0], prop.split(" - ")[1].split()) \
			for prop in props if " - " in prop)

	elif vcs == "git":
		mychanged = os.popen("git diff-index --name-only --relative --diff-filter=M HEAD").readlines()
		mychanged = ["./" + elem[:-1] for elem in mychanged]

		mynew = os.popen("git diff-index --name-only --relative --diff-filter=A HEAD").readlines()
		mynew = ["./" + elem[:-1] for elem in mynew]

		myremoved = os.popen("git diff-index --name-only --relative --diff-filter=D HEAD").readlines()
		myremoved = ["./" + elem[:-1] for elem in myremoved]

	if vcs == "bzr":
		bzrstatus = os.popen("bzr status -S .").readlines()
		mychanged = [ "./" + elem.split()[-1:][0].split('/')[-1:][0] for elem in bzrstatus if elem and elem[1:2] == "M" ]
		mynew = [ "./" + elem.split()[-1:][0].split('/')[-1:][0] for elem in bzrstatus if elem and ( elem[1:2] in "NK" or elem[0:1] == "R" ) ]
		myremoved = [ "./" + elem.split()[-1:][0].split('/')[-1:][0] for elem in bzrstatus if elem.startswith("-") ]
		# NOTE(review): the assignment below immediately overwrites the
		# myremoved list built above; presumably the two were meant to be
		# combined (e.g. with +=) — confirm against upstream repoman.
		myremoved = [ "./" + elem.split()[-3:-2][0].split('/')[-1:][0] for elem in bzrstatus if elem and ( elem[1:2] == "K" or elem[0:1] == "R" ) ]
		# Bazaar expands nothing.

	if vcs == "hg":
		mychanged = os.popen("hg status --no-status --modified .").readlines()
		mychanged = ["./" + elem.rstrip() for elem in mychanged]
		mynew = os.popen("hg status --no-status --added .").readlines()
		mynew = ["./" + elem.rstrip() for elem in mynew]
		myremoved = os.popen("hg status --no-status --removed .").readlines()
		myremoved = ["./" + elem.rstrip() for elem in myremoved]

	# Nothing changed at all: there is nothing to commit.
	if vcs:
		if not (mychanged or mynew or myremoved or (vcs == "hg" and mydeleted)):
			print(green("RepoMan sez:"), "\"Doing nothing is not always good for QA.\"")
			print()
			print("(Didn't find any changed files...)")
			print()
			sys.exit(1)
+
	# Manifests need to be regenerated after all other commits, so don't commit
	# them now even if they have changed.
	mymanifests = set()
	myupdates = set()
	for f in mychanged + mynew:
		if "Manifest" == os.path.basename(f):
			mymanifests.add(f)
		else:
			myupdates.add(f)
	if vcs in ('git', 'hg'):
		# Don't treat removed paths as updates.
		myupdates.difference_update(myremoved)
	myupdates = list(myupdates)
	mymanifests = list(mymanifests)
	myheaders = []
	mydirty = []

	print("* %s files being committed..." % green(str(len(myupdates))), end=' ')
	if vcs not in ('cvs', 'svn'):
		# With git, bzr and hg, there's never any keyword expansion, so
		# there's no need to regenerate manifests and all files will be
		# committed in one big commit at the end.
		print()
	else:
		if vcs == 'cvs':
			# NOTE(review): not a raw string; egrep receives the
			# backslash-escaped dollar signs literally.
			headerstring = "'\$(Header|Id).*\$'"
		elif vcs == "svn":
			# Map lowercased keyword names to their canonical spelling.
			svn_keywords = dict((k.lower(), k) for k in [
				"Rev",
				"Revision",
				"LastChangedRevision",
				"Date",
				"LastChangedDate",
				"Author",
				"LastChangedBy",
				"URL",
				"HeadURL",
				"Id",
				"Header",
			])

		# Find files whose contents will be rewritten by keyword expansion;
		# they must be committed separately from the Manifests.
		for myfile in myupdates:

			# for CVS, no_expansion contains files that are excluded from expansion
			if vcs == "cvs":
				if myfile in no_expansion:
					continue

			# for SVN, expansion contains files that are included in expansion
			elif vcs == "svn":
				if myfile not in expansion:
					continue

				# Subversion keywords are case-insensitive in svn:keywords properties, but case-sensitive in contents of files.
				enabled_keywords = []
				for k in expansion[myfile]:
					keyword = svn_keywords.get(k.lower())
					if keyword is not None:
						enabled_keywords.append(keyword)

				headerstring = "'\$(%s).*\$'" % "|".join(enabled_keywords)

			myout = subprocess_getstatusoutput("egrep -q "+headerstring+" "+myfile)
			if myout[0] == 0:
				myheaders.append(myfile)

		print("%s have headers that will change." % green(str(len(myheaders))))
		print("* Files with headers will cause the manifests to be changed and committed separately.")

	logging.info("myupdates: %s", myupdates)
	logging.info("myheaders: %s", myheaders)
+
	# Assemble the commit message: --commitmsg, --commitmsgfile, or an
	# interactive editor/stdin prompt; then append version/VCS metadata.
	commitmessage = options.commitmsg
	if options.commitmsgfile:
		try:
			f = io.open(_unicode_encode(options.commitmsgfile,
				encoding=_encodings['fs'], errors='strict'),
				mode='r', encoding=_encodings['content'], errors='replace')
			commitmessage = f.read()
			f.close()
			del f
		except (IOError, OSError) as e:
			if e.errno == errno.ENOENT:
				portage.writemsg("!!! File Not Found: --commitmsgfile='%s'\n" % options.commitmsgfile)
			else:
				raise
		# We've read the content so the file is no longer needed.
		commitmessagefile = None
	if not commitmessage or not commitmessage.strip():
		try:
			editor = os.environ.get("EDITOR")
			if editor and utilities.editor_is_executable(editor):
				# The captured QA output is used as the editor template.
				commitmessage = utilities.get_commit_message_with_editor(
					editor, message=qa_output)
			else:
				commitmessage = utilities.get_commit_message_with_stdin()
		except KeyboardInterrupt:
			exithandler()
		if not commitmessage or not commitmessage.strip():
			print("* no commit message? aborting commit.")
			sys.exit(1)
	commitmessage = commitmessage.rstrip()
	portage_version = getattr(portage, "VERSION", None)
	if portage_version is None:
		sys.stderr.write("Failed to insert portage version in message!\n")
		sys.stderr.flush()
		portage_version = "Unknown"
	# uname-style platform string for the commit footer.
	unameout = platform.system() + " "
	if platform.system() in ["Darwin", "SunOS"]:
		unameout += platform.processor()
	else:
		unameout += platform.machine()
	# Note: the closing ")" may be stripped and re-added later for the
	# Manifest commit message.
	commitmessage += "\n\n(Portage version: %s/%s/%s" % \
		(portage_version, vcs, unameout)
	if options.force:
		commitmessage += ", RepoMan options: --force"
	commitmessage += ")"

	if options.ask and userquery('Commit changes?', True) != 'Yes':
		print("* aborting commit.")
		sys.exit(1)
+
	# First commit (cvs/svn only): source files.  Manifests are regenerated
	# afterwards, so they are included here only when no header expansion
	# will change them and signing is disabled.
	if vcs in ('cvs', 'svn') and (myupdates or myremoved):
		myfiles = myupdates + myremoved
		if not myheaders and "sign" not in repoman_settings.features:
			myfiles += mymanifests
		fd, commitmessagefile = tempfile.mkstemp(".repoman.msg")
		mymsg = os.fdopen(fd, "wb")
		mymsg.write(_unicode_encode(commitmessage))
		mymsg.close()

		print()
		print(green("Using commit message:"))
		print(green("------------------------------------------------------------------------------"))
		print(commitmessage)
		print(green("------------------------------------------------------------------------------"))
		print()

		# Having a leading ./ prefix on file paths can trigger a bug in
		# the cvs server when committing files to multiple directories,
		# so strip the prefix.
		# NOTE(review): lstrip("./") strips any run of leading '.' and '/'
		# characters, not just a "./" prefix, so a path like "./.foo" would
		# be mangled — confirm no such paths occur here.
		myfiles = [f.lstrip("./") for f in myfiles]

		commit_cmd = [vcs]
		commit_cmd.extend(vcs_global_opts)
		commit_cmd.append("commit")
		commit_cmd.extend(vcs_local_opts)
		commit_cmd.extend(["-F", commitmessagefile])
		commit_cmd.extend(myfiles)

		try:
			if options.pretend:
				print("(%s)" % (" ".join(commit_cmd),))
			else:
				retval = spawn(commit_cmd, env=os.environ)
				if retval != os.EX_OK:
					writemsg_level(("!!! Exiting on %s (shell) " + \
						"error code: %s\n") % (vcs, retval),
						level=logging.ERROR, noiselevel=-1)
					sys.exit(retval)
		finally:
			# Always remove the temporary commit-message file.
			try:
				os.unlink(commitmessagefile)
			except OSError:
				pass
+
	# Setup the GPG commands
	def gpgsign(filename):
		"""Sign `filename` with the configured GPG command and replace it
		with the signed output (`filename`.asc is renamed over the
		original).  Raises MissingParameter for unset settings,
		InvalidLocation for an inaccessible PORTAGE_GPG_DIR, and
		PortageException when gpg exits with a non-zero status."""
		gpgcmd = repoman_settings.get("PORTAGE_GPG_SIGNING_COMMAND")
		if gpgcmd is None:
			raise MissingParameter("PORTAGE_GPG_SIGNING_COMMAND is unset!" + \
				" Is make.globals missing?")
		if "${PORTAGE_GPG_KEY}" in gpgcmd and \
			"PORTAGE_GPG_KEY" not in repoman_settings:
			raise MissingParameter("PORTAGE_GPG_KEY is unset!")
		if "${PORTAGE_GPG_DIR}" in gpgcmd:
			if "PORTAGE_GPG_DIR" not in repoman_settings:
				# Default to the user's gnupg home directory.
				repoman_settings["PORTAGE_GPG_DIR"] = \
					os.path.expanduser("~/.gnupg")
				logging.info("Automatically setting PORTAGE_GPG_DIR to '%s'" \
					% repoman_settings["PORTAGE_GPG_DIR"])
			else:
				repoman_settings["PORTAGE_GPG_DIR"] = \
					os.path.expanduser(repoman_settings["PORTAGE_GPG_DIR"])
			if not os.access(repoman_settings["PORTAGE_GPG_DIR"], os.X_OK):
				raise portage.exception.InvalidLocation(
					"Unable to access directory: PORTAGE_GPG_DIR='%s'" % \
					repoman_settings["PORTAGE_GPG_DIR"])
		# Substitute ${FILE}, ${PORTAGE_GPG_DIR} and ${PORTAGE_GPG_KEY}
		# into the command template.
		gpgvars = {"FILE": filename}
		for k in ("PORTAGE_GPG_DIR", "PORTAGE_GPG_KEY"):
			v = repoman_settings.get(k)
			if v is not None:
				gpgvars[k] = v
		gpgcmd = portage.util.varexpand(gpgcmd, mydict=gpgvars)
		if options.pretend:
			print("("+gpgcmd+")")
		else:
			rValue = os.system(gpgcmd)
			if rValue == os.EX_OK:
				os.rename(filename+".asc", filename)
			else:
				raise portage.exception.PortageException("!!! gpg exited with '" + str(rValue) + "' status")
+
	# When files are removed and re-added, the cvs server will put /Attic/
	# inside the $Header path. This code detects the problem and corrects it
	# so that the Manifest will generate correctly. See bug #169500.
	# Use binary mode in order to avoid potential character encoding issues.
	cvs_header_re = re.compile(br'^#\s*\$Header.*\$$')
	attic_str = b'/Attic/'
	attic_replace = b'/'
	for x in myheaders:
		f = open(_unicode_encode(x,
			encoding=_encodings['fs'], errors='strict'),
			mode='rb')
		mylines = f.readlines()
		f.close()
		modified = False
		for i, line in enumerate(mylines):
			if cvs_header_re.match(line) is not None and \
				attic_str in line:
				mylines[i] = line.replace(attic_str, attic_replace)
				modified = True
		if modified:
			# Atomic rewrite so an interruption cannot leave a truncated file.
			portage.util.write_atomic(x, b''.join(mylines),
				mode='wb')
+
	# Regenerate Manifests now that the source files are committed, then —
	# when multiple Manifests would need signing — commit them unsigned.
	manifest_commit_required = True
	if vcs in ('cvs', 'svn') and (myupdates or myremoved):
		myfiles = myupdates + myremoved
		# Drop entries too shallow for a package dir at this repolevel;
		# iterate backwards for in-place deletion.
		for x in range(len(myfiles)-1, -1, -1):
			if myfiles[x].count("/") < 4-repolevel:
				del myfiles[x]
		mydone=[]
		if repolevel==3: # In a package dir
			repoman_settings["O"] = startdir
			digestgen(mysettings=repoman_settings, myportdb=portdb)
		elif repolevel==2: # In a category dir
			for x in myfiles:
				xs=x.split("/")
				if len(xs) < 4-repolevel:
					continue
				if xs[0]==".":
					xs=xs[1:]
				if xs[0] in mydone:
					continue
				mydone.append(xs[0])
				repoman_settings["O"] = os.path.join(startdir, xs[0])
				if not os.path.isdir(repoman_settings["O"]):
					continue
				digestgen(mysettings=repoman_settings, myportdb=portdb)
		elif repolevel==1: # repo-cvsroot
			print(green("RepoMan sez:"), "\"You're rather crazy... doing the entire repository.\"\n")
			for x in myfiles:
				xs=x.split("/")
				if len(xs) < 4-repolevel:
					continue
				if xs[0]==".":
					xs=xs[1:]
				if "/".join(xs[:2]) in mydone:
					continue
				mydone.append("/".join(xs[:2]))
				repoman_settings["O"] = os.path.join(startdir, xs[0], xs[1])
				if not os.path.isdir(repoman_settings["O"]):
					continue
				digestgen(mysettings=repoman_settings, myportdb=portdb)
		else:
			print(red("I'm confused... I don't know where I am!"))
			sys.exit(1)

		# Force an unsigned commit when more than one Manifest needs to be signed.
		if repolevel < 3 and "sign" in repoman_settings.features:

			fd, commitmessagefile = tempfile.mkstemp(".repoman.msg")
			mymsg = os.fdopen(fd, "wb")
			mymsg.write(_unicode_encode(commitmessage))
			mymsg.write(b"\n (Unsigned Manifest commit)")
			mymsg.close()

			commit_cmd = [vcs]
			commit_cmd.extend(vcs_global_opts)
			commit_cmd.append("commit")
			commit_cmd.extend(vcs_local_opts)
			commit_cmd.extend(["-F", commitmessagefile])
			commit_cmd.extend(f.lstrip("./") for f in mymanifests)

			try:
				if options.pretend:
					print("(%s)" % (" ".join(commit_cmd),))
				else:
					retval = spawn(commit_cmd, env=os.environ)
					if retval:
						writemsg_level(("!!! Exiting on %s (shell) " + \
							"error code: %s\n") % (vcs, retval),
							level=logging.ERROR, noiselevel=-1)
						sys.exit(retval)
			finally:
				try:
					os.unlink(commitmessagefile)
				except OSError:
					pass
			manifest_commit_required = False
+
	# Sign the Manifest(s) when FEATURES=sign is enabled; a signing failure
	# disables signing rather than aborting the commit.
	signed = False
	if "sign" in repoman_settings.features:
		signed = True
		myfiles = myupdates + myremoved + mymanifests
		try:
			if repolevel==3: # In a package dir
				repoman_settings["O"] = "."
				gpgsign(os.path.join(repoman_settings["O"], "Manifest"))
			elif repolevel==2: # In a category dir
				mydone=[]
				for x in myfiles:
					xs=x.split("/")
					if len(xs) < 4-repolevel:
						continue
					if xs[0]==".":
						xs=xs[1:]
					if xs[0] in mydone:
						continue
					mydone.append(xs[0])
					repoman_settings["O"] = os.path.join(".", xs[0])
					if not os.path.isdir(repoman_settings["O"]):
						continue
					gpgsign(os.path.join(repoman_settings["O"], "Manifest"))
			elif repolevel==1: # repo-cvsroot
				print(green("RepoMan sez:"), "\"You're rather crazy... doing the entire repository.\"\n")
				mydone=[]
				for x in myfiles:
					xs=x.split("/")
					if len(xs) < 4-repolevel:
						continue
					if xs[0]==".":
						xs=xs[1:]
					if "/".join(xs[:2]) in mydone:
						continue
					mydone.append("/".join(xs[:2]))
					repoman_settings["O"] = os.path.join(".", xs[0], xs[1])
					if not os.path.isdir(repoman_settings["O"]):
						continue
					gpgsign(os.path.join(repoman_settings["O"], "Manifest"))
		except portage.exception.PortageException as e:
			portage.writemsg("!!! %s\n" % str(e))
			portage.writemsg("!!! Disabled FEATURES='sign'\n")
			signed = False
+
	if vcs == 'git':
		# It's not safe to use the git commit -a option since there might
		# be some modified files elsewhere in the working tree that the
		# user doesn't want to commit. Therefore, call git update-index
		# in order to ensure that the index is updated with the latest
		# versions of all new and modified files in the relevant portion
		# of the working tree.
		myfiles = mymanifests + myupdates
		myfiles.sort()
		update_index_cmd = ["git", "update-index"]
		update_index_cmd.extend(f.lstrip("./") for f in myfiles)
		if options.pretend:
			print("(%s)" % (" ".join(update_index_cmd),))
		else:
			retval = spawn(update_index_cmd, env=os.environ)
			if retval != os.EX_OK:
				writemsg_level(("!!! Exiting on %s (shell) " + \
					"error code: %s\n") % (vcs, retval),
					level=logging.ERROR, noiselevel=-1)
				sys.exit(retval)
+
	# Final commit: Manifests (plus everything else for git/bzr/hg, which
	# commit in one pass), annotated as signed or unsigned.
	if vcs in ['git', 'bzr', 'hg'] or manifest_commit_required or signed:

		myfiles = mymanifests[:]
		if vcs in ['git', 'bzr', 'hg']:
			myfiles += myupdates
			myfiles += myremoved
		myfiles.sort()

		fd, commitmessagefile = tempfile.mkstemp(".repoman.msg")
		mymsg = os.fdopen(fd, "wb")
		# strip the closing parenthesis
		mymsg.write(_unicode_encode(commitmessage[:-1]))
		if signed:
			mymsg.write(_unicode_encode(
				", signed Manifest commit with key %s)" % \
				repoman_settings["PORTAGE_GPG_KEY"]))
		else:
			mymsg.write(b", unsigned Manifest commit)")
		mymsg.close()

		commit_cmd = []
		if options.pretend and vcs is None:
			# substitute a bogus value for pretend output
			commit_cmd.append("cvs")
		else:
			commit_cmd.append(vcs)
		commit_cmd.extend(vcs_global_opts)
		commit_cmd.append("commit")
		commit_cmd.extend(vcs_local_opts)
		if vcs == "hg":
			# Mercurial takes the message via --logfile rather than -F.
			commit_cmd.extend(["--logfile", commitmessagefile])
			commit_cmd.extend(myfiles)
		else:
			commit_cmd.extend(["-F", commitmessagefile])
			commit_cmd.extend(f.lstrip("./") for f in myfiles)

		try:
			if options.pretend:
				print("(%s)" % (" ".join(commit_cmd),))
			else:
				retval = spawn(commit_cmd, env=os.environ)
				if retval != os.EX_OK:
					writemsg_level(("!!! Exiting on %s (shell) " + \
						"error code: %s\n") % (vcs, retval),
						level=logging.ERROR, noiselevel=-1)
					sys.exit(retval)
		finally:
			# Always remove the temporary commit-message file.
			try:
				os.unlink(commitmessagefile)
			except OSError:
				pass

	print()
	if vcs:
		print("Commit complete.")
	else:
		print("repoman was too scared by not seeing any familiar version control file that he forgot to commit anything")
	print(green("RepoMan sez:"), "\"If everyone were like you, I'd be out of business!\"\n")
sys.exit(0)
+
diff --git a/portage_with_autodep/bin/xpak-helper.py b/portage_with_autodep/bin/xpak-helper.py
new file mode 100755
index 0000000..4766d99
--- /dev/null
+++ b/portage_with_autodep/bin/xpak-helper.py
@@ -0,0 +1,68 @@
+#!/usr/bin/python
+# Copyright 2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import optparse
+import sys
+import portage
+from portage import os
+
def command_recompose(args):
	"""Recompose the xpak metadata segment of a binary package.

	Expects exactly two arguments: the path of an existing binary
	package file and a directory holding the replacement metadata
	(passed to portage.xpak.tbz2(...).recompose()).

	Returns os.EX_OK on success, or 1 after writing a usage banner
	plus a specific diagnostic to stderr for invalid arguments.
	"""
	usage = "usage: recompose <binpkg_path> <metadata_dir>\n"

	def _usage_error(detail):
		# Emit the usage banner followed by the specific complaint.
		sys.stderr.write(usage)
		sys.stderr.write(detail)
		return 1

	if len(args) != 2:
		return _usage_error("2 arguments are required, got %s\n" % len(args))

	binpkg_path, metadata_dir = args

	if not os.path.isfile(binpkg_path):
		return _usage_error(
			"Argument 1 is not a regular file: '%s'\n" % binpkg_path)

	if not os.path.isdir(metadata_dir):
		return _usage_error(
			"Argument 2 is not a directory: '%s'\n" % metadata_dir)

	# Rewrite the xpak segment of the tbz2 package in place.
	portage.xpak.tbz2(binpkg_path).recompose(metadata_dir)
	return os.EX_OK
+
def main(argv):
	"""Parse the command line and dispatch to the requested subcommand.

	argv is a full argument vector (argv[0] is the program name).
	Returns the subcommand's exit status; invalid usage is reported
	via parser.error(), which exits.
	"""
	# On Python 2, decode byte arguments to unicode before any use;
	# this branch is skipped entirely on Python 3.
	if argv and sys.hexversion < 0x3000000 and not isinstance(argv[0], unicode):
		argv[:] = [portage._unicode_decode(x, errors='strict') for x in argv]

	description = "Perform metadata operations on a binary package."
	usage = "usage: %s COMMAND [args]" % \
		os.path.basename(argv[0])

	parser = optparse.OptionParser(description=description, usage=usage)
	options, args = parser.parse_args(argv[1:])

	if not args:
		parser.error("missing command argument")

	command = args[0]

	# Map each supported command to its handler.
	handlers = {'recompose': command_recompose}

	if command not in handlers:
		parser.error("invalid command: '%s'" % command)

	return handlers[command](args[1:])
+
if __name__ == "__main__":
	# Script entry point: exit with the status returned by main().
	sys.exit(main(sys.argv[:]))
diff --git a/portage_with_autodep/pym/_emerge/AbstractDepPriority.py b/portage_with_autodep/pym/_emerge/AbstractDepPriority.py
new file mode 100644
index 0000000..94a9379
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/AbstractDepPriority.py
@@ -0,0 +1,29 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import copy
+from _emerge.SlotObject import SlotObject
+
class AbstractDepPriority(SlotObject):
	"""Base class for dependency priorities.

	All rich comparisons delegate to __int__() -- expected to be
	supplied by a subclass -- so that priorities compare naturally
	against plain integers and against each other.
	"""

	__slots__ = ("buildtime", "runtime", "runtime_post")

	def __lt__(self, other):
		return int(self) < other

	def __le__(self, other):
		return int(self) <= other

	def __eq__(self, other):
		return int(self) == other

	def __ne__(self, other):
		return int(self) != other

	def __gt__(self, other):
		return int(self) > other

	def __ge__(self, other):
		return int(self) >= other

	def copy(self):
		# Return a shallow copy of this priority.
		return copy.copy(self)
diff --git a/portage_with_autodep/pym/_emerge/AbstractEbuildProcess.py b/portage_with_autodep/pym/_emerge/AbstractEbuildProcess.py
new file mode 100644
index 0000000..4147ecb
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/AbstractEbuildProcess.py
@@ -0,0 +1,266 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import io
+import stat
+import textwrap
+from _emerge.SpawnProcess import SpawnProcess
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.EbuildIpcDaemon import EbuildIpcDaemon
+import portage
+from portage.elog import messages as elog_messages
+from portage.localization import _
+from portage.package.ebuild._ipc.ExitCommand import ExitCommand
+from portage.package.ebuild._ipc.QueryCommand import QueryCommand
+from portage import os
+from portage.util._pty import _create_pty_or_pipe
+from portage.util import apply_secpass_permissions
+
class AbstractEbuildProcess(SpawnProcess):
	"""Base class for processes that execute an ebuild phase.

	Extends SpawnProcess with ebuild-specific concerns: locking the
	build directory, running an EbuildIpcDaemon over a pair of fifos
	so the ebuild can report its exit status (and serve has_version /
	best_version queries), and emitting elog messages for abnormal
	exits.
	"""

	__slots__ = ('phase', 'settings',) + \
		('_build_dir', '_ipc_daemon', '_exit_command',)
	# Phases that may legitimately run without PORTAGE_BUILDDIR existing.
	_phases_without_builddir = ('clean', 'cleanrm', 'depend', 'help',)

	# Number of milliseconds to allow natural exit of the ebuild
	# process after it has called the exit command via IPC. It
	# doesn't hurt to be generous here since the scheduler
	# continues to process events during this period, and it can
	# return long before the timeout expires.
	_exit_timeout = 10000 # 10 seconds

	# The EbuildIpcDaemon support is well tested, but this variable
	# is left so we can temporarily disable it if any issues arise.
	_enable_ipc_daemon = True

	def __init__(self, **kwargs):
		SpawnProcess.__init__(self, **kwargs)
		if self.phase is None:
			# Fall back to the phase recorded in the ebuild environment,
			# or 'other' if none is set there.
			phase = self.settings.get("EBUILD_PHASE")
			if not phase:
				phase = 'other'
			self.phase = phase

	def _start(self):
		"""Validate the build dir, set up IPC (or the exit-file
		fallback), then spawn the ebuild process."""

		need_builddir = self.phase not in self._phases_without_builddir

		# This can happen if the pre-clean phase triggers
		# die_hooks for some reason, and PORTAGE_BUILDDIR
		# doesn't exist yet.
		if need_builddir and \
			not os.path.isdir(self.settings['PORTAGE_BUILDDIR']):
			# NOTE(review): "PORTAGE_BUILDIR" in this user-visible
			# message is missing a "D" -- typo left unchanged here.
			msg = _("The ebuild phase '%s' has been aborted "
			"since PORTAGE_BUILDIR does not exist: '%s'") % \
			(self.phase, self.settings['PORTAGE_BUILDDIR'])
			self._eerror(textwrap.wrap(msg, 72))
			# Fabricate a wait status carrying exit code 1, then finish.
			self._set_returncode((self.pid, 1 << 8))
			self.wait()
			return

		if self.background:
			# Automatically prevent color codes from showing up in logs,
			# since we're not displaying to a terminal anyway.
			self.settings['NOCOLOR'] = 'true'

		if self._enable_ipc_daemon:
			self.settings.pop('PORTAGE_EBUILD_EXIT_FILE', None)
			if self.phase not in self._phases_without_builddir:
				# NOTE(review): 'PORTAGE_BUILDIR_LOCKED' (sic) is
				# presumably set by a caller that already holds the
				# build-dir lock -- confirm against callers before
				# renaming either side.
				if 'PORTAGE_BUILDIR_LOCKED' not in self.settings:
					self._build_dir = EbuildBuildDir(
						scheduler=self.scheduler, settings=self.settings)
					self._build_dir.lock()
				self.settings['PORTAGE_IPC_DAEMON'] = "1"
				self._start_ipc_daemon()
			else:
				self.settings.pop('PORTAGE_IPC_DAEMON', None)
		else:
			# Since the IPC daemon is disabled, use a simple tempfile based
			# approach to detect unexpected exit like in bug #190128.
			self.settings.pop('PORTAGE_IPC_DAEMON', None)
			if self.phase not in self._phases_without_builddir:
				exit_file = os.path.join(
					self.settings['PORTAGE_BUILDDIR'],
					'.exit_status')
				self.settings['PORTAGE_EBUILD_EXIT_FILE'] = exit_file
				try:
					os.unlink(exit_file)
				except OSError:
					if os.path.exists(exit_file):
						# make sure it doesn't exist
						raise
			else:
				self.settings.pop('PORTAGE_EBUILD_EXIT_FILE', None)

		SpawnProcess._start(self)

	def _init_ipc_fifos(self):
		"""Create (or repair) the .ipc_in/.ipc_out fifos in the build
		directory and return their paths as (input_fifo, output_fifo)."""

		input_fifo = os.path.join(
			self.settings['PORTAGE_BUILDDIR'], '.ipc_in')
		output_fifo = os.path.join(
			self.settings['PORTAGE_BUILDDIR'], '.ipc_out')

		for p in (input_fifo, output_fifo):

			st = None
			try:
				st = os.lstat(p)
			except OSError:
				os.mkfifo(p)
			else:
				if not stat.S_ISFIFO(st.st_mode):
					# Something that isn't a fifo is in the way;
					# remove it and create a fresh fifo.
					st = None
					try:
						os.unlink(p)
					except OSError:
						pass
					os.mkfifo(p)

			# Grant owner and group full access (0o770), with the
			# group set to portage_gid.
			apply_secpass_permissions(p,
				uid=os.getuid(),
				gid=portage.data.portage_gid,
				mode=0o770, stat_cached=st)

		return (input_fifo, output_fifo)

	def _start_ipc_daemon(self):
		"""Start the EbuildIpcDaemon serving the exit, has_version and
		best_version commands over the build-dir fifos."""
		self._exit_command = ExitCommand()
		self._exit_command.reply_hook = self._exit_command_callback
		query_command = QueryCommand(self.settings, self.phase)
		commands = {
			'best_version' : query_command,
			'exit' : self._exit_command,
			'has_version' : query_command,
		}
		input_fifo, output_fifo = self._init_ipc_fifos()
		self._ipc_daemon = EbuildIpcDaemon(commands=commands,
			input_fifo=input_fifo,
			output_fifo=output_fifo,
			scheduler=self.scheduler)
		self._ipc_daemon.start()

	def _exit_command_callback(self):
		"""Invoked when the ebuild reports its exit status via IPC."""
		if self._registered:
			# Let the process exit naturally, if possible.
			self.scheduler.schedule(self._reg_id, timeout=self._exit_timeout)
			if self._registered:
				# If it doesn't exit naturally in a reasonable amount
				# of time, kill it (solves bug #278895). We try to avoid
				# this when possible since it makes sandbox complain about
				# being killed by a signal.
				self.cancel()

	def _orphan_process_warn(self):
		"""Warn via eerror that the phase left a background process."""
		phase = self.phase

		msg = _("The ebuild phase '%s' with pid %s appears "
			"to have left an orphan process running in the "
			"background.") % (phase, self.pid)

		self._eerror(textwrap.wrap(msg, 72))

	def _pipe(self, fd_pipes):
		"""Allocate (master_fd, slave_fd) for the child's output,
		preferring a pty (with terminal size copied from the stdout
		pipe when running in the foreground)."""
		stdout_pipe = None
		if not self.background:
			stdout_pipe = fd_pipes.get(1)
		got_pty, master_fd, slave_fd = \
			_create_pty_or_pipe(copy_term_size=stdout_pipe)
		return (master_fd, slave_fd)

	def _can_log(self, slave_fd):
		# With sesandbox, logging works through a pty but not through a
		# normal pipe. So, disable logging if ptys are broken.
		# See Bug #162404.
		# TODO: Add support for logging via named pipe (fifo) with
		# sesandbox, since EbuildIpcDaemon uses a fifo and it's known
		# to be compatible with sesandbox.
		return not ('sesandbox' in self.settings.features \
			and self.settings.selinux_enabled()) or os.isatty(slave_fd)

	def _killed_by_signal(self, signum):
		"""Report via eerror that the phase was killed by signum."""
		msg = _("The ebuild phase '%s' has been "
			"killed by signal %s.") % (self.phase, signum)
		self._eerror(textwrap.wrap(msg, 72))

	def _unexpected_exit(self):
		"""Emit a detailed eerror explaining an unexpected exit
		(bash exiting without reporting through IPC / exit file)."""

		phase = self.phase

		msg = _("The ebuild phase '%s' has exited "
			"unexpectedly. This type of behavior "
			"is known to be triggered "
			"by things such as failed variable "
			"assignments (bug #190128) or bad substitution "
			"errors (bug #200313). Normally, before exiting, bash should "
			"have displayed an error message above. If bash did not "
			"produce an error message above, it's possible "
			"that the ebuild has called `exit` when it "
			"should have called `die` instead. This behavior may also "
			"be triggered by a corrupt bash binary or a hardware "
			"problem such as memory or cpu malfunction. If the problem is not "
			"reproducible or it appears to occur randomly, then it is likely "
			"to be triggered by a hardware problem. "
			"If you suspect a hardware problem then you should "
			"try some basic hardware diagnostics such as memtest. "
			"Please do not report this as a bug unless it is consistently "
			"reproducible and you are sure that your bash binary and hardware "
			"are functioning properly.") % phase

		self._eerror(textwrap.wrap(msg, 72))

	def _eerror(self, lines):
		"""Route the given lines through the 'eerror' elog function."""
		self._elog('eerror', lines)

	def _elog(self, elog_funcname, lines):
		"""Format lines with the named elog function and hand the
		result to the scheduler's output (and log file, if any)."""
		out = io.StringIO()
		phase = self.phase
		elog_func = getattr(elog_messages, elog_funcname)
		global_havecolor = portage.output.havecolor
		try:
			# Honor this build's NOCOLOR setting while formatting,
			# then restore the global havecolor flag.
			portage.output.havecolor = \
				self.settings.get('NOCOLOR', 'false').lower() in ('no', 'false')
			for line in lines:
				elog_func(line, phase=phase, key=self.settings.mycpv, out=out)
		finally:
			portage.output.havecolor = global_havecolor
		msg = out.getvalue()
		if msg:
			log_path = None
			if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
				log_path = self.settings.get("PORTAGE_LOG_FILE")
			self.scheduler.output(msg, log_path=log_path)

	def _log_poll_exception(self, event):
		"""Log an unexpected poll event through the elog machinery."""
		self._elog("eerror",
			["%s received strange poll event: %s\n" % \
			(self.__class__.__name__, event,)])

	def _set_returncode(self, wait_retval):
		"""Translate the wait status into self.returncode.

		With the IPC daemon, the exit code reported over IPC is
		authoritative; its absence means the ebuild exited
		unexpectedly or was killed. Without the daemon, the absence
		of the exit file serves the same purpose.
		"""
		SpawnProcess._set_returncode(self, wait_retval)

		if self._ipc_daemon is not None:
			self._ipc_daemon.cancel()
			if self._exit_command.exitcode is not None:
				self.returncode = self._exit_command.exitcode
			else:
				# No exit code arrived over IPC.
				if self.returncode < 0:
					if not self.cancelled:
						# Negative returncode: killed by a signal.
						self._killed_by_signal(-self.returncode)
				else:
					self.returncode = 1
					if not self.cancelled:
						self._unexpected_exit()
			if self._build_dir is not None:
				self._build_dir.unlock()
				self._build_dir = None
		elif not self.cancelled:
			exit_file = self.settings.get('PORTAGE_EBUILD_EXIT_FILE')
			if exit_file and not os.path.exists(exit_file):
				# The ebuild exited without writing its exit file.
				if self.returncode < 0:
					if not self.cancelled:
						self._killed_by_signal(-self.returncode)
				else:
					self.returncode = 1
					if not self.cancelled:
						self._unexpected_exit()
diff --git a/portage_with_autodep/pym/_emerge/AbstractPollTask.py b/portage_with_autodep/pym/_emerge/AbstractPollTask.py
new file mode 100644
index 0000000..f7f3a95
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/AbstractPollTask.py
@@ -0,0 +1,62 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import array
+import logging
+
+from portage.util import writemsg_level
+from _emerge.AsynchronousTask import AsynchronousTask
+from _emerge.PollConstants import PollConstants
class AbstractPollTask(AsynchronousTask):
	"""Common base for tasks whose progress is driven by poll events.

	Subclasses register a file descriptor with the scheduler and feed
	the resulting events through the helpers below.
	"""

	__slots__ = ("scheduler",) + \
		("_registered",)

	_bufsize = 4096
	_exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
	_registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
		_exceptional_events

	def isAlive(self):
		# Alive for as long as our fd remains registered with the
		# scheduler.
		if self._registered:
			return True
		return False

	def _read_buf(self, f, event):
		"""Read up to self._bufsize bytes from f when POLLIN is set.

		Returns an array.array('B') containing whatever could be read
		(an empty array indicates EOF), or None when the POLLIN bit
		is not set in event.
		"""
		if not (event & PollConstants.POLLIN):
			return None
		buf = array.array('B')
		try:
			buf.fromfile(f, self._bufsize)
		except (EOFError, IOError):
			# A short read is reported through the (possibly empty)
			# buffer contents rather than an exception.
			pass
		return buf

	def _unregister(self):
		# Subclasses must remove their fd from the scheduler here.
		raise NotImplementedError(self)

	def _log_poll_exception(self, event):
		writemsg_level(
			"!!! %s received strange poll event: %s\n" % \
			(self.__class__.__name__, event,),
			level=logging.ERROR, noiselevel=-1)

	def _unregister_if_appropriate(self, event):
		"""Tear down on exceptional events; finish normally on hangup."""
		if not self._registered:
			return
		if event & self._exceptional_events:
			self._log_poll_exception(event)
			self._unregister()
			self.cancel()
		elif event & PollConstants.POLLHUP:
			self._unregister()
			self.wait()
+
diff --git a/portage_with_autodep/pym/_emerge/AsynchronousLock.py b/portage_with_autodep/pym/_emerge/AsynchronousLock.py
new file mode 100644
index 0000000..637ba73
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/AsynchronousLock.py
@@ -0,0 +1,288 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import dummy_threading
+import fcntl
+import logging
+import sys
+
+try:
+ import threading
+except ImportError:
+ import dummy_threading as threading
+
+import portage
+from portage import os
+from portage.exception import TryAgain
+from portage.localization import _
+from portage.locks import lockfile, unlockfile
+from portage.util import writemsg_level
+from _emerge.AbstractPollTask import AbstractPollTask
+from _emerge.AsynchronousTask import AsynchronousTask
+from _emerge.PollConstants import PollConstants
+from _emerge.SpawnProcess import SpawnProcess
+
class AsynchronousLock(AsynchronousTask):
	"""
	This uses the portage.locks module to acquire a lock asynchronously,
	using either a thread (if available) or a subprocess.

	The default behavior is to use a process instead of a thread, since
	there is currently no way to interrupt a thread that is waiting for
	a lock (notably, SIGINT doesn't work because python delivers all
	signals to the main thread).
	"""

	__slots__ = ('path', 'scheduler',) + \
		('_imp', '_force_async', '_force_dummy', '_force_process', \
		'_force_thread', '_waiting')

	# Prefer a subprocess over a thread by default; see the class
	# docstring for the rationale.
	_use_process_by_default = True

	def _start(self):

		# First try a cheap non-blocking acquisition in this process;
		# fall through to the async implementations only on contention
		# (TryAgain) or when async behavior is forced.
		if not self._force_async:
			try:
				self._imp = lockfile(self.path,
					wantnewlockfile=True, flags=os.O_NONBLOCK)
			except TryAgain:
				pass
			else:
				self.returncode = os.EX_OK
				self.wait()
				return

		if self._force_process or \
			(not self._force_thread and \
			(self._use_process_by_default or threading is dummy_threading)):
			self._imp = _LockProcess(path=self.path, scheduler=self.scheduler)
		else:
			self._imp = _LockThread(path=self.path,
				scheduler=self.scheduler,
				_force_dummy=self._force_dummy)

		self._imp.addExitListener(self._imp_exit)
		self._imp.start()

	def _imp_exit(self, imp):
		# call exit listeners, unless _wait() is already blocking on
		# the implementation (it handles completion itself).
		if not self._waiting:
			self.wait()

	def _cancel(self):
		# Only the async implementations are cancellable; a lock
		# already acquired synchronously via lockfile() is not a task.
		if isinstance(self._imp, AsynchronousTask):
			self._imp.cancel()

	def _poll(self):
		if isinstance(self._imp, AsynchronousTask):
			self._imp.poll()
		return self.returncode

	def _wait(self):
		if self.returncode is not None:
			return self.returncode
		# Guard against _imp_exit() re-entering wait() while we are
		# blocked on the implementation's wait().
		self._waiting = True
		self.returncode = self._imp.wait()
		self._waiting = False
		return self.returncode

	def unlock(self):
		"""Release the lock. Raises AssertionError if start() never
		acquired it (or it was already released)."""
		if self._imp is None:
			raise AssertionError('not locked')
		if isinstance(self._imp, (_LockProcess, _LockThread)):
			self._imp.unlock()
		else:
			# _imp is the raw lock object returned by lockfile().
			unlockfile(self._imp)
		self._imp = None
+
+class _LockThread(AbstractPollTask):
+ """
+ This uses the portage.locks module to acquire a lock asynchronously,
+ using a background thread. After the lock is acquired, the thread
+ writes to a pipe in order to notify a poll loop running in the main
+ thread.
+
+ If the threading module is unavailable then the dummy_threading
+ module will be used, and the lock will be acquired synchronously
+ (before the start() method returns).
+ """
+
+ __slots__ = ('path',) + \
+ ('_files', '_force_dummy', '_lock_obj',
+ '_thread', '_reg_id',)
+
+ def _start(self):
+ pr, pw = os.pipe()
+ self._files = {}
+ self._files['pipe_read'] = os.fdopen(pr, 'rb', 0)
+ self._files['pipe_write'] = os.fdopen(pw, 'wb', 0)
+ for k, f in self._files.items():
+ fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
+ fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
+ self._reg_id = self.scheduler.register(self._files['pipe_read'].fileno(),
+ PollConstants.POLLIN, self._output_handler)
+ self._registered = True
+ threading_mod = threading
+ if self._force_dummy:
+ threading_mod = dummy_threading
+ self._thread = threading_mod.Thread(target=self._run_lock)
+ self._thread.start()
+