author | 2014-02-17 17:55:51 +0600
---|---
committer | 2014-02-17 17:55:51 +0600
commit | 5a3f506c9ef1cfd78940b0509f10ef94b4434e29 (patch)
tree | 147c35a17a8bcd8ff467bb3063adab623da51fac /portage_with_autodep/pym/portage
parent | fixed a deadlock (diff)
download | autodep-5a3f506c9ef1cfd78940b0509f10ef94b4434e29.tar.gz autodep-5a3f506c9ef1cfd78940b0509f10ef94b4434e29.tar.bz2 autodep-5a3f506c9ef1cfd78940b0509f10ef94b4434e29.zip
updated portage to 2.2.8-r1
Diffstat (limited to 'portage_with_autodep/pym/portage')
388 files changed, 11768 insertions, 10348 deletions
diff --git a/portage_with_autodep/pym/portage/__init__.py b/portage_with_autodep/pym/portage/__init__.py
index 2a2eb99..431dc26 100644
--- a/portage_with_autodep/pym/portage/__init__.py
+++ b/portage_with_autodep/pym/portage/__init__.py
@@ -2,7 +2,7 @@
 # Copyright 1998-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
-VERSION="HEAD"
+VERSION="2.2.0_alpha108"
 
 # ===========================================================================
 # START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
@@ -148,20 +148,35 @@ if sys.hexversion >= 0x3000000:
 	basestring = str
 	long = int
 
-# Assume utf_8 fs encoding everywhere except in merge code, where the
-# user's locale is respected.
+# We use utf_8 encoding everywhere. Previously, we used
+# sys.getfilesystemencoding() for the 'merge' encoding, but that had
+# various problems:
+#
+# 1) If the locale is ever changed then it can cause orphan files due
+#    to changed character set translation.
+#
+# 2) Ebuilds typically install files with utf_8 encoded file names,
+#    and then portage would be forced to rename those files to match
+#    sys.getfilesystemencoding(), possibly breaking things.
+#
+# 3) Automatic translation between encodings can lead to nonsensical
+#    file names when the source encoding is unknown by portage.
+#
+# 4) It's inconvenient for ebuilds to convert the encodings of file
+#    names to match the current locale, and upstreams typically encode
+#    file names with utf_8 encoding.
+#
+# So, instead of relying on sys.getfilesystemencoding(), we avoid the above
+# problems by using a constant utf_8 'merge' encoding for all locales, as
+# discussed in bug #382199 and bug #381509.
 _encodings = {
 	'content'		: 'utf_8',
 	'fs'			: 'utf_8',
-	'merge'			: sys.getfilesystemencoding(),
+	'merge'			: 'utf_8',
 	'repo.content'	: 'utf_8',
 	'stdio'			: 'utf_8',
 }
 
-# This can happen if python is built with USE=build (stage 1).
-if _encodings['merge'] is None:
-	_encodings['merge'] = 'ascii'
-
 if sys.hexversion >= 0x3000000:
 	def _unicode_encode(s, encoding=_encodings['content'],
 			errors='backslashreplace'):
 		if isinstance(s, str):
@@ -215,7 +230,7 @@ class _unicode_func_wrapper(object):
 		rval = self._func(*wrapped_args, **wrapped_kwargs)
 
 		# Don't use isinstance() since we don't want to convert subclasses
-		# of tuple such as posix.stat_result in python-3.2.
+		# of tuple such as posix.stat_result in Python >=3.2.
 		if rval.__class__ in (list, tuple):
 			decoded_rval = []
 			for x in rval:
@@ -320,6 +335,16 @@ _python_interpreter = os.path.realpath(sys.executable)
 _bin_path = PORTAGE_BIN_PATH
 _pym_path = PORTAGE_PYM_PATH
 
+if sys.hexversion >= 0x3030000:
+	# Workaround for http://bugs.python.org/issue14007
+	def _test_xml_etree_ElementTree_TreeBuilder_type():
+		import subprocess
+		p = subprocess.Popen([_python_interpreter, "-c",
+			"import sys, xml.etree.ElementTree; sys.exit(not isinstance(xml.etree.ElementTree.TreeBuilder, type))"])
+		if p.wait() != 0:
+			sys.modules["_elementtree"] = None
+	_test_xml_etree_ElementTree_TreeBuilder_type()
+
 def _shell_quote(s):
 	"""
 	Quote a string in double-quotes and use backslashes to
@@ -380,9 +405,12 @@ def getcwd():
 			return "/"
 getcwd()
 
-def abssymlink(symlink):
+def abssymlink(symlink, target=None):
 	"This reads symlinks, resolving the relative symlinks, and returning the absolute."
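The hunk above hard-codes 'utf_8' for the 'merge' encoding instead of consulting sys.getfilesystemencoding(). A minimal standalone sketch of what that buys — not part of the patch; the file name below is illustrative — is that a name encoded with the constant 'merge' encoding round-trips regardless of the active locale:

```python
# Sketch only: mirrors the _encodings table and Python 3 helper above.
_encodings = {
    'content': 'utf_8',
    'fs': 'utf_8',
    'merge': 'utf_8',  # constant; no longer sys.getfilesystemencoding()
    'repo.content': 'utf_8',
    'stdio': 'utf_8',
}

def _unicode_encode(s, encoding=_encodings['content'],
        errors='backslashreplace'):
    # On Python 3, str is the unicode type; encode to bytes for syscalls.
    if isinstance(s, str):
        s = s.encode(encoding, errors)
    return s

if __name__ == '__main__':
    name = 'caf\u00e9-1.0.ebuild'  # illustrative file name
    raw = _unicode_encode(name, encoding=_encodings['merge'])
    assert raw.decode(_encodings['merge']) == name
```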
-	mylink=os.readlink(symlink)
+	if target is not None:
+		mylink = target
+	else:
+		mylink = os.readlink(symlink)
 	if mylink[0] != '/':
 		mydir=os.path.dirname(symlink)
 		mylink=mydir+"/"+mylink
@@ -417,29 +445,25 @@ def eapi_is_supported(eapi):
 			return False
 	return eapi <= portage.const.EAPI
 
-# Generally, it's best not to assume that cache entries for unsupported EAPIs
-# can be validated. However, the current package manager specification does not
-# guarantee that the EAPI can be parsed without sourcing the ebuild, so
-# it's too costly to discard existing cache entries for unsupported EAPIs.
-# Therefore, by default, assume that cache entries for unsupported EAPIs can be
-# validated. If FEATURES=parse-eapi-* is enabled, this assumption is discarded
-# since the EAPI can be determined without the incurring the cost of sourcing
-# the ebuild.
-_validate_cache_for_unsupported_eapis = True
-
-_parse_eapi_ebuild_head_re = re.compile(r'^EAPI=[\'"]?([^\'"#]*)')
-_parse_eapi_ebuild_head_max_lines = 30
+# This pattern is specified by PMS section 7.3.1.
+_pms_eapi_re = re.compile(r"^[ \t]*EAPI=(['\"]?)([A-Za-z0-9+_.-]*)\1[ \t]*([ \t]#.*)?$")
+_comment_or_blank_line = re.compile(r"^\s*(#.*)?$")
 
 def _parse_eapi_ebuild_head(f):
-	count = 0
+	eapi = None
+	eapi_lineno = None
+	lineno = 0
 	for line in f:
-		m = _parse_eapi_ebuild_head_re.match(line)
-		if m is not None:
-			return m.group(1).strip()
-		count += 1
-		if count >= _parse_eapi_ebuild_head_max_lines:
+		lineno += 1
+		m = _comment_or_blank_line.match(line)
+		if m is None:
+			eapi_lineno = lineno
+			m = _pms_eapi_re.match(line)
+			if m is not None:
+				eapi = m.group(2)
 			break
-	return '0'
+
+	return (eapi, eapi_lineno)
 
 def _movefile(src, dest, **kwargs):
 	"""Calls movefile and raises a PortageException if an error occurs."""
@@ -461,10 +485,16 @@ def portageexit():
 	if data.secpass > 1 and os.environ.get("SANDBOX_ON") != "1":
 		close_portdbapi_caches()
 
-def create_trees(config_root=None, target_root=None, trees=None):
-	if trees is None:
-		trees = {}
-	else:
+class _trees_dict(dict):
+	__slots__ = ('_running_eroot', '_target_eroot',)
+	def __init__(self, *pargs, **kargs):
+		dict.__init__(self, *pargs, **kargs)
+		self._running_eroot = None
+		self._target_eroot = None
+
+def create_trees(config_root=None, target_root=None, trees=None, env=None,
+	eprefix=None):
+	if trees is not None:
 		# clean up any existing portdbapi instances
 		for myroot in trees:
 			portdb = trees[myroot]["porttree"].dbapi
@@ -472,12 +502,25 @@ def create_trees(config_root=None, target_root=None, trees=None):
 			portdbapi.portdbapi_instances.remove(portdb)
 			del trees[myroot]["porttree"], myroot, portdb
 
+	if trees is None:
+		trees = _trees_dict()
+	elif not isinstance(trees, _trees_dict):
+		# caller passed a normal dict or something,
+		# but we need a _trees_dict instance
+		trees = _trees_dict(trees)
+
+	if env is None:
+		env = os.environ
+
 	settings = config(config_root=config_root, target_root=target_root,
-		config_incrementals=portage.const.INCREMENTALS)
+		env=env, eprefix=eprefix)
 	settings.lock()
 
-	myroots = [(settings["ROOT"], settings)]
-	if settings["ROOT"] != "/":
+	trees._target_eroot = settings['EROOT']
+	myroots = [(settings['EROOT'], settings)]
+	if settings["ROOT"] == "/":
+		trees._running_eroot = trees._target_eroot
+	else:
 
 		# When ROOT != "/" we only want overrides from the calling
 		# environment to apply to the config that's associated
@@ -485,24 +528,27 @@ def create_trees(config_root=None, target_root=None, trees=None):
 		clean_env = {}
 		for k in ('PATH', 'PORTAGE_GRPNAME',
'PORTAGE_USERNAME', 'SSH_AGENT_PID', 'SSH_AUTH_SOCK', 'TERM', - 'ftp_proxy', 'http_proxy', 'no_proxy'): + 'ftp_proxy', 'http_proxy', 'no_proxy', + '__PORTAGE_TEST_HARDLINK_LOCKS'): v = settings.get(k) if v is not None: clean_env[k] = v - settings = config(config_root=None, target_root="/", env=clean_env) + settings = config(config_root=None, target_root="/", + env=clean_env, eprefix=eprefix) settings.lock() - myroots.append((settings["ROOT"], settings)) + trees._running_eroot = settings['EROOT'] + myroots.append((settings['EROOT'], settings)) for myroot, mysettings in myroots: trees[myroot] = portage.util.LazyItemsDict(trees.get(myroot, {})) trees[myroot].addLazySingleton("virtuals", mysettings.getvirtuals) trees[myroot].addLazySingleton( - "vartree", vartree, myroot, categories=mysettings.categories, + "vartree", vartree, categories=mysettings.categories, settings=mysettings) trees[myroot].addLazySingleton("porttree", - portagetree, myroot, settings=mysettings) + portagetree, settings=mysettings) trees[myroot].addLazySingleton("bintree", - binarytree, myroot, mysettings["PKGDIR"], settings=mysettings) + binarytree, pkgdir=mysettings["PKGDIR"], settings=mysettings) return trees if VERSION == 'HEAD': diff --git a/portage_with_autodep/pym/portage/__init__.pyo b/portage_with_autodep/pym/portage/__init__.pyo Binary files differnew file mode 100644 index 0000000..9fc449e --- /dev/null +++ b/portage_with_autodep/pym/portage/__init__.pyo diff --git a/portage_with_autodep/pym/portage/_emirrordist/Config.py b/portage_with_autodep/pym/portage/_emirrordist/Config.py new file mode 100644 index 0000000..db4bfeb --- /dev/null +++ b/portage_with_autodep/pym/portage/_emirrordist/Config.py @@ -0,0 +1,132 @@ +# Copyright 2013 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + +import copy +import io +import logging +import shelve +import sys +import time + +import portage +from portage import os +from portage.util import grabdict, grablines +from portage.util._ShelveUnicodeWrapper import ShelveUnicodeWrapper + +class Config(object): + def __init__(self, options, portdb, event_loop): + self.options = options + self.portdb = portdb + self.event_loop = event_loop + self.added_byte_count = 0 + self.added_file_count = 0 + self.scheduled_deletion_count = 0 + self.delete_count = 0 + self.file_owners = {} + self.file_failures = {} + self.start_time = time.time() + self._open_files = [] + + self.log_success = self._open_log('success', options.success_log, 'a') + self.log_failure = self._open_log('failure', options.failure_log, 'a') + + self.distfiles = None + if options.distfiles is not None: + self.distfiles = options.distfiles + + self.mirrors = copy.copy(portdb.settings.thirdpartymirrors()) + + if options.mirror_overrides is not None: + self.mirrors.update(grabdict(options.mirror_overrides)) + + if options.mirror_skip is not None: + for x in options.mirror_skip.split(","): + self.mirrors[x] = [] + + self.whitelist = None + if options.whitelist_from is not None: + self.whitelist = set() + for filename in options.whitelist_from: + for line in grablines(filename): + line = line.strip() + if line and not line.startswith("#"): + self.whitelist.add(line) + + self.restrict_mirror_exemptions = None + if options.restrict_mirror_exemptions is not None: + self.restrict_mirror_exemptions = frozenset( + options.restrict_mirror_exemptions.split(",")) + + self.recycle_db = None + if options.recycle_db is not None: + self.recycle_db = self._open_shelve( + options.recycle_db, 'recycle') + + 
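Config._open_shelve in the hunk above opens each tracking database read-only during --dry-run and, when the file does not exist yet, substitutes a plain dict so that later "writes" stay in memory. A minimal sketch of that pattern, not part of the patch (the flag values are standard shelve behavior: 'c' creates, 'r' opens read-only):

```python
import logging
import os
import shelve

def open_db(db_file, dry_run):
    # 'c' creates the file if needed; 'r' opens strictly read-only.
    open_flag = "r" if dry_run else "c"

    if dry_run and not os.path.exists(db_file):
        db = {}  # nothing on disk yet; start from an empty mapping
    else:
        db = shelve.open(db_file, flag=open_flag)

    if dry_run:
        logging.warning("dry-run: %s opened in readonly mode", db_file)
        if not isinstance(db, dict):
            # Copy into a volatile dict so the caller may "write"
            # without ever touching the real database.
            volatile_db = dict((k, db[k]) for k in db)
            db.close()
            db = volatile_db
    return db
```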
self.distfiles_db = None + if options.distfiles_db is not None: + self.distfiles_db = self._open_shelve( + options.distfiles_db, 'distfiles') + + self.deletion_db = None + if options.deletion_db is not None: + self.deletion_db = self._open_shelve( + options.deletion_db, 'deletion') + + def _open_log(self, log_desc, log_path, mode): + + if log_path is None or self.options.dry_run: + log_func = logging.info + line_format = "%s: %%s" % log_desc + add_newline = False + if log_path is not None: + logging.warn(("dry-run: %s log " + "redirected to logging.info") % log_desc) + else: + self._open_files.append(io.open(log_path, mode=mode, + encoding='utf_8')) + line_format = "%s\n" + log_func = self._open_files[-1].write + + return self._LogFormatter(line_format, log_func) + + class _LogFormatter(object): + + __slots__ = ('_line_format', '_log_func') + + def __init__(self, line_format, log_func): + self._line_format = line_format + self._log_func = log_func + + def __call__(self, msg): + self._log_func(self._line_format % (msg,)) + + def _open_shelve(self, db_file, db_desc): + if self.options.dry_run: + open_flag = "r" + else: + open_flag = "c" + + if self.options.dry_run and not os.path.exists(db_file): + db = {} + else: + db = shelve.open(db_file, flag=open_flag) + if sys.hexversion < 0x3000000: + db = ShelveUnicodeWrapper(db) + + if self.options.dry_run: + logging.warn("dry-run: %s db opened in readonly mode" % db_desc) + if not isinstance(db, dict): + volatile_db = dict((k, db[k]) for k in db) + db.close() + db = volatile_db + else: + self._open_files.append(db) + + return db + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + while self._open_files: + self._open_files.pop().close() diff --git a/portage_with_autodep/pym/portage/_emirrordist/DeletionIterator.py b/portage_with_autodep/pym/portage/_emirrordist/DeletionIterator.py new file mode 100644 index 0000000..dff52c0 --- /dev/null +++ b/portage_with_autodep/pym/portage/_emirrordist/DeletionIterator.py @@ -0,0 +1,83 @@ +# Copyright 2013 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + +import logging +import stat + +from portage import os +from .DeletionTask import DeletionTask + +class DeletionIterator(object): + + def __init__(self, config): + self._config = config + + def __iter__(self): + distdir = self._config.options.distfiles + file_owners = self._config.file_owners + whitelist = self._config.whitelist + distfiles_local = self._config.options.distfiles_local + deletion_db = self._config.deletion_db + deletion_delay = self._config.options.deletion_delay + start_time = self._config.start_time + distfiles_set = set(os.listdir(self._config.options.distfiles)) + for filename in distfiles_set: + try: + st = os.stat(os.path.join(distdir, filename)) + except OSError as e: + logging.error("stat failed on '%s' in distfiles: %s\n" % + (filename, e)) + continue + if not stat.S_ISREG(st.st_mode): + continue + elif filename in file_owners: + if deletion_db is not None: + try: + del deletion_db[filename] + except KeyError: + pass + elif whitelist is not None and filename in whitelist: + if deletion_db is not None: + try: + del deletion_db[filename] + except KeyError: + pass + elif distfiles_local is not None and \ + os.path.exists(os.path.join(distfiles_local, filename)): + if deletion_db is not None: + try: + del deletion_db[filename] + except KeyError: + pass + else: + self._config.scheduled_deletion_count += 1 + + if deletion_db is None or deletion_delay is None: + 
+ yield DeletionTask(background=True, + distfile=filename, + config=self._config) + + else: + deletion_entry = deletion_db.get(filename) + + if deletion_entry is None: + logging.debug("add '%s' to deletion db" % filename) + deletion_db[filename] = start_time + + elif deletion_entry + deletion_delay <= start_time: + + yield DeletionTask(background=True, + distfile=filename, + config=self._config) + + if deletion_db is not None: + for filename in list(deletion_db): + if filename not in distfiles_set: + try: + del deletion_db[filename] + except KeyError: + pass + else: + logging.debug("drop '%s' from deletion db" % + filename) diff --git a/portage_with_autodep/pym/portage/_emirrordist/DeletionTask.py b/portage_with_autodep/pym/portage/_emirrordist/DeletionTask.py new file mode 100644 index 0000000..7d10957 --- /dev/null +++ b/portage_with_autodep/pym/portage/_emirrordist/DeletionTask.py @@ -0,0 +1,129 @@ +# Copyright 2013 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + +import errno +import logging + +from portage import os +from portage.util._async.FileCopier import FileCopier +from _emerge.CompositeTask import CompositeTask + +class DeletionTask(CompositeTask): + + __slots__ = ('distfile', 'config') + + def _start(self): + + distfile_path = os.path.join( + self.config.options.distfiles, self.distfile) + + if self.config.options.recycle_dir is not None: + distfile_path = os.path.join(self.config.options.distfiles, self.distfile) + recycle_path = os.path.join( + self.config.options.recycle_dir, self.distfile) + if self.config.options.dry_run: + logging.info(("dry-run: move '%s' from " + "distfiles to recycle") % self.distfile) + else: + logging.debug(("move '%s' from " + "distfiles to recycle") % self.distfile) + try: + os.rename(distfile_path, recycle_path) + except OSError as e: + if e.errno != errno.EXDEV: + logging.error(("rename %s from distfiles to " + "recycle failed: %s") % (self.distfile, e)) + else: + self.returncode = os.EX_OK + self._async_wait() + return + + self._start_task( + FileCopier(src_path=distfile_path, + dest_path=recycle_path, + background=False), + self._recycle_copier_exit) + return + + success = True + + if self.config.options.dry_run: + logging.info(("dry-run: delete '%s' from " + "distfiles") % self.distfile) + else: + logging.debug(("delete '%s' from " + "distfiles") % self.distfile) + try: + os.unlink(distfile_path) + except OSError as e: + if e.errno not in (errno.ENOENT, errno.ESTALE): + logging.error("%s unlink failed in distfiles: %s" % + (self.distfile, e)) + success = False + + if success: + self._success() + self.returncode = os.EX_OK + else: + self.returncode = 1 + + self._async_wait() + + def _recycle_copier_exit(self, copier): + + self._assert_current(copier) + if self._was_cancelled(): + self.wait() + return + + success = True + if copier.returncode == os.EX_OK: + + try: + os.unlink(copier.src_path) + except OSError as e: + if e.errno not in (errno.ENOENT, errno.ESTALE): + logging.error("%s unlink failed in distfiles: %s" % + (self.distfile, e)) + success = False + + else: + logging.error(("%s copy from distfiles " + "to recycle failed: %s") % (self.distfile, e)) + success = False + + if success: + self._success() + self.returncode = os.EX_OK + else: + self.returncode = 1 + + self._current_task = None + self.wait() + + def _success(self): + + cpv = "unknown" + if self.config.distfiles_db is not None: + cpv = self.config.distfiles_db.get(self.distfile, cpv) + + self.config.delete_count += 1 + 
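DeletionTask._start above moves a distfile into the recycle directory with os.rename and only falls back to a copy when the rename fails with EXDEV, i.e. when the two directories live on different filesystems. A synchronous sketch of the same pattern — shutil.copy2 stands in here for the patch's asynchronous FileCopier:

```python
import errno
import os
import shutil

def move_to_recycle(distfile_path, recycle_path):
    # Fast path: rename works within one filesystem.
    try:
        os.rename(distfile_path, recycle_path)
        return
    except OSError as e:
        if e.errno != errno.EXDEV:
            raise  # a real error, not just a cross-device move
    # Slow path: copy across devices, then remove the original,
    # mirroring the FileCopier + unlink sequence in DeletionTask.
    shutil.copy2(distfile_path, recycle_path)
    os.unlink(distfile_path)
```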
self.config.log_success("%s\t%s\tremoved" % (cpv, self.distfile)) + + if self.config.distfiles_db is not None: + try: + del self.config.distfiles_db[self.distfile] + except KeyError: + pass + else: + logging.debug(("drop '%s' from " + "distfiles db") % self.distfile) + + if self.config.deletion_db is not None: + try: + del self.config.deletion_db[self.distfile] + except KeyError: + pass + else: + logging.debug(("drop '%s' from " + "deletion db") % self.distfile) diff --git a/portage_with_autodep/pym/portage/_emirrordist/FetchIterator.py b/portage_with_autodep/pym/portage/_emirrordist/FetchIterator.py new file mode 100644 index 0000000..16a0b04 --- /dev/null +++ b/portage_with_autodep/pym/portage/_emirrordist/FetchIterator.py @@ -0,0 +1,147 @@ +# Copyright 2013 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + +from portage import os +from portage.checksum import (_apply_hash_filter, + _filter_unaccelarated_hashes, _hash_filter) +from portage.dep import use_reduce +from portage.exception import PortageException +from .FetchTask import FetchTask + +class FetchIterator(object): + + def __init__(self, config): + self._config = config + self._log_failure = config.log_failure + + def _iter_every_cp(self): + # List categories individually, in order to start yielding quicker, + # and in order to reduce latency in case of a signal interrupt. + cp_all = self._config.portdb.cp_all + for category in sorted(self._config.portdb.categories): + for cp in cp_all(categories=(category,)): + yield cp + + def __iter__(self): + + portdb = self._config.portdb + get_repo_for_location = portdb.repositories.get_repo_for_location + file_owners = self._config.file_owners + file_failures = self._config.file_failures + restrict_mirror_exemptions = self._config.restrict_mirror_exemptions + + hash_filter = _hash_filter( + portdb.settings.get("PORTAGE_CHECKSUM_FILTER", "")) + if hash_filter.transparent: + hash_filter = None + + for cp in self._iter_every_cp(): + + for tree in portdb.porttrees: + + # Reset state so the Manifest is pulled once + # for this cp / tree combination. + digests = None + repo_config = get_repo_for_location(tree) + + for cpv in portdb.cp_list(cp, mytree=tree): + + try: + restrict, = portdb.aux_get(cpv, ("RESTRICT",), + mytree=tree) + except (KeyError, PortageException) as e: + self._log_failure("%s\t\taux_get exception %s" % + (cpv, e)) + continue + + # Here we use matchnone=True to ignore conditional parts + # of RESTRICT since they don't apply unconditionally. + # Assume such conditionals only apply on the client side. + try: + restrict = frozenset(use_reduce(restrict, + flat=True, matchnone=True)) + except PortageException as e: + self._log_failure("%s\t\tuse_reduce exception %s" % + (cpv, e)) + continue + + if "fetch" in restrict: + continue + + try: + uri_map = portdb.getFetchMap(cpv) + except PortageException as e: + self._log_failure("%s\t\tgetFetchMap exception %s" % + (cpv, e)) + continue + + if not uri_map: + continue + + if "mirror" in restrict: + skip = False + if restrict_mirror_exemptions is not None: + new_uri_map = {} + for filename, uri_tuple in uri_map.items(): + for uri in uri_tuple: + if uri[:9] == "mirror://": + i = uri.find("/", 9) + if i != -1 and uri[9:i].strip("/") in \ + restrict_mirror_exemptions: + new_uri_map[filename] = uri_tuple + break + if new_uri_map: + uri_map = new_uri_map + else: + skip = True + else: + skip = True + + if skip: + continue + + # Parse Manifest for this cp if we haven't yet. 
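For RESTRICT="mirror" packages, FetchIterator above exempts URIs whose mirror name appears in --restrict-mirror-exemptions, extracting that name by slicing off the mirror:// prefix (index 9 is the length of "mirror://"). A standalone sketch of that slicing, with an illustrative URI:

```python
def split_mirror_uri(uri):
    # Everything between "mirror://" and the next "/" is the mirror name,
    # as in the FetchIterator hunk above.
    if not uri.startswith("mirror://"):
        return None
    i = uri.find("/", 9)
    if i == -1:
        return None
    return uri[9:i].strip("/"), uri[i + 1:]

if __name__ == "__main__":
    # Illustrative URI, not taken from a real Manifest.
    print(split_mirror_uri("mirror://gentoo/foo-1.0.tar.gz"))
    # -> ('gentoo', 'foo-1.0.tar.gz')
```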
+ if digests is None: + try: + digests = repo_config.load_manifest( + os.path.join(repo_config.location, cp) + ).getTypeDigests("DIST") + except (EnvironmentError, PortageException) as e: + for filename in uri_map: + self._log_failure( + "%s\t%s\tManifest exception %s" % + (cpv, filename, e)) + file_failures[filename] = cpv + continue + + if not digests: + for filename in uri_map: + self._log_failure("%s\t%s\tdigest entry missing" % + (cpv, filename)) + file_failures[filename] = cpv + continue + + for filename, uri_tuple in uri_map.items(): + file_digests = digests.get(filename) + if file_digests is None: + self._log_failure("%s\t%s\tdigest entry missing" % + (cpv, filename)) + file_failures[filename] = cpv + continue + if filename in file_owners: + continue + file_owners[filename] = cpv + + file_digests = \ + _filter_unaccelarated_hashes(file_digests) + if hash_filter is not None: + file_digests = _apply_hash_filter( + file_digests, hash_filter) + + yield FetchTask(cpv=cpv, + background=True, + digests=file_digests, + distfile=filename, + restrict=restrict, + uri_tuple=uri_tuple, + config=self._config) diff --git a/portage_with_autodep/pym/portage/_emirrordist/FetchTask.py b/portage_with_autodep/pym/portage/_emirrordist/FetchTask.py new file mode 100644 index 0000000..66c41c1 --- /dev/null +++ b/portage_with_autodep/pym/portage/_emirrordist/FetchTask.py @@ -0,0 +1,629 @@ +# Copyright 2013 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + +import collections +import errno +import logging +import random +import stat +import subprocess +import sys + +import portage +from portage import _encodings, _unicode_encode +from portage import os +from portage.util._async.FileCopier import FileCopier +from portage.util._async.FileDigester import FileDigester +from portage.util._async.PipeLogger import PipeLogger +from portage.util._async.PopenProcess import PopenProcess +from _emerge.CompositeTask import CompositeTask + +default_hash_name = portage.const.MANIFEST2_REQUIRED_HASH + +# Use --no-check-certificate since Manifest digests should provide +# enough security, and certificates can be self-signed or whatnot. 
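Each FetchTask receives the Manifest's DIST digests for one file, and verification needs only the recorded size plus one supported hash (compare _select_hash further down in this file). A sketch of that selection, under the assumption that digest names look like Manifest2 entries:

```python
import os

# Assumption: digest names as they appear in Manifest2 entries.
SUPPORTED_HASHES = {"SHA256", "SHA512"}
DEFAULT_HASH = "SHA256"

def select_hash(digests):
    """Pick one usable hash name from a Manifest digest dict."""
    if DEFAULT_HASH in digests:
        return DEFAULT_HASH
    for hash_name in digests:
        if hash_name != "size" and hash_name in SUPPORTED_HASHES:
            return hash_name
    return None

def size_ok(path, digests):
    """Cheap first check: compare on-disk size to the Manifest size."""
    try:
        return os.stat(path).st_size == digests["size"]
    except OSError:
        return False
```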
+default_fetchcommand = "wget -c -v -t 1 --passive-ftp --no-check-certificate --timeout=60 -O \"${DISTDIR}/${FILE}\" \"${URI}\"" + +class FetchTask(CompositeTask): + + __slots__ = ('distfile', 'digests', 'config', 'cpv', + 'restrict', 'uri_tuple', '_current_mirror', + '_current_stat', '_fetch_tmp_dir_info', '_fetch_tmp_file', + '_fs_mirror_stack', '_mirror_stack', + '_previously_added', + '_primaryuri_stack', '_log_path', '_tried_uris') + + def _start(self): + + if self.config.options.fetch_log_dir is not None and \ + not self.config.options.dry_run: + self._log_path = os.path.join( + self.config.options.fetch_log_dir, + self.distfile + '.log') + + self._previously_added = True + if self.config.distfiles_db is not None and \ + self.distfile not in self.config.distfiles_db: + self._previously_added = False + self.config.distfiles_db[self.distfile] = self.cpv + + if not self._have_needed_digests(): + msg = "incomplete digests: %s" % " ".join(self.digests) + self.scheduler.output(msg, background=self.background, + log_path=self._log_path) + self.config.log_failure("%s\t%s\t%s" % + (self.cpv, self.distfile, msg)) + self.config.file_failures[self.distfile] = self.cpv + self.returncode = os.EX_OK + self._async_wait() + return + + distfile_path = os.path.join( + self.config.options.distfiles, self.distfile) + + st = None + size_ok = False + try: + st = os.stat(distfile_path) + except OSError as e: + if e.errno not in (errno.ENOENT, errno.ESTALE): + msg = "%s stat failed in %s: %s" % \ + (self.distfile, "distfiles", e) + self.scheduler.output(msg + '\n', background=True, + log_path=self._log_path) + logging.error(msg) + else: + size_ok = st.st_size == self.digests["size"] + + if not size_ok: + if self.config.options.dry_run: + if st is not None: + logging.info(("dry-run: delete '%s' with " + "wrong size from distfiles") % (self.distfile,)) + else: + # Do the unlink in order to ensure that the path is clear, + # even if stat raised ENOENT, since a broken symlink can + # trigger ENOENT. 
+ if self._unlink_file(distfile_path, "distfiles"): + if st is not None: + logging.debug(("delete '%s' with " + "wrong size from distfiles") % (self.distfile,)) + else: + self.config.log_failure("%s\t%s\t%s" % + (self.cpv, self.distfile, "unlink failed in distfiles")) + self.returncode = os.EX_OK + self._async_wait() + return + + if size_ok: + if self.config.options.verify_existing_digest: + self._start_task( + FileDigester(file_path=distfile_path, + hash_names=(self._select_hash(),), + background=self.background, + logfile=self._log_path), self._distfiles_digester_exit) + return + + self._success() + self.returncode = os.EX_OK + self._async_wait() + return + + self._start_fetch() + + def _success(self): + if not self._previously_added: + size = self.digests["size"] + self.config.added_byte_count += size + self.config.added_file_count += 1 + self.config.log_success("%s\t%s\tadded %i bytes" % + (self.cpv, self.distfile, size)) + + if self._log_path is not None: + if not self.config.options.dry_run: + try: + os.unlink(self._log_path) + except OSError: + pass + + if self.config.options.recycle_dir is not None: + + recycle_file = os.path.join( + self.config.options.recycle_dir, self.distfile) + + if self.config.options.dry_run: + if os.path.exists(recycle_file): + logging.info("dry-run: delete '%s' from recycle" % + (self.distfile,)) + else: + try: + os.unlink(recycle_file) + except OSError: + pass + else: + logging.debug("delete '%s' from recycle" % + (self.distfile,)) + + def _distfiles_digester_exit(self, digester): + + self._assert_current(digester) + if self._was_cancelled(): + self.wait() + return + + if self._default_exit(digester) != os.EX_OK: + # IOError reading file in our main distfiles directory? This + # is a bad situation which normally does not occur, so + # skip this file and report it, in order to draw attention + # from the administrator. 
+ msg = "%s distfiles digester failed unexpectedly" % \ + (self.distfile,) + self.scheduler.output(msg + '\n', background=True, + log_path=self._log_path) + logging.error(msg) + self.config.log_failure("%s\t%s\t%s" % + (self.cpv, self.distfile, msg)) + self.config.file_failures[self.distfile] = self.cpv + self.wait() + return + + wrong_digest = self._find_bad_digest(digester.digests) + if wrong_digest is None: + self._success() + self.returncode = os.EX_OK + self.wait() + return + + self._start_fetch() + + _mirror_info = collections.namedtuple('_mirror_info', + 'name location') + + def _start_fetch(self): + + self._previously_added = False + self._fs_mirror_stack = [] + if self.config.options.distfiles_local is not None: + self._fs_mirror_stack.append(self._mirror_info( + 'distfiles-local', self.config.options.distfiles_local)) + if self.config.options.recycle_dir is not None: + self._fs_mirror_stack.append(self._mirror_info( + 'recycle', self.config.options.recycle_dir)) + + self._primaryuri_stack = [] + self._mirror_stack = [] + for uri in reversed(self.uri_tuple): + if uri.startswith('mirror://'): + self._mirror_stack.append( + self._mirror_iterator(uri, self.config.mirrors)) + else: + self._primaryuri_stack.append(uri) + + self._tried_uris = set() + self._try_next_mirror() + + @staticmethod + def _mirror_iterator(uri, mirrors_dict): + + slash_index = uri.find("/", 9) + if slash_index != -1: + mirror_name = uri[9:slash_index].strip("/") + mirrors = mirrors_dict.get(mirror_name) + if not mirrors: + return + mirrors = list(mirrors) + while mirrors: + mirror = mirrors.pop(random.randint(0, len(mirrors) - 1)) + yield mirror.rstrip("/") + "/" + uri[slash_index+1:] + + def _try_next_mirror(self): + if self._fs_mirror_stack: + self._fetch_fs(self._fs_mirror_stack.pop()) + return + else: + uri = self._next_uri() + if uri is not None: + self._tried_uris.add(uri) + self._fetch_uri(uri) + return + + if self._tried_uris: + msg = "all uris failed" + else: + msg = "no fetchable uris" + + self.config.log_failure("%s\t%s\t%s" % + (self.cpv, self.distfile, msg)) + self.config.file_failures[self.distfile] = self.cpv + self.returncode = os.EX_OK + self.wait() + + def _next_uri(self): + remaining_tries = self.config.options.tries - len(self._tried_uris) + if remaining_tries > 0: + + if remaining_tries <= self.config.options.tries / 2: + while self._primaryuri_stack: + uri = self._primaryuri_stack.pop() + if uri not in self._tried_uris: + return uri + + while self._mirror_stack: + uri = next(self._mirror_stack[-1], None) + if uri is None: + self._mirror_stack.pop() + else: + if uri not in self._tried_uris: + return uri + + while self._primaryuri_stack: + uri = self._primaryuri_stack.pop() + if uri not in self._tried_uris: + return uri + + return None + + def _fetch_fs(self, mirror_info): + file_path = os.path.join(mirror_info.location, self.distfile) + + st = None + size_ok = False + try: + st = os.stat(file_path) + except OSError as e: + if e.errno not in (errno.ENOENT, errno.ESTALE): + msg = "%s stat failed in %s: %s" % \ + (self.distfile, mirror_info.name, e) + self.scheduler.output(msg + '\n', background=True, + log_path=self._log_path) + logging.error(msg) + else: + size_ok = st.st_size == self.digests["size"] + self._current_stat = st + + if size_ok: + self._current_mirror = mirror_info + self._start_task( + FileDigester(file_path=file_path, + hash_names=(self._select_hash(),), + background=self.background, + logfile=self._log_path), + self._fs_mirror_digester_exit) + else: + self._try_next_mirror() + 
+ def _fs_mirror_digester_exit(self, digester): + + self._assert_current(digester) + if self._was_cancelled(): + self.wait() + return + + current_mirror = self._current_mirror + if digester.returncode != os.EX_OK: + msg = "%s %s digester failed unexpectedly" % \ + (self.distfile, current_mirror.name) + self.scheduler.output(msg + '\n', background=True, + log_path=self._log_path) + logging.error(msg) + else: + bad_digest = self._find_bad_digest(digester.digests) + if bad_digest is not None: + msg = "%s %s has bad %s digest: expected %s, got %s" % \ + (self.distfile, current_mirror.name, bad_digest, + self.digests[bad_digest], digester.digests[bad_digest]) + self.scheduler.output(msg + '\n', background=True, + log_path=self._log_path) + logging.error(msg) + elif self.config.options.dry_run: + # Report success without actually touching any files + if self._same_device(current_mirror.location, + self.config.options.distfiles): + logging.info(("dry-run: hardlink '%s' from %s " + "to distfiles") % (self.distfile, current_mirror.name)) + else: + logging.info("dry-run: copy '%s' from %s to distfiles" % + (self.distfile, current_mirror.name)) + self._success() + self.returncode = os.EX_OK + self.wait() + return + else: + src = os.path.join(current_mirror.location, self.distfile) + dest = os.path.join(self.config.options.distfiles, self.distfile) + if self._hardlink_atomic(src, dest, + "%s to %s" % (current_mirror.name, "distfiles")): + logging.debug("hardlink '%s' from %s to distfiles" % + (self.distfile, current_mirror.name)) + self._success() + self.returncode = os.EX_OK + self.wait() + return + else: + self._start_task( + FileCopier(src_path=src, dest_path=dest, + background=(self.background and + self._log_path is not None), + logfile=self._log_path), + self._fs_mirror_copier_exit) + return + + self._try_next_mirror() + + def _fs_mirror_copier_exit(self, copier): + + self._assert_current(copier) + if self._was_cancelled(): + self.wait() + return + + current_mirror = self._current_mirror + if copier.returncode != os.EX_OK: + msg = "%s %s copy failed unexpectedly" % \ + (self.distfile, current_mirror.name) + self.scheduler.output(msg + '\n', background=True, + log_path=self._log_path) + logging.error(msg) + else: + + logging.debug("copy '%s' from %s to distfiles" % + (self.distfile, current_mirror.name)) + + # Apply the timestamp from the source file, but + # just rely on umask for permissions. + try: + if sys.hexversion >= 0x3030000: + os.utime(copier.dest_path, + ns=(self._current_stat.st_mtime_ns, + self._current_stat.st_mtime_ns)) + else: + os.utime(copier.dest_path, + (self._current_stat[stat.ST_MTIME], + self._current_stat[stat.ST_MTIME])) + except OSError as e: + msg = "%s %s utime failed unexpectedly: %s" % \ + (self.distfile, current_mirror.name, e) + self.scheduler.output(msg + '\n', background=True, + log_path=self._log_path) + logging.error(msg) + + self._success() + self.returncode = os.EX_OK + self.wait() + return + + self._try_next_mirror() + + def _fetch_uri(self, uri): + + if self.config.options.dry_run: + # Simply report success. 
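_fs_mirror_copier_exit above carries the source file's mtime over to the new copy — using the nanosecond os.utime API on Python >= 3.3 and the legacy float API otherwise — while leaving permissions to umask. That version switch in isolation:

```python
import os
import stat
import sys

def copy_mtime(src_stat, dest_path):
    # Apply the source timestamp only; permissions are left to umask,
    # as in _fs_mirror_copier_exit above.
    if sys.hexversion >= 0x3030000:
        # Nanosecond API avoids rounding the timestamp (Python >= 3.3).
        os.utime(dest_path,
            ns=(src_stat.st_mtime_ns, src_stat.st_mtime_ns))
    else:
        os.utime(dest_path,
            (src_stat[stat.ST_MTIME], src_stat[stat.ST_MTIME]))
```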
+ logging.info("dry-run: fetch '%s' from '%s'" % + (self.distfile, uri)) + self._success() + self.returncode = os.EX_OK + self.wait() + return + + if self.config.options.temp_dir: + self._fetch_tmp_dir_info = 'temp-dir' + distdir = self.config.options.temp_dir + else: + self._fetch_tmp_dir_info = 'distfiles' + distdir = self.config.options.distfiles + + tmp_basename = self.distfile + '._emirrordist_fetch_.%s' % os.getpid() + + variables = { + "DISTDIR": distdir, + "URI": uri, + "FILE": tmp_basename + } + + self._fetch_tmp_file = os.path.join(distdir, tmp_basename) + + try: + os.unlink(self._fetch_tmp_file) + except OSError: + pass + + args = portage.util.shlex_split(default_fetchcommand) + args = [portage.util.varexpand(x, mydict=variables) + for x in args] + + if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000 and \ + not os.path.isabs(args[0]): + # Python 3.1 _execvp throws TypeError for non-absolute executable + # path passed as bytes (see http://bugs.python.org/issue8513). + fullname = portage.process.find_binary(args[0]) + if fullname is None: + raise portage.exception.CommandNotFound(args[0]) + args[0] = fullname + + args = [_unicode_encode(x, + encoding=_encodings['fs'], errors='strict') for x in args] + + null_fd = os.open(os.devnull, os.O_RDONLY) + fetcher = PopenProcess(background=self.background, + proc=subprocess.Popen(args, stdin=null_fd, + stdout=subprocess.PIPE, stderr=subprocess.STDOUT), + scheduler=self.scheduler) + os.close(null_fd) + + fetcher.pipe_reader = PipeLogger(background=self.background, + input_fd=fetcher.proc.stdout, log_file_path=self._log_path, + scheduler=self.scheduler) + + self._start_task(fetcher, self._fetcher_exit) + + def _fetcher_exit(self, fetcher): + + self._assert_current(fetcher) + if self._was_cancelled(): + self.wait() + return + + if os.path.exists(self._fetch_tmp_file): + self._start_task( + FileDigester(file_path=self._fetch_tmp_file, + hash_names=(self._select_hash(),), + background=self.background, + logfile=self._log_path), + self._fetch_digester_exit) + else: + self._try_next_mirror() + + def _fetch_digester_exit(self, digester): + + self._assert_current(digester) + if self._was_cancelled(): + self.wait() + return + + if digester.returncode != os.EX_OK: + msg = "%s %s digester failed unexpectedly" % \ + (self.distfile, self._fetch_tmp_dir_info) + self.scheduler.output(msg + '\n', background=True, + log_path=self._log_path) + logging.error(msg) + else: + bad_digest = self._find_bad_digest(digester.digests) + if bad_digest is not None: + msg = "%s has bad %s digest: expected %s, got %s" % \ + (self.distfile, bad_digest, + self.digests[bad_digest], digester.digests[bad_digest]) + self.scheduler.output(msg + '\n', background=True, + log_path=self._log_path) + try: + os.unlink(self._fetch_tmp_file) + except OSError: + pass + else: + dest = os.path.join(self.config.options.distfiles, self.distfile) + try: + os.rename(self._fetch_tmp_file, dest) + except OSError: + self._start_task( + FileCopier(src_path=self._fetch_tmp_file, + dest_path=dest, + background=(self.background and + self._log_path is not None), + logfile=self._log_path), + self._fetch_copier_exit) + return + else: + self._success() + self.returncode = os.EX_OK + self.wait() + return + + self._try_next_mirror() + + def _fetch_copier_exit(self, copier): + + self._assert_current(copier) + + try: + os.unlink(self._fetch_tmp_file) + except OSError: + pass + + if self._was_cancelled(): + self.wait() + return + + if copier.returncode == os.EX_OK: + self._success() + 
self.returncode = os.EX_OK + self.wait() + else: + # out of space? + msg = "%s %s copy failed unexpectedly" % \ + (self.distfile, self._fetch_tmp_dir_info) + self.scheduler.output(msg + '\n', background=True, + log_path=self._log_path) + logging.error(msg) + self.config.log_failure("%s\t%s\t%s" % + (self.cpv, self.distfile, msg)) + self.config.file_failures[self.distfile] = self.cpv + self.returncode = 1 + self.wait() + + def _unlink_file(self, file_path, dir_info): + try: + os.unlink(file_path) + except OSError as e: + if e.errno not in (errno.ENOENT, errno.ESTALE): + msg = "unlink '%s' failed in %s: %s" % \ + (self.distfile, dir_info, e) + self.scheduler.output(msg + '\n', background=True, + log_path=self._log_path) + logging.error(msg) + return False + return True + + def _have_needed_digests(self): + return "size" in self.digests and \ + self._select_hash() is not None + + def _select_hash(self): + if default_hash_name in self.digests: + return default_hash_name + else: + for hash_name in self.digests: + if hash_name != "size" and \ + hash_name in portage.checksum.hashfunc_map: + return hash_name + + return None + + def _find_bad_digest(self, digests): + for hash_name, hash_value in digests.items(): + if self.digests[hash_name] != hash_value: + return hash_name + return None + + @staticmethod + def _same_device(path1, path2): + try: + st1 = os.stat(path1) + st2 = os.stat(path2) + except OSError: + return False + else: + return st1.st_dev == st2.st_dev + + def _hardlink_atomic(self, src, dest, dir_info): + + head, tail = os.path.split(dest) + hardlink_tmp = os.path.join(head, ".%s._mirrordist_hardlink_.%s" % \ + (tail, os.getpid())) + + try: + try: + os.link(src, hardlink_tmp) + except OSError as e: + if e.errno != errno.EXDEV: + msg = "hardlink %s from %s failed: %s" % \ + (self.distfile, dir_info, e) + self.scheduler.output(msg + '\n', background=True, + log_path=self._log_path) + logging.error(msg) + return False + + try: + os.rename(hardlink_tmp, dest) + except OSError as e: + msg = "hardlink rename '%s' from %s failed: %s" % \ + (self.distfile, dir_info, e) + self.scheduler.output(msg + '\n', background=True, + log_path=self._log_path) + logging.error(msg) + return False + finally: + try: + os.unlink(hardlink_tmp) + except OSError: + pass + + return True diff --git a/portage_with_autodep/pym/portage/_emirrordist/MirrorDistTask.py b/portage_with_autodep/pym/portage/_emirrordist/MirrorDistTask.py new file mode 100644 index 0000000..b6f875d --- /dev/null +++ b/portage_with_autodep/pym/portage/_emirrordist/MirrorDistTask.py @@ -0,0 +1,218 @@ +# Copyright 2013 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + +import errno +import logging +import sys +import time + +try: + import threading +except ImportError: + import dummy_threading as threading + +import portage +from portage import os +from portage.util._async.TaskScheduler import TaskScheduler +from _emerge.CompositeTask import CompositeTask +from .FetchIterator import FetchIterator +from .DeletionIterator import DeletionIterator + +if sys.hexversion >= 0x3000000: + long = int + +class MirrorDistTask(CompositeTask): + + __slots__ = ('_config', '_terminated', '_term_check_id') + + def __init__(self, config): + CompositeTask.__init__(self, scheduler=config.event_loop) + self._config = config + self._terminated = threading.Event() + + def _start(self): + self._term_check_id = self.scheduler.idle_add(self._termination_check) + fetch = TaskScheduler(iter(FetchIterator(self._config)), + 
max_jobs=self._config.options.jobs, + max_load=self._config.options.load_average, + event_loop=self._config.event_loop) + self._start_task(fetch, self._fetch_exit) + + def _fetch_exit(self, fetch): + + self._assert_current(fetch) + if self._was_cancelled(): + self.wait() + return + + if self._config.options.delete: + deletion = TaskScheduler(iter(DeletionIterator(self._config)), + max_jobs=self._config.options.jobs, + max_load=self._config.options.load_average, + event_loop=self._config.event_loop) + self._start_task(deletion, self._deletion_exit) + return + + self._post_deletion() + + def _deletion_exit(self, deletion): + + self._assert_current(deletion) + if self._was_cancelled(): + self.wait() + return + + self._post_deletion() + + def _post_deletion(self): + + if self._config.options.recycle_db is not None: + self._update_recycle_db() + + if self._config.options.scheduled_deletion_log is not None: + self._scheduled_deletion_log() + + self._summary() + + self.returncode = os.EX_OK + self._current_task = None + self.wait() + + def _update_recycle_db(self): + + start_time = self._config.start_time + recycle_dir = self._config.options.recycle_dir + recycle_db = self._config.recycle_db + r_deletion_delay = self._config.options.recycle_deletion_delay + + # Use a dict optimize access. + recycle_db_cache = dict(recycle_db.items()) + + for filename in os.listdir(recycle_dir): + + recycle_file = os.path.join(recycle_dir, filename) + + try: + st = os.stat(recycle_file) + except OSError as e: + if e.errno not in (errno.ENOENT, errno.ESTALE): + logging.error(("stat failed for '%s' in " + "recycle: %s") % (filename, e)) + continue + + value = recycle_db_cache.pop(filename, None) + if value is None: + logging.debug(("add '%s' to " + "recycle db") % filename) + recycle_db[filename] = (st.st_size, start_time) + else: + r_size, r_time = value + if long(r_size) != st.st_size: + recycle_db[filename] = (st.st_size, start_time) + elif r_time + r_deletion_delay < start_time: + if self._config.options.dry_run: + logging.info(("dry-run: delete '%s' from " + "recycle") % filename) + logging.info(("drop '%s' from " + "recycle db") % filename) + else: + try: + os.unlink(recycle_file) + except OSError as e: + if e.errno not in (errno.ENOENT, errno.ESTALE): + logging.error(("delete '%s' from " + "recycle failed: %s") % (filename, e)) + else: + logging.debug(("delete '%s' from " + "recycle") % filename) + try: + del recycle_db[filename] + except KeyError: + pass + else: + logging.debug(("drop '%s' from " + "recycle db") % filename) + + # Existing files were popped from recycle_db_cache, + # so any remaining entries are for files that no + # longer exist. 
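MirrorDistTask._update_recycle_db above keeps a (size, timestamp) pair per recycled file, resets the clock whenever the size changes, and unlinks the file once it has sat unchanged longer than --recycle-deletion-delay. A condensed, synchronous sketch of that aging rule (not part of the patch):

```python
import os
import time

def prune_recycle_entry(recycle_file, db_value, deletion_delay,
        start_time=None):
    """Return the new db value for this file, or None if it was deleted."""
    if start_time is None:
        start_time = time.time()
    st = os.stat(recycle_file)
    if db_value is None:
        return (st.st_size, start_time)   # newly seen: start the clock
    r_size, r_time = db_value
    if r_size != st.st_size:
        return (st.st_size, start_time)   # file changed: reset the clock
    if r_time + deletion_delay < start_time:
        os.unlink(recycle_file)           # aged out: delete it
        return None
    return db_value                       # still within the delay
```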
+ for filename in recycle_db_cache: + try: + del recycle_db[filename] + except KeyError: + pass + else: + logging.debug(("drop non-existent '%s' from " + "recycle db") % filename) + + def _scheduled_deletion_log(self): + + start_time = self._config.start_time + dry_run = self._config.options.dry_run + deletion_delay = self._config.options.deletion_delay + distfiles_db = self._config.distfiles_db + + date_map = {} + for filename, timestamp in self._config.deletion_db.items(): + date = timestamp + deletion_delay + if date < start_time: + date = start_time + date = time.strftime("%Y-%m-%d", time.gmtime(date)) + date_files = date_map.get(date) + if date_files is None: + date_files = [] + date_map[date] = date_files + date_files.append(filename) + + if dry_run: + logging.warn(("dry-run: scheduled-deletions log " + "will be summarized via logging.info")) + + lines = [] + for date in sorted(date_map): + date_files = date_map[date] + if dry_run: + logging.info(("dry-run: scheduled deletions for %s: %s files") % + (date, len(date_files))) + lines.append("%s\n" % date) + for filename in date_files: + cpv = "unknown" + if distfiles_db is not None: + cpv = distfiles_db.get(filename, cpv) + lines.append("\t%s\t%s\n" % (filename, cpv)) + + if not dry_run: + portage.util.write_atomic( + self._config.options.scheduled_deletion_log, + "".join(lines)) + + def _summary(self): + elapsed_time = time.time() - self._config.start_time + fail_count = len(self._config.file_failures) + delete_count = self._config.delete_count + scheduled_deletion_count = self._config.scheduled_deletion_count - delete_count + added_file_count = self._config.added_file_count + added_byte_count = self._config.added_byte_count + + logging.info("finished in %i seconds" % elapsed_time) + logging.info("failed to fetch %i files" % fail_count) + logging.info("deleted %i files" % delete_count) + logging.info("deletion of %i files scheduled" % + scheduled_deletion_count) + logging.info("added %i files" % added_file_count) + logging.info("added %i bytes total" % added_byte_count) + + def terminate(self): + self._terminated.set() + + def _termination_check(self): + if self._terminated.is_set(): + self.cancel() + self.wait() + return True + + def _wait(self): + CompositeTask._wait(self) + if self._term_check_id is not None: + self.scheduler.source_remove(self._term_check_id) + self._term_check_id = None diff --git a/portage_with_autodep/pym/portage/tests/dbapi/__init__.py b/portage_with_autodep/pym/portage/_emirrordist/__init__.py index 532918b..6cde932 100644 --- a/portage_with_autodep/pym/portage/tests/dbapi/__init__.py +++ b/portage_with_autodep/pym/portage/_emirrordist/__init__.py @@ -1,2 +1,2 @@ -# Copyright 2011 Gentoo Foundation +# Copyright 2013 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 diff --git a/portage_with_autodep/pym/portage/_emirrordist/main.py b/portage_with_autodep/pym/portage/_emirrordist/main.py new file mode 100644 index 0000000..f28aad7 --- /dev/null +++ b/portage_with_autodep/pym/portage/_emirrordist/main.py @@ -0,0 +1,455 @@ +# Copyright 2013 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + +import logging +import sys + +import portage +from portage import os +from portage.util import normalize_path, writemsg_level +from portage.util._argparse import ArgumentParser +from portage.util._async.run_main_scheduler import run_main_scheduler +from portage.util._async.SchedulerInterface import SchedulerInterface +from 
portage.util._eventloop.global_event_loop import global_event_loop +from .Config import Config +from .MirrorDistTask import MirrorDistTask + +if sys.hexversion >= 0x3000000: + long = int + +seconds_per_day = 24 * 60 * 60 + +common_options = ( + { + "longopt" : "--dry-run", + "help" : "perform a trial run with no changes made (usually combined " + "with --verbose)", + "action" : "store_true" + }, + { + "longopt" : "--verbose", + "shortopt" : "-v", + "help" : "display extra information on stderr " + "(multiple occurences increase verbosity)", + "action" : "count", + "default" : 0, + }, + { + "longopt" : "--ignore-default-opts", + "help" : "do not use the EMIRRORDIST_DEFAULT_OPTS environment variable", + "action" : "store_true" + }, + { + "longopt" : "--distfiles", + "help" : "distfiles directory to use (required)", + "metavar" : "DIR" + }, + { + "longopt" : "--jobs", + "shortopt" : "-j", + "help" : "number of concurrent jobs to run", + "type" : int + }, + { + "longopt" : "--load-average", + "shortopt" : "-l", + "help" : "load average limit for spawning of new concurrent jobs", + "metavar" : "LOAD", + "type" : float + }, + { + "longopt" : "--tries", + "help" : "maximum number of tries per file, 0 means unlimited (default is 10)", + "default" : 10, + "type" : int + }, + { + "longopt" : "--repo", + "help" : "name of repo to operate on" + }, + { + "longopt" : "--config-root", + "help" : "location of portage config files", + "metavar" : "DIR" + }, + { + "longopt" : "--portdir", + "help" : "override the PORTDIR variable (deprecated in favor of --repositories-configuration)", + "metavar" : "DIR" + }, + { + "longopt" : "--portdir-overlay", + "help" : "override the PORTDIR_OVERLAY variable (deprecated in favor of --repositories-configuration)" + }, + { + "longopt" : "--repositories-configuration", + "help" : "override configuration of repositories (in format of repos.conf)" + }, + { + "longopt" : "--strict-manifests", + "help" : "manually override \"strict\" FEATURES setting", + "choices" : ("y", "n"), + "metavar" : "<y|n>", + }, + { + "longopt" : "--failure-log", + "help" : "log file for fetch failures, with tab-delimited " + "output, for reporting purposes", + "metavar" : "FILE" + }, + { + "longopt" : "--success-log", + "help" : "log file for fetch successes, with tab-delimited " + "output, for reporting purposes", + "metavar" : "FILE" + }, + { + "longopt" : "--scheduled-deletion-log", + "help" : "log file for scheduled deletions, with tab-delimited " + "output, for reporting purposes", + "metavar" : "FILE" + }, + { + "longopt" : "--delete", + "help" : "enable deletion of unused distfiles", + "action" : "store_true" + }, + { + "longopt" : "--deletion-db", + "help" : "database file used to track lifetime of files " + "scheduled for delayed deletion", + "metavar" : "FILE" + }, + { + "longopt" : "--deletion-delay", + "help" : "delay time for deletion, measured in seconds", + "metavar" : "SECONDS" + }, + { + "longopt" : "--temp-dir", + "help" : "temporary directory for downloads", + "metavar" : "DIR" + }, + { + "longopt" : "--mirror-overrides", + "help" : "file holding a list of mirror overrides", + "metavar" : "FILE" + }, + { + "longopt" : "--mirror-skip", + "help" : "comma delimited list of mirror targets to skip " + "when fetching" + }, + { + "longopt" : "--restrict-mirror-exemptions", + "help" : "comma delimited list of mirror targets for which to " + "ignore RESTRICT=\"mirror\"" + }, + { + "longopt" : "--verify-existing-digest", + "help" : "use digest as a verification of whether existing " + 
"distfiles are valid", + "action" : "store_true" + }, + { + "longopt" : "--distfiles-local", + "help" : "distfiles-local directory to use", + "metavar" : "DIR" + }, + { + "longopt" : "--distfiles-db", + "help" : "database file used to track which ebuilds a " + "distfile belongs to", + "metavar" : "FILE" + }, + { + "longopt" : "--recycle-dir", + "help" : "directory for extended retention of files that " + "are removed from distdir with the --delete option", + "metavar" : "DIR" + }, + { + "longopt" : "--recycle-db", + "help" : "database file used to track lifetime of files " + "in recycle dir", + "metavar" : "FILE" + }, + { + "longopt" : "--recycle-deletion-delay", + "help" : "delay time for deletion of unused files from " + "recycle dir, measured in seconds (defaults to " + "the equivalent of 60 days)", + "default" : 60 * seconds_per_day, + "metavar" : "SECONDS", + "type" : int + }, + { + "longopt" : "--fetch-log-dir", + "help" : "directory for individual fetch logs", + "metavar" : "DIR" + }, + { + "longopt" : "--whitelist-from", + "help" : "specifies a file containing a list of files to " + "whitelist, one per line, # prefixed lines ignored", + "action" : "append", + "metavar" : "FILE" + }, +) + +def parse_args(args): + description = "emirrordist - a fetch tool for mirroring " \ + "of package distfiles" + usage = "emirrordist [options] <action>" + parser = ArgumentParser(description=description, usage=usage) + + actions = parser.add_argument_group('Actions') + actions.add_argument("--version", + action="store_true", + help="display portage version and exit") + actions.add_argument("--mirror", + action="store_true", + help="mirror distfiles for the selected repository") + + common = parser.add_argument_group('Common options') + for opt_info in common_options: + opt_pargs = [opt_info["longopt"]] + if opt_info.get("shortopt"): + opt_pargs.append(opt_info["shortopt"]) + opt_kwargs = {"help" : opt_info["help"]} + for k in ("action", "choices", "default", "metavar", "type"): + if k in opt_info: + opt_kwargs[k] = opt_info[k] + common.add_argument(*opt_pargs, **opt_kwargs) + + options, args = parser.parse_known_args(args) + + return (parser, options, args) + +def emirrordist_main(args): + + # The calling environment is ignored, so the program is + # completely controlled by commandline arguments. 
+ env = {} + + if not sys.stdout.isatty(): + portage.output.nocolor() + env['NOCOLOR'] = 'true' + + parser, options, args = parse_args(args) + + if options.version: + sys.stdout.write("Portage %s\n" % portage.VERSION) + return os.EX_OK + + config_root = options.config_root + + if options.portdir is not None: + writemsg_level("emirrordist: warning: --portdir option is deprecated in favor of --repositories-configuration option\n", + level=logging.WARNING, noiselevel=-1) + if options.portdir_overlay is not None: + writemsg_level("emirrordist: warning: --portdir-overlay option is deprecated in favor of --repositories-configuration option\n", + level=logging.WARNING, noiselevel=-1) + + if options.repositories_configuration is not None: + env['PORTAGE_REPOSITORIES'] = options.repositories_configuration + elif options.portdir_overlay is not None: + env['PORTDIR_OVERLAY'] = options.portdir_overlay + + if options.portdir is not None: + env['PORTDIR'] = options.portdir + + settings = portage.config(config_root=config_root, + local_config=False, env=env) + + default_opts = None + if not options.ignore_default_opts: + default_opts = settings.get('EMIRRORDIST_DEFAULT_OPTS', '').split() + + if default_opts: + parser, options, args = parse_args(default_opts + args) + + settings = portage.config(config_root=config_root, + local_config=False, env=env) + + if options.repo is None: + if len(settings.repositories.prepos) == 2: + for repo in settings.repositories: + if repo.name != "DEFAULT": + options.repo = repo.name + break + + if options.repo is None: + parser.error("--repo option is required") + + repo_path = settings.repositories.treemap.get(options.repo) + if repo_path is None: + parser.error("Unable to locate repository named '%s'" % (options.repo,)) + + if options.jobs is not None: + options.jobs = int(options.jobs) + + if options.load_average is not None: + options.load_average = float(options.load_average) + + if options.failure_log is not None: + options.failure_log = normalize_path( + os.path.abspath(options.failure_log)) + + parent_dir = os.path.dirname(options.failure_log) + if not (os.path.isdir(parent_dir) and + os.access(parent_dir, os.W_OK|os.X_OK)): + parser.error(("--failure-log '%s' parent is not a " + "writable directory") % options.failure_log) + + if options.success_log is not None: + options.success_log = normalize_path( + os.path.abspath(options.success_log)) + + parent_dir = os.path.dirname(options.success_log) + if not (os.path.isdir(parent_dir) and + os.access(parent_dir, os.W_OK|os.X_OK)): + parser.error(("--success-log '%s' parent is not a " + "writable directory") % options.success_log) + + if options.scheduled_deletion_log is not None: + options.scheduled_deletion_log = normalize_path( + os.path.abspath(options.scheduled_deletion_log)) + + parent_dir = os.path.dirname(options.scheduled_deletion_log) + if not (os.path.isdir(parent_dir) and + os.access(parent_dir, os.W_OK|os.X_OK)): + parser.error(("--scheduled-deletion-log '%s' parent is not a " + "writable directory") % options.scheduled_deletion_log) + + if options.deletion_db is None: + parser.error("--scheduled-deletion-log requires --deletion-db") + + if options.deletion_delay is not None: + options.deletion_delay = long(options.deletion_delay) + if options.deletion_db is None: + parser.error("--deletion-delay requires --deletion-db") + + if options.deletion_db is not None: + if options.deletion_delay is None: + parser.error("--deletion-db requires --deletion-delay") + options.deletion_db = normalize_path( + 
os.path.abspath(options.deletion_db)) + + if options.temp_dir is not None: + options.temp_dir = normalize_path( + os.path.abspath(options.temp_dir)) + + if not (os.path.isdir(options.temp_dir) and + os.access(options.temp_dir, os.W_OK|os.X_OK)): + parser.error(("--temp-dir '%s' is not a " + "writable directory") % options.temp_dir) + + if options.distfiles is not None: + options.distfiles = normalize_path( + os.path.abspath(options.distfiles)) + + if not (os.path.isdir(options.distfiles) and + os.access(options.distfiles, os.W_OK|os.X_OK)): + parser.error(("--distfiles '%s' is not a " + "writable directory") % options.distfiles) + else: + parser.error("missing required --distfiles parameter") + + if options.mirror_overrides is not None: + options.mirror_overrides = normalize_path( + os.path.abspath(options.mirror_overrides)) + + if not (os.access(options.mirror_overrides, os.R_OK) and + os.path.isfile(options.mirror_overrides)): + parser.error( + "--mirror-overrides-file '%s' is not a readable file" % + options.mirror_overrides) + + if options.distfiles_local is not None: + options.distfiles_local = normalize_path( + os.path.abspath(options.distfiles_local)) + + if not (os.path.isdir(options.distfiles_local) and + os.access(options.distfiles_local, os.W_OK|os.X_OK)): + parser.error(("--distfiles-local '%s' is not a " + "writable directory") % options.distfiles_local) + + if options.distfiles_db is not None: + options.distfiles_db = normalize_path( + os.path.abspath(options.distfiles_db)) + + if options.tries is not None: + options.tries = int(options.tries) + + if options.recycle_dir is not None: + options.recycle_dir = normalize_path( + os.path.abspath(options.recycle_dir)) + if not (os.path.isdir(options.recycle_dir) and + os.access(options.recycle_dir, os.W_OK|os.X_OK)): + parser.error(("--recycle-dir '%s' is not a " + "writable directory") % options.recycle_dir) + + if options.recycle_db is not None: + if options.recycle_dir is None: + parser.error("--recycle-db requires " + "--recycle-dir to be specified") + options.recycle_db = normalize_path( + os.path.abspath(options.recycle_db)) + + if options.recycle_deletion_delay is not None: + options.recycle_deletion_delay = \ + long(options.recycle_deletion_delay) + + if options.fetch_log_dir is not None: + options.fetch_log_dir = normalize_path( + os.path.abspath(options.fetch_log_dir)) + + if not (os.path.isdir(options.fetch_log_dir) and + os.access(options.fetch_log_dir, os.W_OK|os.X_OK)): + parser.error(("--fetch-log-dir '%s' is not a " + "writable directory") % options.fetch_log_dir) + + if options.whitelist_from: + normalized_paths = [] + for x in options.whitelist_from: + path = normalize_path(os.path.abspath(x)) + normalized_paths.append(path) + if not (os.access(path, os.R_OK) and os.path.isfile(path)): + parser.error( + "--whitelist-from '%s' is not a readable file" % x) + options.whitelist_from = normalized_paths + + if options.strict_manifests is not None: + if options.strict_manifests == "y": + settings.features.add("strict") + else: + settings.features.discard("strict") + + settings.lock() + + portdb = portage.portdbapi(mysettings=settings) + + # Limit ebuilds to the specified repo. 
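Nearly every directory-valued option above runs through the same three steps: os.path.abspath, normalize_path, then an isdir check combined with os.access(..., os.W_OK|os.X_OK), with failures routed to parser.error. A condensed sketch of that validation step; the helper name is hypothetical:

    import os

    def require_writable_dir(path, opt_name):
        """Return the absolute path, or raise in the style of
        parser.error when it is not a writable directory."""
        path = os.path.abspath(path)  # the real code also applies normalize_path
        if not (os.path.isdir(path) and os.access(path, os.W_OK | os.X_OK)):
            raise ValueError("%s '%s' is not a writable directory"
                % (opt_name, path))
        return path

    # e.g. require_writable_dir("/var/tmp", "--temp-dir")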
+ portdb.porttrees = [repo_path] + + portage.util.initialize_logger() + + if options.verbose > 0: + l = logging.getLogger() + l.setLevel(l.getEffectiveLevel() - 10 * options.verbose) + + with Config(options, portdb, + SchedulerInterface(global_event_loop())) as config: + + if not options.mirror: + parser.error('No action specified') + + returncode = os.EX_OK + + if options.mirror: + signum = run_main_scheduler(MirrorDistTask(config)) + if signum is not None: + sys.exit(128 + signum) + + return returncode diff --git a/portage_with_autodep/pym/portage/_global_updates.py b/portage_with_autodep/pym/portage/_global_updates.py index 868d1ee..5175043 100644 --- a/portage_with_autodep/pym/portage/_global_updates.py +++ b/portage_with_autodep/pym/portage/_global_updates.py @@ -15,7 +15,7 @@ from portage.update import grab_updates, parse_updates, update_config_files, upd from portage.util import grabfile, shlex_split, \ writemsg, writemsg_stdout, write_atomic -def _global_updates(trees, prev_mtimes, quiet=False): +def _global_updates(trees, prev_mtimes, quiet=False, if_mtime_changed=True): """ Perform new global updates if they exist in 'profiles/updates/' subdirectories of all active repositories (PORTDIR + PORTDIR_OVERLAY). @@ -37,7 +37,7 @@ def _global_updates(trees, prev_mtimes, quiet=False): "SANDBOX_ACTIVE" in os.environ or \ len(trees) != 1: return retupd - root = "/" + root = trees._running_eroot mysettings = trees[root]["vartree"].settings portdb = trees[root]["porttree"].dbapi vardb = trees[root]["vartree"].dbapi @@ -73,10 +73,10 @@ def _global_updates(trees, prev_mtimes, quiet=False): continue try: - if mysettings.get("PORTAGE_CALLER") == "fixpackages": - update_data = grab_updates(updpath) + if if_mtime_changed: + update_data = grab_updates(updpath, prev_mtimes=prev_mtimes) else: - update_data = grab_updates(updpath, prev_mtimes) + update_data = grab_updates(updpath) except DirectoryNotFound: continue myupd = [] @@ -217,8 +217,7 @@ def _global_updates(trees, prev_mtimes, quiet=False): do_upgrade_packagesmessage = False # We gotta do the brute force updates for these now. - if mysettings.get("PORTAGE_CALLER") == "fixpackages" or \ - "fixpackages" in mysettings.features: + if True: def onUpdate(maxval, curval): if curval > 0: writemsg_stdout("#") diff --git a/portage_with_autodep/pym/portage/_global_updates.pyo b/portage_with_autodep/pym/portage/_global_updates.pyo Binary files differnew file mode 100644 index 0000000..3e2e8de --- /dev/null +++ b/portage_with_autodep/pym/portage/_global_updates.pyo diff --git a/portage_with_autodep/pym/portage/_legacy_globals.py b/portage_with_autodep/pym/portage/_legacy_globals.py index 615591a..abffa0e 100644 --- a/portage_with_autodep/pym/portage/_legacy_globals.py +++ b/portage_with_autodep/pym/portage/_legacy_globals.py @@ -35,19 +35,14 @@ def _get_legacy_global(name): constructed.add('db') del portage._initializing_globals - settings = portage.db["/"]["vartree"].settings - - for root in portage.db: - if root != "/": - settings = portage.db[root]["vartree"].settings - break - - portage.output._init(config_root=settings['PORTAGE_CONFIGROOT']) + settings = portage.db[portage.db._target_eroot]["vartree"].settings portage.settings = settings constructed.add('settings') - portage.root = root + # Since portage.db now uses EROOT for keys instead of ROOT, we make + # portage.root refer to EROOT such that it continues to work as a key. 
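In the _global_updates hunk above, the new if_mtime_changed parameter decides whether grab_updates consults prev_mtimes and skips update directories whose timestamps have not moved, or re-reads them unconditionally. A minimal sketch of that mtime gating; the helper name and dict shape are assumptions for illustration:

    import os

    def needs_rescan(path, prev_mtimes):
        """True if path's mtime differs from the recorded one;
        records the new mtime as a side effect, mirroring the
        prev_mtimes bookkeeping in _global_updates."""
        mtime = os.stat(path).st_mtime
        if prev_mtimes.get(path) == mtime:
            return False
        prev_mtimes[path] = mtime
        return True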
+ portage.root = portage.db._target_eroot constructed.add('root') # COMPATIBILITY diff --git a/portage_with_autodep/pym/portage/_legacy_globals.pyo b/portage_with_autodep/pym/portage/_legacy_globals.pyo Binary files differnew file mode 100644 index 0000000..2e50cbe --- /dev/null +++ b/portage_with_autodep/pym/portage/_legacy_globals.pyo diff --git a/portage_with_autodep/pym/portage/_selinux.pyo b/portage_with_autodep/pym/portage/_selinux.pyo Binary files differnew file mode 100644 index 0000000..7a413e0 --- /dev/null +++ b/portage_with_autodep/pym/portage/_selinux.pyo diff --git a/portage_with_autodep/pym/portage/_sets/__init__.py b/portage_with_autodep/pym/portage/_sets/__init__.py index 1b3484e..88a4b3b 100644 --- a/portage_with_autodep/pym/portage/_sets/__init__.py +++ b/portage_with_autodep/pym/portage/_sets/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2007 Gentoo Foundation +# Copyright 2007-2011 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 from __future__ import print_function @@ -6,16 +6,27 @@ from __future__ import print_function __all__ = ["SETPREFIX", "get_boolean", "SetConfigError", "SetConfig", "load_default_config"] +import io +import logging +import sys try: - from configparser import SafeConfigParser, NoOptionError + from configparser import NoOptionError, ParsingError + if sys.hexversion >= 0x3020000: + from configparser import ConfigParser as SafeConfigParser + else: + from configparser import SafeConfigParser except ImportError: - from ConfigParser import SafeConfigParser, NoOptionError + from ConfigParser import SafeConfigParser, NoOptionError, ParsingError from portage import os from portage import load_mod +from portage import _unicode_decode +from portage import _unicode_encode +from portage import _encodings from portage.const import USER_CONFIG_PATH, GLOBAL_CONFIG_PATH from portage.const import _ENABLE_SET_CONFIG from portage.exception import PackageSetNotFound from portage.localization import _ +from portage.util import writemsg_level SETPREFIX = "@" @@ -43,7 +54,32 @@ class SetConfig(object): }) if _ENABLE_SET_CONFIG: - self._parser.read(paths) + # use read_file/readfp in order to control decoding of unicode + try: + # Python >=3.2 + read_file = self._parser.read_file + except AttributeError: + read_file = self._parser.readfp + + for p in paths: + f = None + try: + f = io.open(_unicode_encode(p, + encoding=_encodings['fs'], errors='strict'), + mode='r', encoding=_encodings['repo.content'], + errors='replace') + except EnvironmentError: + pass + else: + try: + read_file(f) + except ParsingError as e: + writemsg_level(_unicode_decode( + _("!!! 
Error while reading sets config file: %s\n") + ) % e, level=logging.ERROR, noiselevel=-1) + finally: + if f is not None: + f.close() else: self._create_default_config() @@ -201,7 +237,6 @@ class SetConfig(object): except KeyError: raise PackageSetNotFound(setname) myatoms = myset.getAtoms() - parser = self._parser if ignorelist is None: ignorelist = set() diff --git a/portage_with_autodep/pym/portage/_sets/__init__.pyo b/portage_with_autodep/pym/portage/_sets/__init__.pyo Binary files differnew file mode 100644 index 0000000..5318dbe --- /dev/null +++ b/portage_with_autodep/pym/portage/_sets/__init__.pyo diff --git a/portage_with_autodep/pym/portage/_sets/base.pyo b/portage_with_autodep/pym/portage/_sets/base.pyo Binary files differnew file mode 100644 index 0000000..89e53be --- /dev/null +++ b/portage_with_autodep/pym/portage/_sets/base.pyo diff --git a/portage_with_autodep/pym/portage/_sets/dbapi.py b/portage_with_autodep/pym/portage/_sets/dbapi.py index 0f238f0..4982a92 100644 --- a/portage_with_autodep/pym/portage/_sets/dbapi.py +++ b/portage_with_autodep/pym/portage/_sets/dbapi.py @@ -1,10 +1,10 @@ -# Copyright 2007-2010 Gentoo Foundation +# Copyright 2007-2012 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 import time from portage import os -from portage.versions import catpkgsplit, catsplit, pkgcmp, best +from portage.versions import best, catsplit, vercmp from portage.dep import Atom from portage.localization import _ from portage._sets.base import PackageSet @@ -72,18 +72,16 @@ class OwnerSet(PackageSet): aux_keys = ["SLOT"] if exclude_paths is None: for link, p in vardb._owners.iter_owners(paths): - cat, pn = catpkgsplit(link.mycpv)[:2] slot, = aux_get(link.mycpv, aux_keys) - rValue.add("%s/%s:%s" % (cat, pn, slot)) + rValue.add("%s:%s" % (link.mycpv.cp, slot)) else: all_paths = set() all_paths.update(paths) all_paths.update(exclude_paths) exclude_atoms = set() for link, p in vardb._owners.iter_owners(all_paths): - cat, pn = catpkgsplit(link.mycpv)[:2] slot, = aux_get(link.mycpv, aux_keys) - atom = "%s/%s:%s" % (cat, pn, slot) + atom = "%s:%s" % (link.mycpv.cp, slot) rValue.add(atom) if p in exclude_paths: exclude_atoms.add(atom) @@ -184,9 +182,7 @@ class DowngradeSet(PackageSet): ebuild = xmatch(xmatch_level, slot_atom) if not ebuild: continue - ebuild_split = catpkgsplit(ebuild)[1:] - installed_split = catpkgsplit(cpv)[1:] - if pkgcmp(installed_split, ebuild_split) > 0: + if vercmp(cpv.version, ebuild.version) > 0: atoms.append(slot_atom) self._setAtoms(atoms) diff --git a/portage_with_autodep/pym/portage/_sets/dbapi.pyo b/portage_with_autodep/pym/portage/_sets/dbapi.pyo Binary files differnew file mode 100644 index 0000000..20bf848 --- /dev/null +++ b/portage_with_autodep/pym/portage/_sets/dbapi.pyo diff --git a/portage_with_autodep/pym/portage/_sets/files.pyo b/portage_with_autodep/pym/portage/_sets/files.pyo Binary files differnew file mode 100644 index 0000000..eb03c00 --- /dev/null +++ b/portage_with_autodep/pym/portage/_sets/files.pyo diff --git a/portage_with_autodep/pym/portage/_sets/libs.pyo b/portage_with_autodep/pym/portage/_sets/libs.pyo Binary files differnew file mode 100644 index 0000000..72fc1bb --- /dev/null +++ b/portage_with_autodep/pym/portage/_sets/libs.pyo diff --git a/portage_with_autodep/pym/portage/_sets/profiles.pyo b/portage_with_autodep/pym/portage/_sets/profiles.pyo Binary files differnew file mode 100644 index 0000000..9502044 --- /dev/null +++ b/portage_with_autodep/pym/portage/_sets/profiles.pyo diff --git 
a/portage_with_autodep/pym/portage/_sets/security.py b/portage_with_autodep/pym/portage/_sets/security.py index 2d8fcf6..7e856bc 100644 --- a/portage_with_autodep/pym/portage/_sets/security.py +++ b/portage_with_autodep/pym/portage/_sets/security.py @@ -1,9 +1,9 @@ -# Copyright 2007 Gentoo Foundation +# Copyright 2007-2012 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 import portage.glsa as glsa from portage._sets.base import PackageSet -from portage.versions import catpkgsplit, pkgcmp +from portage.versions import vercmp from portage._sets import get_boolean __all__ = ["SecuritySet", "NewGlsaSet", "NewAffectedSet", "AffectedSet"] @@ -45,12 +45,12 @@ class SecuritySet(PackageSet): for atom in atomlist[:]: cpv = self._portdbapi.xmatch("match-all", atom)[0] slot = self._portdbapi.aux_get(cpv, ["SLOT"])[0] - cps = "/".join(catpkgsplit(cpv)[0:2]) + ":" + slot + cps = "%s:%s" % (cpv.cp, slot) if not cps in mydict: mydict[cps] = (atom, cpv) else: other_cpv = mydict[cps][1] - if pkgcmp(catpkgsplit(cpv)[1:], catpkgsplit(other_cpv)[1:]) > 0: + if vercmp(cpv.version, other_cpv.version) > 0: atomlist.remove(mydict[cps][0]) mydict[cps] = (atom, cpv) return atomlist diff --git a/portage_with_autodep/pym/portage/_sets/security.pyo b/portage_with_autodep/pym/portage/_sets/security.pyo Binary files differnew file mode 100644 index 0000000..ea67514 --- /dev/null +++ b/portage_with_autodep/pym/portage/_sets/security.pyo diff --git a/portage_with_autodep/pym/portage/_sets/shell.pyo b/portage_with_autodep/pym/portage/_sets/shell.pyo Binary files differnew file mode 100644 index 0000000..e5e4561 --- /dev/null +++ b/portage_with_autodep/pym/portage/_sets/shell.pyo diff --git a/portage_with_autodep/pym/portage/cache/__init__.pyo b/portage_with_autodep/pym/portage/cache/__init__.pyo Binary files differnew file mode 100644 index 0000000..eb5a90e --- /dev/null +++ b/portage_with_autodep/pym/portage/cache/__init__.pyo diff --git a/portage_with_autodep/pym/portage/cache/anydbm.pyo b/portage_with_autodep/pym/portage/cache/anydbm.pyo Binary files differnew file mode 100644 index 0000000..5946da9 --- /dev/null +++ b/portage_with_autodep/pym/portage/cache/anydbm.pyo diff --git a/portage_with_autodep/pym/portage/cache/cache_errors.pyo b/portage_with_autodep/pym/portage/cache/cache_errors.pyo Binary files differnew file mode 100644 index 0000000..866088e --- /dev/null +++ b/portage_with_autodep/pym/portage/cache/cache_errors.pyo diff --git a/portage_with_autodep/pym/portage/cache/ebuild_xattr.py b/portage_with_autodep/pym/portage/cache/ebuild_xattr.py index 6b388fa..0086e40 100644 --- a/portage_with_autodep/pym/portage/cache/ebuild_xattr.py +++ b/portage_with_autodep/pym/portage/cache/ebuild_xattr.py @@ -1,5 +1,6 @@ +# -*- coding: UTF8 -*- # Copyright: 2009-2011 Gentoo Foundation -# Author(s): Petteri Räty (betelgeuse@gentoo.org) +# Author(s): Petteri Räty (betelgeuse@gentoo.org) # License: GPL2 __all__ = ['database'] diff --git a/portage_with_autodep/pym/portage/cache/ebuild_xattr.pyo b/portage_with_autodep/pym/portage/cache/ebuild_xattr.pyo Binary files differnew file mode 100644 index 0000000..fe32dcc --- /dev/null +++ b/portage_with_autodep/pym/portage/cache/ebuild_xattr.pyo diff --git a/portage_with_autodep/pym/portage/cache/flat_hash.py b/portage_with_autodep/pym/portage/cache/flat_hash.py index b6bc074..2eae9f6 100644 --- a/portage_with_autodep/pym/portage/cache/flat_hash.py +++ b/portage_with_autodep/pym/portage/cache/flat_hash.py @@ -31,7 +31,7 @@ class 
database(fs_template.FsBased): self.label.lstrip(os.path.sep).rstrip(os.path.sep)) write_keys = set(self._known_keys) write_keys.add("_eclasses_") - write_keys.add("_mtime_") + write_keys.add("_%s_" % (self.validation_chf,)) self._write_keys = sorted(write_keys) if not self.readonly and not os.path.exists(self.location): self._ensure_dirs() @@ -69,7 +69,6 @@ class database(fs_template.FsBased): raise cache_errors.CacheCorruption(cpv, e) def _setitem(self, cpv, values): -# import pdb;pdb.set_trace() s = cpv.rfind("/") fp = os.path.join(self.location,cpv[:s],".update.%i.%s" % (os.getpid(), cpv[s+1:])) try: @@ -153,3 +152,9 @@ class database(fs_template.FsBased): dirs.append((depth+1, p)) continue yield p[len_base+1:] + + +class md5_database(database): + + validation_chf = 'md5' + store_eclass_paths = False diff --git a/portage_with_autodep/pym/portage/cache/flat_hash.pyo b/portage_with_autodep/pym/portage/cache/flat_hash.pyo Binary files differnew file mode 100644 index 0000000..4f568a8 --- /dev/null +++ b/portage_with_autodep/pym/portage/cache/flat_hash.pyo diff --git a/portage_with_autodep/pym/portage/cache/flat_list.pyo b/portage_with_autodep/pym/portage/cache/flat_list.pyo Binary files differnew file mode 100644 index 0000000..ab7dc82 --- /dev/null +++ b/portage_with_autodep/pym/portage/cache/flat_list.pyo diff --git a/portage_with_autodep/pym/portage/cache/fs_template.pyo b/portage_with_autodep/pym/portage/cache/fs_template.pyo Binary files differnew file mode 100644 index 0000000..6cbbc2f --- /dev/null +++ b/portage_with_autodep/pym/portage/cache/fs_template.pyo diff --git a/portage_with_autodep/pym/portage/cache/mappings.py b/portage_with_autodep/pym/portage/cache/mappings.py index 60a918e..bc8ce9a 100644 --- a/portage_with_autodep/pym/portage/cache/mappings.py +++ b/portage_with_autodep/pym/portage/cache/mappings.py @@ -316,7 +316,7 @@ def slot_dict_class(keys, prefix="_val_"): attribute names from keys @type prefix: String @rtype: SlotDict - @returns: A class that constructs SlotDict instances + @return: A class that constructs SlotDict instances having the specified keys. """ if isinstance(keys, frozenset): diff --git a/portage_with_autodep/pym/portage/cache/mappings.pyo b/portage_with_autodep/pym/portage/cache/mappings.pyo Binary files differnew file mode 100644 index 0000000..1eb3f4f --- /dev/null +++ b/portage_with_autodep/pym/portage/cache/mappings.pyo diff --git a/portage_with_autodep/pym/portage/cache/metadata.py b/portage_with_autodep/pym/portage/cache/metadata.py index 4c735d7..9d2c3a5 100644 --- a/portage_with_autodep/pym/portage/cache/metadata.py +++ b/portage_with_autodep/pym/portage/cache/metadata.py @@ -6,6 +6,7 @@ import errno import re import stat import sys +from operator import attrgetter from portage import os from portage import _encodings from portage import _unicode_encode @@ -63,13 +64,14 @@ class database(flat_hash.database): if "INHERITED" in d: if self.ec is None: self.ec = portage.eclass_cache.cache(self.location[:-15]) + getter = attrgetter(self.validation_chf) try: - d["_eclasses_"] = self.ec.get_eclass_data( - d["INHERITED"].split()) + ec_data = self.ec.get_eclass_data(d["INHERITED"].split()) + d["_eclasses_"] = dict((k, (v.eclass_dir, getter(v))) + for k,v in ec_data.items()) except KeyError as e: # INHERITED contains a non-existent eclass. 
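The metadata.py hunk above uses operator.attrgetter(self.validation_chf) so one code path can pull whichever checksum attribute the cache validates with (mtime or md5) off each eclass record. A self-contained sketch of the idea, with a stand-in record type since the real hashed_path objects are not shown here:

    from collections import namedtuple
    from operator import attrgetter

    # Stand-in for the hashed_path objects the eclass cache returns.
    Eclass = namedtuple("Eclass", ("eclass_dir", "mtime", "md5"))

    def internalize(ec_dict, validation_chf):
        """Reduce full eclass records to (dir, chf-value) pairs keyed
        by eclass name, the shape the flat caches store."""
        getter = attrgetter(validation_chf)
        return dict((k, (v.eclass_dir, getter(v))) for k, v in ec_dict.items())

    ecs = {"eutils": Eclass("/usr/portage/eclass", 1325376000, "d41d8cd9")}
    print(internalize(ecs, "md5"))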
raise cache_errors.CacheCorruption(cpv, e) - del d["INHERITED"] else: d["_eclasses_"] = {} elif isinstance(d["_eclasses_"], basestring): diff --git a/portage_with_autodep/pym/portage/cache/metadata.pyo b/portage_with_autodep/pym/portage/cache/metadata.pyo Binary files differnew file mode 100644 index 0000000..c98445b --- /dev/null +++ b/portage_with_autodep/pym/portage/cache/metadata.pyo diff --git a/portage_with_autodep/pym/portage/cache/metadata_overlay.py b/portage_with_autodep/pym/portage/cache/metadata_overlay.py deleted file mode 100644 index cfa0051..0000000 --- a/portage_with_autodep/pym/portage/cache/metadata_overlay.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright 1999-2010 Gentoo Foundation -# Distributed under the terms of the GNU General Public License v2 - -from portage.cache import template -from portage.cache.cache_errors import CacheCorruption -from portage.cache.flat_hash import database as db_rw -from portage.cache.metadata import database as db_ro - -class database(template.database): - - serialize_eclasses = False - - def __init__(self, location, label, auxdbkeys, db_rw=db_rw, db_ro=db_ro, - *args, **config): - super_config = config.copy() - super_config.pop("gid", None) - super_config.pop("perms", None) - super(database, self).__init__(location, label, auxdbkeys, - *args, **super_config) - self.db_rw = db_rw(location, label, auxdbkeys, **config) - self.commit = self.db_rw.commit - self.autocommits = self.db_rw.autocommits - if isinstance(db_ro, type): - ro_config = config.copy() - ro_config["readonly"] = True - self.db_ro = db_ro(label, "metadata/cache", auxdbkeys, **ro_config) - else: - self.db_ro = db_ro - - def __getitem__(self, cpv): - """funnel whiteout validation through here, since value needs to be fetched""" - try: - value = self.db_rw[cpv] - except KeyError: - return self.db_ro[cpv] # raises a KeyError when necessary - except CacheCorruption: - del self.db_rw[cpv] - return self.db_ro[cpv] # raises a KeyError when necessary - if self._is_whiteout(value): - if self._is_whiteout_valid(cpv, value): - raise KeyError(cpv) - else: - del self.db_rw[cpv] - return self.db_ro[cpv] # raises a KeyError when necessary - else: - return value - - def _setitem(self, name, values): - try: - value_ro = self.db_ro.get(name) - except CacheCorruption: - value_ro = None - if value_ro is not None and \ - self._are_values_identical(value_ro, values): - # we have matching values in the underlying db_ro - # so it is unnecessary to store data in db_rw - try: - del self.db_rw[name] # delete unwanted whiteout when necessary - except KeyError: - pass - return - self.db_rw[name] = values - - def _delitem(self, cpv): - value = self[cpv] # validates whiteout and/or raises a KeyError when necessary - if cpv in self.db_ro: - self.db_rw[cpv] = self._create_whiteout(value) - else: - del self.db_rw[cpv] - - def __contains__(self, cpv): - try: - self[cpv] # validates whiteout when necessary - except KeyError: - return False - return True - - def __iter__(self): - s = set() - for cpv in self.db_rw: - if cpv in self: # validates whiteout when necessary - yield cpv - # set includes whiteouts so they won't be yielded later - s.add(cpv) - for cpv in self.db_ro: - if cpv not in s: - yield cpv - - def _is_whiteout(self, value): - return value["EAPI"] == "whiteout" - - def _create_whiteout(self, value): - return {"EAPI":"whiteout","_eclasses_":value["_eclasses_"],"_mtime_":value["_mtime_"]} - - def _is_whiteout_valid(self, name, value_rw): - try: - value_ro = self.db_ro[name] - return 
self._are_values_identical(value_rw,value_ro) - except KeyError: - return False - - def _are_values_identical(self, value1, value2): - if value1['_mtime_'] != value2['_mtime_']: - return False - return value1["_eclasses_"] == value2["_eclasses_"] diff --git a/portage_with_autodep/pym/portage/cache/sql_template.pyo b/portage_with_autodep/pym/portage/cache/sql_template.pyo Binary files differnew file mode 100644 index 0000000..e2c5974 --- /dev/null +++ b/portage_with_autodep/pym/portage/cache/sql_template.pyo diff --git a/portage_with_autodep/pym/portage/cache/sqlite.pyo b/portage_with_autodep/pym/portage/cache/sqlite.pyo Binary files differnew file mode 100644 index 0000000..a82d25f --- /dev/null +++ b/portage_with_autodep/pym/portage/cache/sqlite.pyo diff --git a/portage_with_autodep/pym/portage/cache/template.py b/portage_with_autodep/pym/portage/cache/template.py index f84d8f4..cf1e8ae 100644 --- a/portage_with_autodep/pym/portage/cache/template.py +++ b/portage_with_autodep/pym/portage/cache/template.py @@ -1,4 +1,4 @@ -# Copyright: 2005 Gentoo Foundation +# Copyright: 2005-2012 Gentoo Foundation # Author(s): Brian Harring (ferringb@gentoo.org) # License: GPL2 @@ -7,10 +7,14 @@ from portage.cache.cache_errors import InvalidRestriction from portage.cache.mappings import ProtectedDict import sys import warnings +import operator if sys.hexversion >= 0x3000000: + _unicode = str basestring = str long = int +else: + _unicode = unicode class database(object): # this is for metadata/cache transfer. @@ -21,6 +25,8 @@ class database(object): autocommits = False cleanse_keys = False serialize_eclasses = True + validation_chf = 'mtime' + store_eclass_paths = True def __init__(self, location, label, auxdbkeys, readonly=False): """ initialize the derived class; specifically, store label/keys""" @@ -40,9 +46,15 @@ class database(object): self.updates = 0 d=self._getitem(cpv) if self.serialize_eclasses and "_eclasses_" in d: - d["_eclasses_"] = reconstruct_eclasses(cpv, d["_eclasses_"]) + d["_eclasses_"] = reconstruct_eclasses(cpv, d["_eclasses_"], + self.validation_chf, paths=self.store_eclass_paths) elif "_eclasses_" not in d: d["_eclasses_"] = {} + # Never return INHERITED, since portdbapi.aux_get() will + # generate it automatically from _eclasses_, and we want + # to omit it in comparisons between cache entries like + # those that egencache uses to avoid redundant writes. + d.pop("INHERITED", None) mtime = d.get('_mtime_') if mtime is None: raise cache_errors.CacheCorruption(cpv, @@ -60,22 +72,46 @@ class database(object): override this in derived classess""" raise NotImplementedError + @staticmethod + def _internal_eclasses(extern_ec_dict, chf_type, paths): + """ + When serialize_eclasses is False, we have to convert an external + eclass dict containing hashed_path objects into an appropriate + internal dict containing values of chf_type (and eclass dirs + if store_eclass_paths is True). 
+ """ + if not extern_ec_dict: + return extern_ec_dict + chf_getter = operator.attrgetter(chf_type) + if paths: + intern_ec_dict = dict((k, (v.eclass_dir, chf_getter(v))) + for k, v in extern_ec_dict.items()) + else: + intern_ec_dict = dict((k, chf_getter(v)) + for k, v in extern_ec_dict.items()) + return intern_ec_dict + def __setitem__(self, cpv, values): """set a cpv to values This shouldn't be overriden in derived classes since it handles the readonly checks""" if self.readonly: raise cache_errors.ReadOnlyRestriction() + d = None if self.cleanse_keys: d=ProtectedDict(values) for k, v in list(d.items()): if not v: del d[k] - if self.serialize_eclasses and "_eclasses_" in values: - d["_eclasses_"] = serialize_eclasses(d["_eclasses_"]) - elif self.serialize_eclasses and "_eclasses_" in values: - d = ProtectedDict(values) - d["_eclasses_"] = serialize_eclasses(d["_eclasses_"]) - else: + if "_eclasses_" in values: + if d is None: + d = ProtectedDict(values) + if self.serialize_eclasses: + d["_eclasses_"] = serialize_eclasses(d["_eclasses_"], + self.validation_chf, paths=self.store_eclass_paths) + else: + d["_eclasses_"] = self._internal_eclasses(d["_eclasses_"], + self.validation_chf, self.store_eclass_paths) + elif d is None: d = values self._setitem(cpv, d) if not self.autocommits: @@ -159,6 +195,23 @@ class database(object): except KeyError: return x + def validate_entry(self, entry, ebuild_hash, eclass_db): + hash_key = '_%s_' % self.validation_chf + try: + entry_hash = entry[hash_key] + except KeyError: + return False + else: + if entry_hash != getattr(ebuild_hash, self.validation_chf): + return False + update = eclass_db.validate_and_rewrite_cache(entry['_eclasses_'], self.validation_chf, + self.store_eclass_paths) + if update is None: + return False + if update: + entry['_eclasses_'] = update + return True + def get_matches(self, match_dict): """generic function for walking the entire cache db, matching restrictions to filter what cpv's are returned. Derived classes should override this if they @@ -195,7 +248,9 @@ class database(object): keys = __iter__ items = iteritems -def serialize_eclasses(eclass_dict): +_keysorter = operator.itemgetter(0) + +def serialize_eclasses(eclass_dict, chf_type='mtime', paths=True): """takes a dict, returns a string representing said dict""" """The "new format", which causes older versions of <portage-2.1.2 to traceback with a ValueError due to failed long() conversion. This format @@ -206,27 +261,40 @@ def serialize_eclasses(eclass_dict): """ if not eclass_dict: return "" - return "\t".join(k + "\t%s\t%s" % eclass_dict[k] \ - for k in sorted(eclass_dict)) + getter = operator.attrgetter(chf_type) + if paths: + return "\t".join("%s\t%s\t%s" % (k, v.eclass_dir, getter(v)) + for k, v in sorted(eclass_dict.items(), key=_keysorter)) + return "\t".join("%s\t%s" % (k, getter(v)) + for k, v in sorted(eclass_dict.items(), key=_keysorter)) + -def reconstruct_eclasses(cpv, eclass_string): +def reconstruct_eclasses(cpv, eclass_string, chf_type='mtime', paths=True): """returns a dict when handed a string generated by serialize_eclasses""" eclasses = eclass_string.rstrip().lstrip().split("\t") if eclasses == [""]: # occasionally this occurs in the fs backends. they suck. 
return {} - - if len(eclasses) % 2 != 0 and len(eclasses) % 3 != 0: + + converter = _unicode + if chf_type == 'mtime': + converter = long + + if paths: + if len(eclasses) % 3 != 0: + raise cache_errors.CacheCorruption(cpv, "_eclasses_ was of invalid len %i" % len(eclasses)) + elif len(eclasses) % 2 != 0: raise cache_errors.CacheCorruption(cpv, "_eclasses_ was of invalid len %i" % len(eclasses)) d={} try: - if eclasses[1].isdigit(): - for x in range(0, len(eclasses), 2): - d[eclasses[x]] = ("", long(eclasses[x + 1])) - else: + i = iter(eclasses) + if paths: # The old format contains paths that will be discarded. - for x in range(0, len(eclasses), 3): - d[eclasses[x]] = (eclasses[x + 1], long(eclasses[x + 2])) + for name, path, val in zip(i, i, i): + d[name] = (path, converter(val)) + else: + for name, val in zip(i, i): + d[name] = converter(val) except IndexError: raise cache_errors.CacheCorruption(cpv, "_eclasses_ was of invalid len %i" % len(eclasses)) diff --git a/portage_with_autodep/pym/portage/cache/template.pyo b/portage_with_autodep/pym/portage/cache/template.pyo Binary files differnew file mode 100644 index 0000000..45da015 --- /dev/null +++ b/portage_with_autodep/pym/portage/cache/template.pyo diff --git a/portage_with_autodep/pym/portage/cache/util.py b/portage_with_autodep/pym/portage/cache/util.py deleted file mode 100644 index b824689..0000000 --- a/portage_with_autodep/pym/portage/cache/util.py +++ /dev/null @@ -1,170 +0,0 @@ -# Copyright: 2005 Gentoo Foundation -# Author(s): Brian Harring (ferringb@gentoo.org) -# License: GPL2 - -from __future__ import print_function - -__all__ = ["mirror_cache", "non_quiet_mirroring", "quiet_mirroring"] - -from itertools import chain -from portage.cache import cache_errors -from portage.localization import _ - -def mirror_cache(valid_nodes_iterable, src_cache, trg_cache, eclass_cache=None, verbose_instance=None): - - from portage import eapi_is_supported, \ - _validate_cache_for_unsupported_eapis - if not src_cache.complete_eclass_entries and not eclass_cache: - raise Exception("eclass_cache required for cache's of class %s!" 
% src_cache.__class__) - - if verbose_instance == None: - noise=quiet_mirroring() - else: - noise=verbose_instance - - dead_nodes = set(trg_cache) - count=0 - - if not trg_cache.autocommits: - trg_cache.sync(100) - - for x in valid_nodes_iterable: -# print "processing x=",x - count+=1 - dead_nodes.discard(x) - try: - entry = src_cache[x] - except KeyError as e: - noise.missing_entry(x) - del e - continue - except cache_errors.CacheError as ce: - noise.exception(x, ce) - del ce - continue - - eapi = entry.get('EAPI') - if not eapi: - eapi = '0' - eapi = eapi.lstrip('-') - eapi_supported = eapi_is_supported(eapi) - if not eapi_supported: - if not _validate_cache_for_unsupported_eapis: - noise.misc(x, _("unable to validate cache for EAPI='%s'") % eapi) - continue - - write_it = True - trg = None - try: - trg = trg_cache[x] - except (KeyError, cache_errors.CacheError): - pass - else: - if trg['_mtime_'] == entry['_mtime_'] and \ - eclass_cache.is_eclass_data_valid(trg['_eclasses_']) and \ - set(trg['_eclasses_']) == set(entry['_eclasses_']): - write_it = False - - for d in (entry, trg): - if d is not None and d.get('EAPI') in ('', '0'): - del d['EAPI'] - - if trg and not write_it: - """ We don't want to skip the write unless we're really sure that - the existing cache is identical, so don't trust _mtime_ and - _eclasses_ alone.""" - for k in set(chain(entry, trg)).difference( - ("_mtime_", "_eclasses_")): - if trg.get(k, "") != entry.get(k, ""): - write_it = True - break - - if write_it: - try: - inherited = entry.get("INHERITED", "") - eclasses = entry.get("_eclasses_") - except cache_errors.CacheError as ce: - noise.exception(x, ce) - del ce - continue - - if eclasses is not None: - if not eclass_cache.is_eclass_data_valid(entry["_eclasses_"]): - noise.eclass_stale(x) - continue - inherited = eclasses - else: - inherited = inherited.split() - - if inherited: - if src_cache.complete_eclass_entries and eclasses is None: - noise.corruption(x, "missing _eclasses_ field") - continue - - # Even if _eclasses_ already exists, replace it with data from - # eclass_cache, in order to insert local eclass paths. - try: - eclasses = eclass_cache.get_eclass_data(inherited) - except KeyError: - # INHERITED contains a non-existent eclass. - noise.eclass_stale(x) - continue - - if eclasses is None: - noise.eclass_stale(x) - continue - entry["_eclasses_"] = eclasses - - if not eapi_supported: - for k in set(entry).difference(("_mtime_", "_eclasses_")): - entry[k] = "" - entry["EAPI"] = "-" + eapi - - # by this time, if it reaches here, the eclass has been validated, and the entry has - # been updated/translated (if needs be, for metadata/cache mainly) - try: - trg_cache[x] = entry - except cache_errors.CacheError as ce: - noise.exception(x, ce) - del ce - continue - if count >= noise.call_update_min: - noise.update(x) - count = 0 - - if not trg_cache.autocommits: - trg_cache.commit() - - # ok. by this time, the trg_cache is up to date, and we have a dict - # with a crapload of cpv's. we now walk the target db, removing stuff if it's in the list. - for key in dead_nodes: - try: - del trg_cache[key] - except KeyError: - pass - except cache_errors.CacheError as ce: - noise.exception(ce) - del ce - noise.finish() - - -class quiet_mirroring(object): - # call_update_every is used by mirror_cache to determine how often to call in. - # quiet defaults to 2^24 -1. 
Don't call update, 'cept once every 16 million or so :) - call_update_min = 0xffffff - def update(self,key,*arg): pass - def exception(self,key,*arg): pass - def eclass_stale(self,*arg): pass - def missing_entry(self, key): pass - def misc(self,key,*arg): pass - def corruption(self, key, s): pass - def finish(self, *arg): pass - -class non_quiet_mirroring(quiet_mirroring): - call_update_min=1 - def update(self,key,*arg): print("processed",key) - def exception(self, key, *arg): print("exec",key,arg) - def missing(self,key): print("key %s is missing", key) - def corruption(self,key,*arg): print("corrupt %s:" % key,arg) - def eclass_stale(self,key,*arg):print("stale %s:"%key,arg) - diff --git a/portage_with_autodep/pym/portage/cache/volatile.py b/portage_with_autodep/pym/portage/cache/volatile.py index 0bf6bab..5516745 100644 --- a/portage_with_autodep/pym/portage/cache/volatile.py +++ b/portage_with_autodep/pym/portage/cache/volatile.py @@ -8,18 +8,23 @@ class database(template.database): autocommits = True serialize_eclasses = False + store_eclass_paths = False def __init__(self, *args, **config): config.pop("gid", None) config.pop("perms", None) super(database, self).__init__(*args, **config) self._data = {} - self.__iter__ = self._data.__iter__ self._delitem = self._data.__delitem__ - self.__contains__ = self._data.__contains__ def _setitem(self, name, values): self._data[name] = copy.deepcopy(values) - def _getitem(self, cpv): + def __getitem__(self, cpv): return copy.deepcopy(self._data[cpv]) + + def __iter__(self): + return iter(self._data) + + def __contains__(self, key): + return key in self._data diff --git a/portage_with_autodep/pym/portage/cache/volatile.pyo b/portage_with_autodep/pym/portage/cache/volatile.pyo Binary files differnew file mode 100644 index 0000000..fac5d55 --- /dev/null +++ b/portage_with_autodep/pym/portage/cache/volatile.pyo diff --git a/portage_with_autodep/pym/portage/checksum.py b/portage_with_autodep/pym/portage/checksum.py index 9e7e455..bd416ac 100644 --- a/portage_with_autodep/pym/portage/checksum.py +++ b/portage_with_autodep/pym/portage/checksum.py @@ -1,5 +1,5 @@ # checksum.py -- core Portage functionality -# Copyright 1998-2011 Gentoo Foundation +# Copyright 1998-2012 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 import portage @@ -16,8 +16,31 @@ import tempfile hashfunc_map = {} hashorigin_map = {} -def _generate_hash_function(hashtype, hashobject, origin="unknown"): - def pyhash(filename): +def _open_file(filename): + try: + return open(_unicode_encode(filename, + encoding=_encodings['fs'], errors='strict'), 'rb') + except IOError as e: + func_call = "open('%s')" % filename + if e.errno == errno.EPERM: + raise portage.exception.OperationNotPermitted(func_call) + elif e.errno == errno.EACCES: + raise portage.exception.PermissionDenied(func_call) + elif e.errno == errno.ENOENT: + raise portage.exception.FileNotFound(filename) + else: + raise + +class _generate_hash_function(object): + + __slots__ = ("_hashobject",) + + def __init__(self, hashtype, hashobject, origin="unknown"): + self._hashobject = hashobject + hashfunc_map[hashtype] = self + hashorigin_map[hashtype] = origin + + def __call__(self, filename): """ Run a checksum against a file. 
@@ -25,23 +48,11 @@ def _generate_hash_function(hashtype, hashobject, origin="unknown"): @type filename: String @return: The hash and size of the data """ - try: - f = open(_unicode_encode(filename, - encoding=_encodings['fs'], errors='strict'), 'rb') - except IOError as e: - func_call = "open('%s')" % filename - if e.errno == errno.EPERM: - raise portage.exception.OperationNotPermitted(func_call) - elif e.errno == errno.EACCES: - raise portage.exception.PermissionDenied(func_call) - elif e.errno == errno.ENOENT: - raise portage.exception.FileNotFound(filename) - else: - raise + f = _open_file(filename) blocksize = HASHING_BLOCKSIZE data = f.read(blocksize) size = 0 - checksum = hashobject() + checksum = self._hashobject() while data: checksum.update(data) size = size + len(data) @@ -49,9 +60,6 @@ def _generate_hash_function(hashtype, hashobject, origin="unknown"): f.close() return (checksum.hexdigest(), size) - hashfunc_map[hashtype] = pyhash - hashorigin_map[hashtype] = origin - return pyhash # Define hash functions, try to use the best module available. Later definitions # override earlier ones @@ -71,40 +79,72 @@ except ImportError: sha1hash = _generate_hash_function("SHA1", _new_sha1, origin="internal") +# Try to use mhash if available +# mhash causes GIL presently, so it gets less priority than hashlib and +# pycrypto. However, it might be the only accelerated implementation of +# WHIRLPOOL available. +try: + import mhash, functools + md5hash = _generate_hash_function("MD5", functools.partial(mhash.MHASH, mhash.MHASH_MD5), origin="mhash") + sha1hash = _generate_hash_function("SHA1", functools.partial(mhash.MHASH, mhash.MHASH_SHA1), origin="mhash") + sha256hash = _generate_hash_function("SHA256", functools.partial(mhash.MHASH, mhash.MHASH_SHA256), origin="mhash") + sha512hash = _generate_hash_function("SHA512", functools.partial(mhash.MHASH, mhash.MHASH_SHA512), origin="mhash") + for local_name, hash_name in (("rmd160", "ripemd160"), ("whirlpool", "whirlpool")): + if hasattr(mhash, 'MHASH_%s' % local_name.upper()): + globals()['%shash' % local_name] = \ + _generate_hash_function(local_name.upper(), \ + functools.partial(mhash.MHASH, getattr(mhash, 'MHASH_%s' % hash_name.upper())), \ + origin='mhash') +except ImportError: + pass + # Use pycrypto when available, prefer it over the internal fallbacks +# Check for 'new' attributes, since they can be missing if the module +# is broken somehow. try: from Crypto.Hash import SHA256, RIPEMD - sha256hash = _generate_hash_function("SHA256", SHA256.new, origin="pycrypto") - rmd160hash = _generate_hash_function("RMD160", RIPEMD.new, origin="pycrypto") -except ImportError as e: + sha256hash = getattr(SHA256, 'new', None) + if sha256hash is not None: + sha256hash = _generate_hash_function("SHA256", + sha256hash, origin="pycrypto") + rmd160hash = getattr(RIPEMD, 'new', None) + if rmd160hash is not None: + rmd160hash = _generate_hash_function("RMD160", + rmd160hash, origin="pycrypto") +except ImportError: pass # Use hashlib from python-2.5 if available and prefer it over pycrypto and internal fallbacks. -# Need special handling for RMD160 as it may not always be provided by hashlib. +# Need special handling for RMD160/WHIRLPOOL as they may not always be provided by hashlib. 
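The hashlib hunk that follows probes for optional digests instead of assuming them: hashlib.new(name) raises ValueError when the interpreter's crypto backend lacks the algorithm, and functools.partial turns a successful probe into a reusable constructor. A standalone sketch of that probe loop:

    import functools
    import hashlib

    available = {}
    for name in ("ripemd160", "whirlpool", "sha512"):
        try:
            hashlib.new(name)  # raises ValueError if unsupported
        except ValueError:
            continue
        available[name] = functools.partial(hashlib.new, name)

    # sha512 is always provided by hashlib; the other two depend on the build.
    h = available["sha512"]()
    h.update(b"data")
    print(h.hexdigest()[:16])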
try: - import hashlib + import hashlib, functools md5hash = _generate_hash_function("MD5", hashlib.md5, origin="hashlib") sha1hash = _generate_hash_function("SHA1", hashlib.sha1, origin="hashlib") sha256hash = _generate_hash_function("SHA256", hashlib.sha256, origin="hashlib") - try: - hashlib.new('ripemd160') - except ValueError: - pass - else: - def rmd160(): - return hashlib.new('ripemd160') - rmd160hash = _generate_hash_function("RMD160", rmd160, origin="hashlib") -except ImportError as e: + sha512hash = _generate_hash_function("SHA512", hashlib.sha512, origin="hashlib") + for local_name, hash_name in (("rmd160", "ripemd160"), ("whirlpool", "whirlpool")): + try: + hashlib.new(hash_name) + except ValueError: + pass + else: + globals()['%shash' % local_name] = \ + _generate_hash_function(local_name.upper(), \ + functools.partial(hashlib.new, hash_name), \ + origin='hashlib') + +except ImportError: pass - + +if "WHIRLPOOL" not in hashfunc_map: + # Bundled WHIRLPOOL implementation + from portage.util.whirlpool import new as _new_whirlpool + whirlpoolhash = _generate_hash_function("WHIRLPOOL", _new_whirlpool, origin="bundled") # Use python-fchksum if available, prefer it over all other MD5 implementations try: - import fchksum - - def md5hash(filename): - return fchksum.fmd5t(filename) + from fchksum import fmd5t as md5hash hashfunc_map["MD5"] = md5hash hashorigin_map["MD5"] = "python-fchksum" @@ -127,6 +167,15 @@ if os.path.exists(PRELINK_BINARY): prelink_capable=1 del results +def is_prelinkable_elf(filename): + f = _open_file(filename) + try: + magic = f.read(17) + finally: + f.close() + return (len(magic) == 17 and magic.startswith(b'\x7fELF') and + magic[16] in (b'\x02', b'\x03')) # 2=ET_EXEC, 3=ET_DYN + def perform_md5(x, calc_prelink=0): return perform_checksum(x, "MD5", calc_prelink)[0] @@ -137,7 +186,7 @@ def _perform_md5_merge(x, **kwargs): def perform_all(x, calc_prelink=0): mydict = {} for k in hashfunc_map: - mydict[k] = perform_checksum(x, hashfunc_map[k], calc_prelink)[0] + mydict[k] = perform_checksum(x, k, calc_prelink)[0] return mydict def get_valid_checksum_keys(): @@ -234,7 +283,8 @@ def perform_checksum(filename, hashname="MD5", calc_prelink=0): myfilename = filename prelink_tmpfile = None try: - if calc_prelink and prelink_capable: + if (calc_prelink and prelink_capable and + is_prelinkable_elf(filename)): # Create non-prelinked temporary file to checksum. # Files rejected by prelink are summed in place. 
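is_prelinkable_elf above keeps prelink out of the checksum path unless the file really is an ELF executable or shared object, judged from the first 17 header bytes: the \x7fELF magic plus the low byte of e_type (2 for ET_EXEC, 3 for ET_DYN). A sketch of the same check, written with a slice so the comparison behaves identically under Python 2 and 3 byte indexing:

    def looks_prelinkable(path):
        """True for ELF executables and shared objects, judged by
        the e_ident magic and the low byte of e_type."""
        with open(path, "rb") as f:
            magic = f.read(17)
        return (len(magic) == 17 and magic.startswith(b"\x7fELF")
            and magic[16:17] in (b"\x02", b"\x03"))

    # e.g. looks_prelinkable("/bin/sh") is True on a typical ELF system.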
try: @@ -255,8 +305,10 @@ def perform_checksum(filename, hashname="MD5", calc_prelink=0): " hash function not available (needs dev-python/pycrypto)") myhash, mysize = hashfunc_map[hashname](myfilename) except (OSError, IOError) as e: - if e.errno == errno.ENOENT: + if e.errno in (errno.ENOENT, errno.ESTALE): raise portage.exception.FileNotFound(myfilename) + elif e.errno == portage.exception.PermissionDenied.errno: + raise portage.exception.PermissionDenied(myfilename) raise return myhash, mysize finally: diff --git a/portage_with_autodep/pym/portage/checksum.pyo b/portage_with_autodep/pym/portage/checksum.pyo Binary files differnew file mode 100644 index 0000000..00231af --- /dev/null +++ b/portage_with_autodep/pym/portage/checksum.pyo diff --git a/portage_with_autodep/pym/portage/const.py b/portage_with_autodep/pym/portage/const.py index 2a391db..614dcdb 100644 --- a/portage_with_autodep/pym/portage/const.py +++ b/portage_with_autodep/pym/portage/const.py @@ -67,8 +67,7 @@ FAKEROOT_BINARY = "/usr/bin/fakeroot" BASH_BINARY = "/bin/bash" MOVE_BINARY = "/bin/mv" PRELINK_BINARY = "/usr/sbin/prelink" -AUTODEP_LIBRARY = "/usr/lib/file_hook.so" -#AUTODEP_LIBRARY = "/home/bay/autodep/src/hook_lib/file_hook.so" +AUTODEP_LIBRARY = "/usr/lib/file_hook.so" INVALID_ENV_FILE = "/etc/spork/is/not/valid/profile.env" @@ -89,12 +88,12 @@ EBUILD_PHASES = ("pretend", "setup", "unpack", "prepare", "configure" "package", "preinst", "postinst","prerm", "postrm", "nofetch", "config", "info", "other") SUPPORTED_FEATURES = frozenset([ - "allow-missing-manifests", "assume-digests", "binpkg-logs", "buildpkg", "buildsyspkg", "candy", - "ccache", "chflags", "collision-protect", "compress-build-logs", - "depcheck", "depcheckstrict", + "ccache", "chflags", "clean-logs", + "collision-protect", "compress-build-logs", "compressdebug", + "config-protect-if-modified", "depcheck", "depcheckstrict", "digest", "distcc", "distcc-pump", "distlocks", "ebuild-locks", "fakeroot", - "fail-clean", "fixpackages", "force-mirror", "getbinpkg", + "fail-clean", "force-mirror", "force-prefix", "getbinpkg", "installsources", "keeptemp", "keepwork", "fixlafiles", "lmirror", "metadata-transfer", "mirror", "multilib-strict", "news", "noauto", "noclean", "nodoc", "noinfo", "noman", @@ -107,18 +106,57 @@ SUPPORTED_FEATURES = frozenset([ "strict", "stricter", "suidctl", "test", "test-fail-continue", "unknown-features-filter", "unknown-features-warn", "unmerge-logs", "unmerge-orphans", "userfetch", "userpriv", - "usersandbox", "usersync", "webrsync-gpg"]) + "usersandbox", "usersync", "webrsync-gpg", "xattr"]) EAPI = 4 HASHING_BLOCKSIZE = 32768 MANIFEST1_HASH_FUNCTIONS = ("MD5", "SHA256", "RMD160") -MANIFEST2_HASH_FUNCTIONS = ("SHA1", "SHA256", "RMD160") - MANIFEST1_REQUIRED_HASH = "MD5" -MANIFEST2_REQUIRED_HASH = "SHA1" + +# Future events: +# +# After WHIRLPOOL is supported in stable portage: +# - Add SHA256 and WHIRLPOOL to MANIFEST2_HASH_DEFAULTS. +# - Remove SHA1 and RMD160 from MANIFEST2_HASH_*. +# - Set manifest-hashes in gentoo-x86/metadata/layout.conf as follows: +# manifest-hashes = SHA256 SHA512 WHIRLPOOL +# +# After WHIRLPOOL is supported in stable portage for at least 1 year: +# - Change MANIFEST2_REQUIRED_HASH to WHIRLPOOL. +# - Remove SHA256 from MANIFEST2_HASH_*. +# - Set manifest-hashes in gentoo-x86/metadata/layout.conf as follows: +# manifest-hashes = SHA512 WHIRLPOOL +# +# After SHA-3 is approved: +# - Add new hashes to MANIFEST2_HASH_*. 
+# +# After SHA-3 is supported in stable portage: +# - Set manifest-hashes in gentoo-x86/metadata/layout.conf as follows: +# manifest-hashes = SHA3 SHA512 WHIRLPOOL +# +# After layout.conf settings correspond to defaults in stable portage: +# - Remove redundant settings from gentoo-x86/metadata/layout.conf. + +MANIFEST2_HASH_FUNCTIONS = ("RMD160", "SHA1", "SHA256", "SHA512", "WHIRLPOOL") +MANIFEST2_HASH_DEFAULTS = frozenset(["SHA1", "SHA256", "RMD160"]) +MANIFEST2_REQUIRED_HASH = "SHA256" MANIFEST2_IDENTIFIERS = ("AUX", "MISC", "DIST", "EBUILD") + +# The EPREFIX for the current install is hardcoded here, but access to this +# constant should be minimal, in favor of access via the EPREFIX setting of +# a config instance (since it's possible to contruct a config instance with +# a different EPREFIX). Therefore, the EPREFIX constant should *NOT* be used +# in the definition of any other constants within this file. +EPREFIX="" + +# pick up EPREFIX from the environment if set +if "PORTAGE_OVERRIDE_EPREFIX" in os.environ: + EPREFIX = os.environ["PORTAGE_OVERRIDE_EPREFIX"] + if EPREFIX: + EPREFIX = os.path.normpath(EPREFIX) + # =========================================================================== # END OF CONSTANTS -- END OF CONSTANTS -- END OF CONSTANTS -- END OF CONSTANT # =========================================================================== @@ -129,7 +167,6 @@ _ENABLE_DYN_LINK_MAP = True _ENABLE_PRESERVE_LIBS = True _ENABLE_REPO_NAME_WARN = True _ENABLE_SET_CONFIG = True -_SANDBOX_COMPAT_LEVEL = "22" # The definitions above will differ between branches, so it's useful to have diff --git a/portage_with_autodep/pym/portage/const.py.rej b/portage_with_autodep/pym/portage/const.py.rej new file mode 100644 index 0000000..9fe70f8 --- /dev/null +++ b/portage_with_autodep/pym/portage/const.py.rej @@ -0,0 +1,12 @@ +--- pym/portage/const.py ++++ pym/portage/const.py +@@ -90,7 +92,8 @@ + SUPPORTED_FEATURES = frozenset([ + "allow-missing-manifests", + "assume-digests", "binpkg-logs", "buildpkg", "buildsyspkg", "candy", +- "ccache", "chflags", "collision-protect", "compress-build-logs", ++ "ccache", "chflags", "collision-protect", "compress-build-logs", ++ "depcheck", "depcheckstrict", + "digest", "distcc", "distcc-pump", "distlocks", "ebuild-locks", "fakeroot", + "fail-clean", "fixpackages", "force-mirror", "getbinpkg", + "installsources", "keeptemp", "keepwork", "fixlafiles", "lmirror", diff --git a/portage_with_autodep/pym/portage/const.pyo b/portage_with_autodep/pym/portage/const.pyo Binary files differnew file mode 100644 index 0000000..804420f --- /dev/null +++ b/portage_with_autodep/pym/portage/const.pyo diff --git a/portage_with_autodep/pym/portage/cvstree.py b/portage_with_autodep/pym/portage/cvstree.py index 9ba22f3..3680ae4 100644 --- a/portage_with_autodep/pym/portage/cvstree.py +++ b/portage_with_autodep/pym/portage/cvstree.py @@ -248,11 +248,13 @@ def getentries(mydir,recursive=0): if entries["files"][mysplit[1]]["revision"][0]=="-": entries["files"][mysplit[1]]["status"]+=["removed"] - for file in apply_cvsignore_filter(os.listdir(mydir)): + for file in os.listdir(mydir): if file=="CVS": continue if os.path.isdir(mydir+"/"+file): if file not in entries["dirs"]: + if ignore_list.match(file) is not None: + continue entries["dirs"][file]={"dirs":{},"files":{}} # It's normal for a directory to be unlisted in Entries # when checked out without -P (see bug #257660). 
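The cvstree.py hunks around here move the cvsignore filtering inline: instead of pre-filtering the whole directory listing, each entry is tested against a compiled ignore pattern as it is classified. A reduced sketch of that per-entry filter; the pattern below is a stand-in, not portage's actual ignore list:

    import os
    import re

    # Stand-in for cvstree's ignore_list regex.
    ignore_list = re.compile(r"(\.#.*|.*~|core)$")

    def visible_entries(mydir):
        """Yield entries that are neither CVS bookkeeping nor matched
        by the ignore pattern, mirroring the per-file checks."""
        for name in os.listdir(mydir):
            if name == "CVS":
                continue
            if ignore_list.match(name) is not None:
                continue
            yield name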
@@ -266,6 +268,8 @@ def getentries(mydir,recursive=0): entries["dirs"][file]["status"]=["exists"] elif os.path.isfile(mydir+"/"+file): if file not in entries["files"]: + if ignore_list.match(file) is not None: + continue entries["files"][file]={"revision":"","date":"","flags":"","tags":""} if "status" in entries["files"][file]: if "exists" not in entries["files"][file]["status"]: @@ -285,7 +289,9 @@ def getentries(mydir,recursive=0): print("failed to stat",file) print(e) return - + + elif ignore_list.match(file) is not None: + pass else: print() print("File of unknown type:",mydir+"/"+file) diff --git a/portage_with_autodep/pym/portage/cvstree.pyo b/portage_with_autodep/pym/portage/cvstree.pyo Binary files differnew file mode 100644 index 0000000..4719daf --- /dev/null +++ b/portage_with_autodep/pym/portage/cvstree.pyo diff --git a/portage_with_autodep/pym/portage/data.py b/portage_with_autodep/pym/portage/data.py index c38fa17..c4d967a 100644 --- a/portage_with_autodep/pym/portage/data.py +++ b/portage_with_autodep/pym/portage/data.py @@ -1,5 +1,5 @@ # data.py -- Calculated/Discovered Data Values -# Copyright 1998-2010 Gentoo Foundation +# Copyright 1998-2011 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 import os, pwd, grp, platform @@ -58,65 +58,165 @@ def portage_group_warning(): # If the "wheel" group does not exist then wheelgid falls back to 0. # If the "portage" group does not exist then portage_uid falls back to wheelgid. -secpass=0 - uid=os.getuid() wheelgid=0 -if uid==0: - secpass=2 try: wheelgid=grp.getgrnam("wheel")[2] except KeyError: pass -# Allow the overriding of the user used for 'userpriv' and 'userfetch' -_portage_uname = os.environ.get('PORTAGE_USERNAME', 'portage') -_portage_grpname = os.environ.get('PORTAGE_GRPNAME', 'portage') +# The portage_uid and portage_gid global constants, and others that +# depend on them are initialized lazily, in order to allow configuration +# via make.conf. Eventually, these constants may be deprecated in favor +# of config attributes, since it's conceivable that multiple +# configurations with different constants could be used simultaneously. +_initialized_globals = set() -#Discover the uid and gid of the portage user/group -try: - portage_uid = pwd.getpwnam(_portage_uname)[2] - portage_gid = grp.getgrnam(_portage_grpname)[2] - if secpass < 1 and portage_gid in os.getgroups(): - secpass=1 -except KeyError: - portage_uid=0 - portage_gid=0 - userpriv_groups = [portage_gid] - writemsg(colorize("BAD", - _("portage: 'portage' user or group missing.")) + "\n", noiselevel=-1) - writemsg(_( - " For the defaults, line 1 goes into passwd, " - "and 2 into group.\n"), noiselevel=-1) - writemsg(colorize("GOOD", - " portage:x:250:250:portage:/var/tmp/portage:/bin/false") \ - + "\n", noiselevel=-1) - writemsg(colorize("GOOD", " portage::250:portage") + "\n", - noiselevel=-1) - portage_group_warning() -else: - userpriv_groups = [portage_gid] - if secpass >= 2: - class _LazyUserprivGroups(portage.proxy.objectproxy.ObjectProxy): - def _get_target(self): - global userpriv_groups - if userpriv_groups is not self: - return userpriv_groups - userpriv_groups = _userpriv_groups - # Get a list of group IDs for the portage user. Do not use - # grp.getgrall() since it is known to trigger spurious - # SIGPIPE problems with nss_ldap. 
- mystatus, myoutput = \ - portage.subprocess_getstatusoutput("id -G %s" % _portage_uname) - if mystatus == os.EX_OK: - for x in myoutput.split(): - try: - userpriv_groups.append(int(x)) - except ValueError: - pass - userpriv_groups[:] = sorted(set(userpriv_groups)) - return userpriv_groups - - _userpriv_groups = userpriv_groups - userpriv_groups = _LazyUserprivGroups() +def _get_global(k): + if k in _initialized_globals: + return globals()[k] + + if k in ('portage_gid', 'portage_uid', 'secpass'): + global portage_gid, portage_uid, secpass + secpass = 0 + if uid == 0: + secpass = 2 + elif portage.const.EPREFIX: + secpass = 2 + #Discover the uid and gid of the portage user/group + try: + portage_uid = pwd.getpwnam(_get_global('_portage_username')).pw_uid + _portage_grpname = _get_global('_portage_grpname') + if platform.python_implementation() == 'PyPy': + # Somehow this prevents "TypeError: expected string" errors + # from grp.getgrnam() with PyPy 1.7 + _portage_grpname = str(_portage_grpname) + portage_gid = grp.getgrnam(_portage_grpname).gr_gid + if secpass < 1 and portage_gid in os.getgroups(): + secpass = 1 + except KeyError: + portage_uid = 0 + portage_gid = 0 + writemsg(colorize("BAD", + _("portage: 'portage' user or group missing.")) + "\n", noiselevel=-1) + writemsg(_( + " For the defaults, line 1 goes into passwd, " + "and 2 into group.\n"), noiselevel=-1) + writemsg(colorize("GOOD", + " portage:x:250:250:portage:/var/tmp/portage:/bin/false") \ + + "\n", noiselevel=-1) + writemsg(colorize("GOOD", " portage::250:portage") + "\n", + noiselevel=-1) + portage_group_warning() + + _initialized_globals.add('portage_gid') + _initialized_globals.add('portage_uid') + _initialized_globals.add('secpass') + + if k == 'portage_gid': + return portage_gid + elif k == 'portage_uid': + return portage_uid + elif k == 'secpass': + return secpass + else: + raise AssertionError('unknown name: %s' % k) + + elif k == 'userpriv_groups': + v = [portage_gid] + if secpass >= 2: + # Get a list of group IDs for the portage user. Do not use + # grp.getgrall() since it is known to trigger spurious + # SIGPIPE problems with nss_ldap. + mystatus, myoutput = \ + portage.subprocess_getstatusoutput("id -G %s" % _portage_username) + if mystatus == os.EX_OK: + for x in myoutput.split(): + try: + v.append(int(x)) + except ValueError: + pass + v = sorted(set(v)) + + # Avoid instantiating portage.settings when the desired + # variable is set in os.environ. + elif k in ('_portage_grpname', '_portage_username'): + v = None + if k == '_portage_grpname': + env_key = 'PORTAGE_GRPNAME' + else: + env_key = 'PORTAGE_USERNAME' + + if env_key in os.environ: + v = os.environ[env_key] + elif hasattr(portage, 'settings'): + v = portage.settings.get(env_key) + elif portage.const.EPREFIX: + # For prefix environments, default to the UID and GID of + # the top-level EROOT directory. The config class has + # equivalent code, but we also need to do it here if + # _disable_legacy_globals() has been called. 
+ eroot = os.path.join(os.environ.get('ROOT', os.sep), + portage.const.EPREFIX.lstrip(os.sep)) + try: + eroot_st = os.stat(eroot) + except OSError: + pass + else: + if k == '_portage_grpname': + try: + grp_struct = grp.getgrgid(eroot_st.st_gid) + except KeyError: + pass + else: + v = grp_struct.gr_name + else: + try: + pwd_struct = pwd.getpwuid(eroot_st.st_uid) + except KeyError: + pass + else: + v = pwd_struct.pw_name + + if v is None: + v = 'portage' + else: + raise AssertionError('unknown name: %s' % k) + + globals()[k] = v + _initialized_globals.add(k) + return v + +class _GlobalProxy(portage.proxy.objectproxy.ObjectProxy): + + __slots__ = ('_name',) + + def __init__(self, name): + portage.proxy.objectproxy.ObjectProxy.__init__(self) + object.__setattr__(self, '_name', name) + + def _get_target(self): + return _get_global(object.__getattribute__(self, '_name')) + +for k in ('portage_gid', 'portage_uid', 'secpass', 'userpriv_groups', + '_portage_grpname', '_portage_username'): + globals()[k] = _GlobalProxy(k) +del k + +def _init(settings): + """ + Use config variables like PORTAGE_GRPNAME and PORTAGE_USERNAME to + initialize global variables. This allows settings to come from make.conf + instead of requiring them to be set in the calling environment. + """ + if '_portage_grpname' not in _initialized_globals and \ + '_portage_username' not in _initialized_globals: + + v = settings.get('PORTAGE_GRPNAME', 'portage') + globals()['_portage_grpname'] = v + _initialized_globals.add('_portage_grpname') + + v = settings.get('PORTAGE_USERNAME', 'portage') + globals()['_portage_username'] = v + _initialized_globals.add('_portage_username') diff --git a/portage_with_autodep/pym/portage/data.pyo b/portage_with_autodep/pym/portage/data.pyo Binary files differnew file mode 100644 index 0000000..7f749e0 --- /dev/null +++ b/portage_with_autodep/pym/portage/data.pyo diff --git a/portage_with_autodep/pym/portage/dbapi/_MergeProcess.py b/portage_with_autodep/pym/portage/dbapi/_MergeProcess.py index 34ed031..b5f6a0b 100644 --- a/portage_with_autodep/pym/portage/dbapi/_MergeProcess.py +++ b/portage_with_autodep/pym/portage/dbapi/_MergeProcess.py @@ -1,22 +1,16 @@ -# Copyright 2010-2011 Gentoo Foundation +# Copyright 2010-2012 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 import io -import shutil import signal -import tempfile +import sys import traceback import errno import fcntl import portage from portage import os, _unicode_decode -from portage.const import PORTAGE_PACKAGE_ATOM -from portage.dep import match_from_list import portage.elog.messages -from portage.elog import _preload_elog_modules -from portage.util import ensure_dirs -from _emerge.PollConstants import PollConstants from _emerge.SpawnProcess import SpawnProcess class MergeProcess(SpawnProcess): @@ -26,7 +20,7 @@ class MergeProcess(SpawnProcess): """ __slots__ = ('mycat', 'mypkg', 'settings', 'treetype', - 'vartree', 'scheduler', 'blockers', 'pkgloc', 'infloc', 'myebuild', + 'vartree', 'blockers', 'pkgloc', 'infloc', 'myebuild', 'mydbapi', 'prev_mtimes', 'unmerge', '_elog_reader_fd', '_elog_reg_id', '_buf', '_elog_keys', '_locked_vdb') @@ -46,8 +40,12 @@ class MergeProcess(SpawnProcess): settings.reset() settings.setcpv(cpv, mydb=self.mydbapi) - if not self.unmerge: - self._handle_self_reinstall() + # Inherit stdin by default, so that the pdb SIGUSR1 + # handler is usable for the subprocess. 
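# --- Editor's note (illustrative sketch, not part of this commit) ----------
# The comment above presumes a debugging hook that drops into pdb when the
# process receives SIGUSR1; pdb reads its commands from stdin, which is why
# fd 0 is inherited via fd_pipes.setdefault(0, ...) below. A minimal handler
# of that shape (assumed here for illustration; portage's actual handler may
# differ):
import pdb
import signal

def _enter_debugger(signum, frame):
    # pdb needs a usable stdin, hence the inherited fd 0.
    pdb.Pdb().set_trace(frame)

signal.signal(signal.SIGUSR1, _enter_debugger)
# ----------------------------------------------------------------------------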
+ if self.fd_pipes is None: + self.fd_pipes = {} + self.fd_pipes.setdefault(0, sys.stdin.fileno()) + super(MergeProcess, self)._start() def _lock_vdb(self): @@ -69,59 +67,9 @@ class MergeProcess(SpawnProcess): self.vartree.dbapi.unlock() self._locked_vdb = False - def _handle_self_reinstall(self): - """ - If portage is reinstalling itself, create temporary - copies of PORTAGE_BIN_PATH and PORTAGE_PYM_PATH in order - to avoid relying on the new versions which may be - incompatible. Register an atexit hook to clean up the - temporary directories. Pre-load elog modules here since - we won't be able to later if they get unmerged (happens - when namespace changes). - """ - - settings = self.settings - cpv = settings.mycpv - reinstall_self = False - if self.settings["ROOT"] == "/" and \ - match_from_list(PORTAGE_PACKAGE_ATOM, [cpv]): - inherited = frozenset(self.settings.get('INHERITED', '').split()) - if not self.vartree.dbapi.cpv_exists(cpv) or \ - '9999' in cpv or \ - 'git' in inherited or \ - 'git-2' in inherited: - reinstall_self = True - - if reinstall_self: - # Load lazily referenced portage submodules into memory, - # so imports won't fail during portage upgrade/downgrade. - _preload_elog_modules(self.settings) - portage.proxy.lazyimport._preload_portage_submodules() - - # Make the temp directory inside $PORTAGE_TMPDIR/portage, since - # it's common for /tmp and /var/tmp to be mounted with the - # "noexec" option (see bug #346899). - build_prefix = os.path.join(settings["PORTAGE_TMPDIR"], "portage") - ensure_dirs(build_prefix) - base_path_tmp = tempfile.mkdtemp( - "", "._portage_reinstall_.", build_prefix) - portage.process.atexit_register(shutil.rmtree, base_path_tmp) - dir_perms = 0o755 - for subdir in "bin", "pym": - var_name = "PORTAGE_%s_PATH" % subdir.upper() - var_orig = settings[var_name] - var_new = os.path.join(base_path_tmp, subdir) - settings[var_name] = var_new - settings.backup_changes(var_name) - shutil.copytree(var_orig, var_new, symlinks=True) - os.chmod(var_new, dir_perms) - portage._bin_path = settings['PORTAGE_BIN_PATH'] - portage._pym_path = settings['PORTAGE_PYM_PATH'] - os.chmod(base_path_tmp, dir_perms) - def _elog_output_handler(self, fd, event): output = None - if event & PollConstants.POLLIN: + if event & self.scheduler.IO_IN: try: output = os.read(fd, self._bufsize) except OSError as e: @@ -141,6 +89,15 @@ class MergeProcess(SpawnProcess): reporter = getattr(portage.elog.messages, funcname) reporter(msg, phase=phase, key=key, out=out) + if event & self.scheduler.IO_HUP: + self.scheduler.unregister(self._elog_reg_id) + self._elog_reg_id = None + os.close(self._elog_reader_fd) + self._elog_reader_fd = None + return False + + return True + def _spawn(self, args, fd_pipes, **kwargs): """ Fork a subprocess, apply local settings, and call @@ -178,6 +135,10 @@ class MergeProcess(SpawnProcess): pid = os.fork() if pid != 0: + if not isinstance(pid, int): + raise AssertionError( + "fork returned non-integer: %s" % (repr(pid),)) + os.close(elog_writer_fd) self._elog_reader_fd = elog_reader_fd self._buf = "" @@ -193,7 +154,9 @@ class MergeProcess(SpawnProcess): return [pid] os.close(elog_reader_fd) - portage.process._setup_pipes(fd_pipes) + portage.locks._close_fds() + # Disable close_fds since we don't exec (see _setup_pipes docstring). + portage.process._setup_pipes(fd_pipes, close_fds=False) # Use default signal handlers since the ones inherited # from the parent process are irrelevant here. 
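# --- Editor's note (illustrative sketch, not part of this commit) ----------
# The _elog_output_handler changes above follow a common event-loop contract:
# consume data while the "input ready" flag is set, unregister and close on
# hang-up, and return False so the scheduler drops the watch. The same shape
# with the stdlib select module (POLLIN/POLLHUP are the stdlib analogues of
# the scheduler's IO_IN/IO_HUP flags):
import os
import select

def drain_pipe(read_fd):
    """Read from a pipe until the writer closes it, then return the bytes."""
    poller = select.poll()
    poller.register(read_fd, select.POLLIN | select.POLLHUP)
    chunks = []
    while True:
        for fd, event in poller.poll():
            if event & select.POLLIN:
                chunks.append(os.read(fd, 4096))
            if event & select.POLLHUP:
                # Writer is gone: drain whatever is left, then clean up.
                buf = os.read(fd, 4096)
                while buf:
                    chunks.append(buf)
                    buf = os.read(fd, 4096)
                poller.unregister(fd)   # analogous to scheduler.unregister()
                os.close(fd)
                return b"".join(chunks)
# ----------------------------------------------------------------------------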
@@ -270,7 +233,7 @@ class MergeProcess(SpawnProcess):
 		if self._elog_reg_id is not None:
 			self.scheduler.unregister(self._elog_reg_id)
 			self._elog_reg_id = None
-		if self._elog_reader_fd:
+		if self._elog_reader_fd is not None:
 			os.close(self._elog_reader_fd)
 			self._elog_reader_fd = None
 		if self._elog_keys is not None:
diff --git a/portage_with_autodep/pym/portage/dbapi/_MergeProcess.pyo b/portage_with_autodep/pym/portage/dbapi/_MergeProcess.pyo
Binary files differ
new file mode 100644
index 0000000..5839ad8
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dbapi/_MergeProcess.pyo
diff --git a/portage_with_autodep/pym/portage/dbapi/_SyncfsProcess.py b/portage_with_autodep/pym/portage/dbapi/_SyncfsProcess.py
new file mode 100644
index 0000000..7518214
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dbapi/_SyncfsProcess.py
@@ -0,0 +1,53 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.util._ctypes import find_library, LoadLibrary
+from portage.util._async.ForkProcess import ForkProcess
+
+class SyncfsProcess(ForkProcess):
+	"""
+	Isolate ctypes usage in a subprocess, in order to avoid
+	potential problems with stale cached libraries as
+	described in bug #448858, comment #14 (also see
+	http://bugs.python.org/issue14597).
+	"""
+
+	__slots__ = ('paths',)
+
+	@staticmethod
+	def _get_syncfs():
+
+		filename = find_library("c")
+		if filename is not None:
+			library = LoadLibrary(filename)
+			if library is not None:
+				try:
+					return library.syncfs
+				except AttributeError:
+					pass
+
+		return None
+
+	def _run(self):
+
+		syncfs_failed = False
+		syncfs = self._get_syncfs()
+
+		if syncfs is not None:
+			for path in self.paths:
+				try:
+					fd = os.open(path, os.O_RDONLY)
+				except OSError:
+					pass
+				else:
+					try:
+						if syncfs(fd) != 0:
+							# Happens with PyPy (bug #446610)
+							syncfs_failed = True
+					finally:
+						os.close(fd)
+
+		if syncfs is None or syncfs_failed:
+			return 1
+		return os.EX_OK
diff --git a/portage_with_autodep/pym/portage/dbapi/__init__.py b/portage_with_autodep/pym/portage/dbapi/__init__.py
index e386faa..a1c5c56 100644
--- a/portage_with_autodep/pym/portage/dbapi/__init__.py
+++ b/portage_with_autodep/pym/portage/dbapi/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 1998-2011 Gentoo Foundation
+# Copyright 1998-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 __all__ = ["dbapi"]
@@ -11,7 +11,7 @@ portage.proxy.lazyimport.lazyimport(globals(),
 	'portage.dep:match_from_list',
 	'portage.output:colorize',
 	'portage.util:cmp_sort_key,writemsg',
-	'portage.versions:catsplit,catpkgsplit,vercmp',
+	'portage.versions:catsplit,catpkgsplit,vercmp,_pkg_str',
 )
 
 from portage import os
@@ -46,7 +46,12 @@ class dbapi(object):
 	def cp_list(self, cp, use_cache=1):
 		raise NotImplementedError(self)
 
-	def _cpv_sort_ascending(self, cpv_list):
+	@staticmethod
+	def _cmp_cpv(cpv1, cpv2):
+		return vercmp(cpv1.version, cpv2.version)
+
+	@staticmethod
+	def _cpv_sort_ascending(cpv_list):
 		"""
 		Use this to sort self.cp_list() results in ascending
 		order. It sorts in place and returns None.
@@ -55,12 +60,7 @@ class dbapi(object):
 		# If the cpv includes explicit -r0, it has to be preserved
 		# for consistency in findname and aux_get calls, so use a
 		# dict to map strings back to their original values.
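# --- Editor's note (illustrative sketch, not part of this commit) ----------
# The change below replaces a per-call version map with a static comparator
# over _pkg_str objects. The underlying sort-by-comparator idiom, shown with
# functools.cmp_to_key and a toy comparator (portage's real comparator is
# vercmp; the one here only handles dotted numeric versions):
from functools import cmp_to_key

def toy_vercmp(v1, v2):
    a = [int(x) for x in v1.split(".")]
    b = [int(x) for x in v2.split(".")]
    return (a > b) - (a < b)   # -1, 0 or 1, like vercmp()

versions = ["1.10", "1.2", "1.9"]
versions.sort(key=cmp_to_key(toy_vercmp))
# versions is now ['1.2', '1.9', '1.10'] -- a plain string sort would be wrong
# ----------------------------------------------------------------------------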
- ver_map = {} - for cpv in cpv_list: - ver_map[cpv] = '-'.join(catpkgsplit(cpv)[2:]) - def cmp_cpv(cpv1, cpv2): - return vercmp(ver_map[cpv1], ver_map[cpv2]) - cpv_list.sort(key=cmp_sort_key(cmp_cpv)) + cpv_list.sort(key=cmp_sort_key(dbapi._cmp_cpv)) def cpv_all(self): """Return all CPVs in the db @@ -155,64 +155,74 @@ class dbapi(object): 2) Check enabled/disabled flag states. """ - iuse_implicit_match = self.settings._iuse_implicit_match + aux_keys = ["IUSE", "SLOT", "USE", "repository"] for cpv in cpv_iter: try: - iuse, slot, use = self.aux_get(cpv, ["IUSE", "SLOT", "USE"], myrepo=atom.repo) + metadata = dict(zip(aux_keys, + self.aux_get(cpv, aux_keys, myrepo=atom.repo))) except KeyError: continue - iuse = frozenset(x.lstrip('+-') for x in iuse.split()) - missing_iuse = False - for x in atom.unevaluated_atom.use.required: - if x not in iuse and not iuse_implicit_match(x): - missing_iuse = True - break - if missing_iuse: + + if not self._match_use(atom, cpv, metadata): continue - if not atom.use: - pass - elif not self._use_mutable: - # Use IUSE to validate USE settings for built packages, - # in case the package manager that built this package - # failed to do that for some reason (or in case of - # data corruption). - use = frozenset(x for x in use.split() if x in iuse or \ - iuse_implicit_match(x)) - missing_enabled = atom.use.missing_enabled.difference(iuse) - missing_disabled = atom.use.missing_disabled.difference(iuse) - - if atom.use.enabled: - if atom.use.enabled.intersection(missing_disabled): - continue - need_enabled = atom.use.enabled.difference(use) + + yield cpv + + def _match_use(self, atom, cpv, metadata): + iuse_implicit_match = self.settings._iuse_implicit_match + iuse = frozenset(x.lstrip('+-') for x in metadata["IUSE"].split()) + + for x in atom.unevaluated_atom.use.required: + if x not in iuse and not iuse_implicit_match(x): + return False + + if atom.use is None: + pass + + elif not self._use_mutable: + # Use IUSE to validate USE settings for built packages, + # in case the package manager that built this package + # failed to do that for some reason (or in case of + # data corruption). + use = frozenset(x for x in metadata["USE"].split() + if x in iuse or iuse_implicit_match(x)) + missing_enabled = atom.use.missing_enabled.difference(iuse) + missing_disabled = atom.use.missing_disabled.difference(iuse) + + if atom.use.enabled: + if atom.use.enabled.intersection(missing_disabled): + return False + need_enabled = atom.use.enabled.difference(use) + if need_enabled: + need_enabled = need_enabled.difference(missing_enabled) if need_enabled: - need_enabled = need_enabled.difference(missing_enabled) - if need_enabled: - continue + return False - if atom.use.disabled: - if atom.use.disabled.intersection(missing_enabled): - continue - need_disabled = atom.use.disabled.intersection(use) + if atom.use.disabled: + if atom.use.disabled.intersection(missing_enabled): + return False + need_disabled = atom.use.disabled.intersection(use) + if need_disabled: + need_disabled = need_disabled.difference(missing_disabled) if need_disabled: - need_disabled = need_disabled.difference(missing_disabled) - if need_disabled: - continue - else: - # Check masked and forced flags for repoman. 
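# --- Editor's note (illustrative sketch, not part of this commit) ----------
# The _match_use logic factored out above is a USE-dependency check. Stripped
# of portage's Atom machinery and of the default handling for flags missing
# from IUSE, the core rule for an atom like foo[bar,-baz] is:
def use_dep_matches(need_on, need_off, use):
    """True if every flag in need_on is enabled and none in need_off is."""
    use = frozenset(use)
    return frozenset(need_on) <= use and not frozenset(need_off) & use

# use_dep_matches({"bar"}, {"baz"}, {"bar", "qux"})  -> True
# use_dep_matches({"bar"}, {"baz"}, {"bar", "baz"})  -> False
# ----------------------------------------------------------------------------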
- mysettings = getattr(self, 'settings', None) - if mysettings is not None and not mysettings.local_config: + return False - pkg = "%s:%s" % (cpv, slot) - usemask = mysettings._getUseMask(pkg) - if usemask.intersection(atom.use.enabled): - continue + elif not self.settings.local_config: + # Check masked and forced flags for repoman. + if hasattr(cpv, 'slot'): + pkg = cpv + else: + pkg = _pkg_str(cpv, slot=metadata["SLOT"], + repo=metadata.get("repository")) + usemask = self.settings._getUseMask(pkg) + if usemask.intersection(atom.use.enabled): + return False - useforce = mysettings._getUseForce(pkg).difference(usemask) - if useforce.intersection(atom.use.disabled): - continue + useforce = self.settings._getUseForce(pkg).difference(usemask) + if useforce.intersection(atom.use.disabled): + return False - yield cpv + return True def invalidentry(self, mypath): if '/-MERGING-' in mypath: diff --git a/portage_with_autodep/pym/portage/dbapi/__init__.pyo b/portage_with_autodep/pym/portage/dbapi/__init__.pyo Binary files differnew file mode 100644 index 0000000..e7b494d --- /dev/null +++ b/portage_with_autodep/pym/portage/dbapi/__init__.pyo diff --git a/portage_with_autodep/pym/portage/dbapi/_expand_new_virt.py b/portage_with_autodep/pym/portage/dbapi/_expand_new_virt.py index 6d6a27d..d379b4c 100644 --- a/portage_with_autodep/pym/portage/dbapi/_expand_new_virt.py +++ b/portage_with_autodep/pym/portage/dbapi/_expand_new_virt.py @@ -63,7 +63,8 @@ def expand_new_virt(vardb, atom): success, atoms = portage.dep_check(rdepend, None, vardb.settings, myuse=valid_use, - myroot=vardb.root, trees={vardb.root:{"porttree":vardb.vartree, + myroot=vardb.settings['EROOT'], + trees={vardb.settings['EROOT']:{"porttree":vardb.vartree, "vartree":vardb.vartree}}) if success: diff --git a/portage_with_autodep/pym/portage/dbapi/_expand_new_virt.pyo b/portage_with_autodep/pym/portage/dbapi/_expand_new_virt.pyo Binary files differnew file mode 100644 index 0000000..6c23a7e --- /dev/null +++ b/portage_with_autodep/pym/portage/dbapi/_expand_new_virt.pyo diff --git a/portage_with_autodep/pym/portage/dbapi/_similar_name_search.py b/portage_with_autodep/pym/portage/dbapi/_similar_name_search.py new file mode 100644 index 0000000..b6e4a1f --- /dev/null +++ b/portage_with_autodep/pym/portage/dbapi/_similar_name_search.py @@ -0,0 +1,57 @@ +# Copyright 2011-2012 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + +import difflib + +from portage.versions import catsplit + +def similar_name_search(dbs, atom): + + cp_lower = atom.cp.lower() + cat, pkg = catsplit(cp_lower) + if cat == "null": + cat = None + + all_cp = set() + for db in dbs: + all_cp.update(db.cp_all()) + + # discard dir containing no ebuilds + all_cp.discard(atom.cp) + + orig_cp_map = {} + for cp_orig in all_cp: + orig_cp_map.setdefault(cp_orig.lower(), []).append(cp_orig) + all_cp = set(orig_cp_map) + + if cat: + matches = difflib.get_close_matches(cp_lower, all_cp) + else: + pkg_to_cp = {} + for other_cp in list(all_cp): + other_pkg = catsplit(other_cp)[1] + if other_pkg == pkg: + # Check for non-identical package that + # differs only by upper/lower case. 
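# --- Editor's note (illustrative sketch, not part of this commit) ----------
# The new _similar_name_search module is built around
# difflib.get_close_matches, which returns up to n fuzzy matches (default
# n=3) whose similarity ratio is at least the cutoff (default 0.6), best
# match first. In isolation:
import difflib

print(difflib.get_close_matches("pyhton", ["python", "perl", "bash"]))
# -> ['python']
# ----------------------------------------------------------------------------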
+ identical = True + for cp_orig in orig_cp_map[other_cp]: + if catsplit(cp_orig)[1] != \ + catsplit(atom.cp)[1]: + identical = False + break + if identical: + # discard dir containing no ebuilds + all_cp.discard(other_cp) + continue + pkg_to_cp.setdefault(other_pkg, set()).add(other_cp) + + pkg_matches = difflib.get_close_matches(pkg, pkg_to_cp) + matches = [] + for pkg_match in pkg_matches: + matches.extend(pkg_to_cp[pkg_match]) + + matches_orig_case = [] + for cp in matches: + matches_orig_case.extend(orig_cp_map[cp]) + + return matches_orig_case diff --git a/portage_with_autodep/pym/portage/dbapi/bintree.py b/portage_with_autodep/pym/portage/dbapi/bintree.py index 62fc623..a8027ee 100644 --- a/portage_with_autodep/pym/portage/dbapi/bintree.py +++ b/portage_with_autodep/pym/portage/dbapi/bintree.py @@ -1,4 +1,4 @@ -# Copyright 1998-2011 Gentoo Foundation +# Copyright 1998-2012 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 __all__ = ["bindbapi", "binarytree"] @@ -11,19 +11,20 @@ portage.proxy.lazyimport.lazyimport(globals(), 'portage.output:EOutput,colorize', 'portage.locks:lockfile,unlockfile', 'portage.package.ebuild.doebuild:_vdb_use_conditional_atoms', - 'portage.package.ebuild.fetch:_check_distfile', + 'portage.package.ebuild.fetch:_check_distfile,_hide_url_passwd', 'portage.update:update_dbentries', 'portage.util:atomic_ofstream,ensure_dirs,normalize_path,' + \ 'writemsg,writemsg_stdout', 'portage.util.listdir:listdir', - 'portage.versions:best,catpkgsplit,catsplit', + 'portage.util._urlopen:urlopen@_urlopen', + 'portage.versions:best,catpkgsplit,catsplit,_pkg_str', ) from portage.cache.mappings import slot_dict_class from portage.const import CACHE_PATH from portage.dbapi.virtual import fakedbapi from portage.dep import Atom, use_reduce, paren_enclose -from portage.exception import AlarmSignal, InvalidPackageName, \ +from portage.exception import AlarmSignal, InvalidData, InvalidPackageName, \ PermissionDenied, PortageException from portage.localization import _ from portage import _movefile @@ -35,19 +36,17 @@ from portage import _unicode_encode import codecs import errno import io -import re import stat import subprocess import sys import tempfile import textwrap +import warnings from itertools import chain try: from urllib.parse import urlparse - from urllib.request import urlopen as urllib_request_urlopen except ImportError: from urlparse import urlparse - from urllib import urlopen as urllib_request_urlopen if sys.hexversion >= 0x3000000: basestring = str @@ -67,7 +66,7 @@ class bindbapi(fakedbapi): ["BUILD_TIME", "CHOST", "DEPEND", "EAPI", "IUSE", "KEYWORDS", "LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND", "repository", "RESTRICT", "SLOT", "USE", "DEFINED_PHASES", - "REQUIRED_USE"]) + ]) self._aux_cache_slot_dict = slot_dict_class(self._aux_cache_keys) self._aux_cache = {} @@ -177,6 +176,34 @@ class bindbapi(fakedbapi): self.bintree.populate() return fakedbapi.cpv_all(self) + def getfetchsizes(self, pkg): + """ + This will raise MissingSignature if SIZE signature is not available, + or InvalidSignature if SIZE signature is invalid. 
+ """ + + if not self.bintree.populated: + self.bintree.populate() + + pkg = getattr(pkg, 'cpv', pkg) + + filesdict = {} + if not self.bintree.isremote(pkg): + pass + else: + metadata = self.bintree._remotepkgs[pkg] + try: + size = int(metadata["SIZE"]) + except KeyError: + raise portage.exception.MissingSignature("SIZE") + except ValueError: + raise portage.exception.InvalidSignature( + "SIZE: %s" % metadata["SIZE"]) + else: + filesdict[os.path.basename(self.bintree.getname(pkg))] = size + + return filesdict + def _pkgindex_cpv_map_latest_build(pkgindex): """ Given a PackageIndex instance, create a dict of cpv -> metadata map. @@ -185,13 +212,20 @@ def _pkgindex_cpv_map_latest_build(pkgindex): @param pkgindex: A PackageIndex instance. @type pkgindex: PackageIndex @rtype: dict - @returns: a dict containing entry for the give cpv. + @return: a dict containing entry for the give cpv. """ cpv_map = {} for d in pkgindex.packages: cpv = d["CPV"] + try: + cpv = _pkg_str(cpv) + except InvalidData: + writemsg(_("!!! Invalid remote binary package: %s\n") % cpv, + noiselevel=-1) + continue + btime = d.get('BUILD_TIME', '') try: btime = int(btime) @@ -208,16 +242,35 @@ def _pkgindex_cpv_map_latest_build(pkgindex): if other_btime and (not btime or other_btime > btime): continue - cpv_map[cpv] = d + cpv_map[_pkg_str(cpv)] = d return cpv_map class binarytree(object): "this tree scans for a list of all packages available in PKGDIR" - def __init__(self, root, pkgdir, virtual=None, settings=None): + def __init__(self, _unused=None, pkgdir=None, + virtual=DeprecationWarning, settings=None): + + if pkgdir is None: + raise TypeError("pkgdir parameter is required") + + if settings is None: + raise TypeError("settings parameter is required") + + if _unused is not None and _unused != settings['ROOT']: + warnings.warn("The root parameter of the " + "portage.dbapi.bintree.binarytree" + " constructor is now unused. Use " + "settings['ROOT'] instead.", + DeprecationWarning, stacklevel=2) + + if virtual is not DeprecationWarning: + warnings.warn("The 'virtual' parameter of the " + "portage.dbapi.bintree.binarytree" + " constructor is unused", + DeprecationWarning, stacklevel=2) + if True: - self.root = root - #self.pkgdir=settings["PKGDIR"] self.pkgdir = normalize_path(pkgdir) self.dbapi = bindbapi(self, settings=settings) self.update_ents = self.dbapi.update_ents @@ -242,7 +295,7 @@ class binarytree(object): ["BUILD_TIME", "CHOST", "DEPEND", "DESCRIPTION", "EAPI", "IUSE", "KEYWORDS", "LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND", "repository", "SLOT", "USE", "DEFINED_PHASES", - "REQUIRED_USE", "BASE_URI"] + "BASE_URI"] self._pkgindex_aux_keys = list(self._pkgindex_aux_keys) self._pkgindex_use_evaluated_keys = \ ("LICENSE", "RDEPEND", "DEPEND", @@ -268,7 +321,6 @@ class binarytree(object): "SLOT" : "0", "USE" : "", "DEFINED_PHASES" : "", - "REQUIRED_USE" : "" } self._pkgindex_inherited_keys = ["CHOST", "repository"] @@ -302,6 +354,15 @@ class binarytree(object): chain(*self._pkgindex_translated_keys) )) + @property + def root(self): + warnings.warn("The root attribute of " + "portage.dbapi.bintree.binarytree" + " is deprecated. 
Use " + "settings['ROOT'] instead.", + DeprecationWarning, stacklevel=3) + return self.settings['ROOT'] + def move_ent(self, mylist, repo_match=None): if not self.populated: self.populate() @@ -603,6 +664,7 @@ class binarytree(object): if mycpv in pkg_paths: # discard duplicates (All/ is preferred) continue + mycpv = _pkg_str(mycpv) pkg_paths[mycpv] = mypath # update the path if the package has been moved oldpath = d.get("PATH") @@ -678,6 +740,7 @@ class binarytree(object): (mycpv, self.settings["PORTAGE_CONFIGROOT"]), noiselevel=-1) continue + mycpv = _pkg_str(mycpv) pkg_paths[mycpv] = mypath self.dbapi.cpv_inject(mycpv) update_pkgindex = True @@ -787,7 +850,7 @@ class binarytree(object): # slash, so join manually... url = base_url.rstrip("/") + "/Packages" try: - f = urllib_request_urlopen(url) + f = _urlopen(url) except IOError: path = parsed_url.path.rstrip("/") + "/Packages" @@ -859,7 +922,7 @@ class binarytree(object): noiselevel=-1) except EnvironmentError as e: writemsg(_("\n\n!!! Error fetching binhost package" \ - " info from '%s'\n") % base_url) + " info from '%s'\n") % _hide_url_passwd(base_url)) writemsg("!!! %s\n\n" % str(e)) del e pkgindex = None @@ -935,7 +998,7 @@ class binarytree(object): writemsg_stdout("\n") writemsg_stdout( colorize("GOOD", _("Fetching bininfo from ")) + \ - re.sub(r'//(.+):.+@(.+)/', r'//\1:*password*@\2/', base_url) + "\n") + _hide_url_passwd(base_url) + "\n") remotepkgs = portage.getbinpkg.dir_get_metadata( base_url, chunk_size=chunk_size) @@ -947,7 +1010,12 @@ class binarytree(object): noiselevel=-1) continue mycat = mycat.strip() - fullpkg = mycat+"/"+mypkg[:-5] + try: + fullpkg = _pkg_str(mycat+"/"+mypkg[:-5]) + except InvalidData: + writemsg(_("!!! Invalid remote binary package: %s\n") % mypkg, + noiselevel=-1) + continue if fullpkg in metadata: # When using this old protocol, comparison with the remote @@ -1101,7 +1169,7 @@ class binarytree(object): Performs checksums and evaluates USE flag conditionals. Raises InvalidDependString if necessary. @rtype: dict - @returns: a dict containing entry for the give cpv. + @return: a dict containing entry for the give cpv. """ pkg_path = self.getname(cpv) @@ -1307,7 +1375,7 @@ class binarytree(object): Verify digests for the given package and raise DigestException if verification fails. @rtype: bool - @returns: True if digests could be located, False otherwise. + @return: True if digests could be located, False otherwise. 
""" cpv = pkg if not isinstance(cpv, basestring): diff --git a/portage_with_autodep/pym/portage/dbapi/bintree.pyo b/portage_with_autodep/pym/portage/dbapi/bintree.pyo Binary files differnew file mode 100644 index 0000000..f99f377 --- /dev/null +++ b/portage_with_autodep/pym/portage/dbapi/bintree.pyo diff --git a/portage_with_autodep/pym/portage/dbapi/cpv_expand.pyo b/portage_with_autodep/pym/portage/dbapi/cpv_expand.pyo Binary files differnew file mode 100644 index 0000000..cf1a428 --- /dev/null +++ b/portage_with_autodep/pym/portage/dbapi/cpv_expand.pyo diff --git a/portage_with_autodep/pym/portage/dbapi/dep_expand.pyo b/portage_with_autodep/pym/portage/dbapi/dep_expand.pyo Binary files differnew file mode 100644 index 0000000..b323f5b --- /dev/null +++ b/portage_with_autodep/pym/portage/dbapi/dep_expand.pyo diff --git a/portage_with_autodep/pym/portage/dbapi/porttree.py b/portage_with_autodep/pym/portage/dbapi/porttree.py index ecf275c..c5ee770 100644 --- a/portage_with_autodep/pym/portage/dbapi/porttree.py +++ b/portage_with_autodep/pym/portage/dbapi/porttree.py @@ -1,4 +1,4 @@ -# Copyright 1998-2011 Gentoo Foundation +# Copyright 1998-2012 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 __all__ = [ @@ -14,20 +14,19 @@ portage.proxy.lazyimport.lazyimport(globals(), 'portage.package.ebuild.doebuild:doebuild', 'portage.util:ensure_dirs,shlex_split,writemsg,writemsg_level', 'portage.util.listdir:listdir', - 'portage.versions:best,catpkgsplit,_pkgsplit@pkgsplit,ver_regexp', + 'portage.versions:best,catpkgsplit,_pkgsplit@pkgsplit,ver_regexp,_pkg_str', ) -from portage.cache import metadata_overlay, volatile +from portage.cache import volatile from portage.cache.cache_errors import CacheError from portage.cache.mappings import Mapping from portage.dbapi import dbapi from portage.exception import PortageException, \ FileNotFound, InvalidAtom, InvalidDependString, InvalidPackageName from portage.localization import _ -from portage.manifest import Manifest -from portage import eclass_cache, auxdbkeys, \ - eapi_is_supported, dep_check, \ +from portage import eclass_cache, \ + eapi_is_supported, \ _eapi_is_deprecated from portage import os from portage import _encodings @@ -37,8 +36,6 @@ from _emerge.EbuildMetadataPhase import EbuildMetadataPhase from _emerge.PollScheduler import PollScheduler import os as _os -import io -import stat import sys import traceback import warnings @@ -47,15 +44,6 @@ if sys.hexversion >= 0x3000000: basestring = str long = int -class _repo_info(object): - __slots__ = ('name', 'path', 'eclass_db', 'portdir', 'portdir_overlay') - def __init__(self, name, path, eclass_db): - self.name = name - self.path = path - self.eclass_db = eclass_db - self.portdir = eclass_db.porttrees[0] - self.portdir_overlay = ' '.join(eclass_db.porttrees[1:]) - class portdbapi(dbapi): """this tree will scan a portage directory located at root (passed to init)""" portdbapi_instances = [] @@ -69,6 +57,13 @@ class portdbapi(dbapi): def porttree_root(self): return self.settings.repositories.mainRepoLocation() + @property + def eclassdb(self): + main_repo = self.repositories.mainRepo() + if main_repo is None: + return None + return main_repo.eclass_db + def __init__(self, _unused_param=None, mysettings=None): """ @param _unused_param: deprecated, use mysettings['PORTDIR'] instead @@ -100,6 +95,7 @@ class portdbapi(dbapi): # this purpose because doebuild makes many changes to the config # instance that is passed in. 
self.doebuild_settings = config(clone=self.settings) + self._scheduler = PollScheduler().sched_iface self.depcachedir = os.path.realpath(self.settings.depcachedir) if os.environ.get("SANDBOX_ON") == "1": @@ -112,7 +108,6 @@ class portdbapi(dbapi): ":".join(filter(None, sandbox_write)) self.porttrees = list(self.settings.repositories.repoLocationList()) - self.eclassdb = eclass_cache.cache(self.settings.repositories.mainRepoLocation()) # This is used as sanity check for aux_get(). If there is no # root eclass dir, we assume that PORTDIR is invalid or @@ -121,86 +116,74 @@ class portdbapi(dbapi): self._have_root_eclass_dir = os.path.isdir( os.path.join(self.settings.repositories.mainRepoLocation(), "eclass")) - self.metadbmodule = self.settings.load_best_module("portdbapi.metadbmodule") - #if the portdbapi is "frozen", then we assume that we can cache everything (that no updates to it are happening) self.xcache = {} self.frozen = 0 - #Create eclass dbs - self._repo_info = {} - eclass_dbs = {self.settings.repositories.mainRepoLocation() : self.eclassdb} - for repo in self.repositories: - if repo.location in self._repo_info: - continue - - eclass_db = None - for eclass_location in repo.eclass_locations: - tree_db = eclass_dbs.get(eclass_location) - if tree_db is None: - tree_db = eclass_cache.cache(eclass_location) - eclass_dbs[eclass_location] = tree_db - if eclass_db is None: - eclass_db = tree_db.copy() - else: - eclass_db.append(tree_db) - - self._repo_info[repo.location] = _repo_info(repo.name, repo.location, eclass_db) - #Keep a list of repo names, sorted by priority (highest priority first). self._ordered_repo_name_list = tuple(reversed(self.repositories.prepos_order)) self.auxdbmodule = self.settings.load_best_module("portdbapi.auxdbmodule") self.auxdb = {} self._pregen_auxdb = {} + # If the current user doesn't have depcachedir write permission, + # then the depcachedir cache is kept here read-only access. + self._ro_auxdb = {} self._init_cache_dirs() - depcachedir_w_ok = os.access(self.depcachedir, os.W_OK) - cache_kwargs = { - 'gid' : portage_gid, - 'perms' : 0o664 - } - - # XXX: REMOVE THIS ONCE UNUSED_0 IS YANKED FROM auxdbkeys - # ~harring - filtered_auxdbkeys = [x for x in auxdbkeys if not x.startswith("UNUSED_0")] - filtered_auxdbkeys.sort() + try: + depcachedir_st = os.stat(self.depcachedir) + depcachedir_w_ok = os.access(self.depcachedir, os.W_OK) + except OSError: + depcachedir_st = None + depcachedir_w_ok = False + + cache_kwargs = {} + + depcachedir_unshared = False + if portage.data.secpass < 1 and \ + depcachedir_w_ok and \ + depcachedir_st is not None and \ + os.getuid() == depcachedir_st.st_uid and \ + os.getgid() == depcachedir_st.st_gid: + # If this user owns depcachedir and is not in the + # portage group, then don't bother to set permissions + # on cache entries. This makes it possible to run + # egencache without any need to be a member of the + # portage group. + depcachedir_unshared = True + else: + cache_kwargs.update({ + 'gid' : portage_gid, + 'perms' : 0o664 + }) + # If secpass < 1, we don't want to write to the cache # since then we won't be able to apply group permissions # to the cache entries/directories. 
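# --- Editor's note (illustrative sketch, not part of this commit) ----------
# The depcachedir_unshared branch above boils down to: if the invoking user
# owns depcachedir outright, skip the group-permission setup entirely. As a
# standalone predicate (ignoring the accompanying secpass check):
import os

def cache_is_private(path):
    """True if `path` is writable and owned by the current uid and gid."""
    try:
        st = os.stat(path)
    except OSError:
        return False
    return (os.access(path, os.W_OK)
            and os.getuid() == st.st_uid
            and os.getgid() == st.st_gid)
# ----------------------------------------------------------------------------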
- if secpass < 1 or not depcachedir_w_ok: + if (secpass < 1 and not depcachedir_unshared) or not depcachedir_w_ok: for x in self.porttrees: + self.auxdb[x] = volatile.database( + self.depcachedir, x, self._known_keys, + **cache_kwargs) try: - db_ro = self.auxdbmodule(self.depcachedir, x, - filtered_auxdbkeys, readonly=True, **cache_kwargs) + self._ro_auxdb[x] = self.auxdbmodule(self.depcachedir, x, + self._known_keys, readonly=True, **cache_kwargs) except CacheError: - self.auxdb[x] = volatile.database( - self.depcachedir, x, filtered_auxdbkeys, - **cache_kwargs) - else: - self.auxdb[x] = metadata_overlay.database( - self.depcachedir, x, filtered_auxdbkeys, - db_rw=volatile.database, db_ro=db_ro, - **cache_kwargs) + pass else: for x in self.porttrees: if x in self.auxdb: continue # location, label, auxdbkeys self.auxdb[x] = self.auxdbmodule( - self.depcachedir, x, filtered_auxdbkeys, **cache_kwargs) - if self.auxdbmodule is metadata_overlay.database: - self.auxdb[x].db_ro.ec = self._repo_info[x].eclass_db + self.depcachedir, x, self._known_keys, **cache_kwargs) if "metadata-transfer" not in self.settings.features: for x in self.porttrees: if x in self._pregen_auxdb: continue - if os.path.isdir(os.path.join(x, "metadata", "cache")): - self._pregen_auxdb[x] = self.metadbmodule( - x, "metadata/cache", filtered_auxdbkeys, readonly=True) - try: - self._pregen_auxdb[x].ec = self._repo_info[x].eclass_db - except AttributeError: - pass + cache = self._create_pregen_cache(x) + if cache is not None: + self._pregen_auxdb[x] = cache # Selectively cache metadata in order to optimize dep matching. self._aux_cache_keys = set( ["DEPEND", "EAPI", "INHERITED", "IUSE", "KEYWORDS", "LICENSE", @@ -210,18 +193,28 @@ class portdbapi(dbapi): self._aux_cache = {} self._broken_ebuilds = set() + def _create_pregen_cache(self, tree): + conf = self.repositories.get_repo_for_location(tree) + cache = conf.get_pregenerated_cache( + self._known_keys, readonly=True) + if cache is not None: + try: + cache.ec = self.repositories.get_repo_for_location(tree).eclass_db + except AttributeError: + pass + return cache + def _init_cache_dirs(self): """Create /var/cache/edb/dep and adjust permissions for the portage group.""" dirmode = 0o2070 - filemode = 0o60 modemask = 0o2 try: ensure_dirs(self.depcachedir, gid=portage_gid, mode=dirmode, mask=modemask) - except PortageException as e: + except PortageException: pass def close_caches(self): @@ -260,7 +253,7 @@ class portdbapi(dbapi): @param canonical_repo_path: the canonical path of a repository, as resolved by os.path.realpath() @type canonical_repo_path: String - @returns: The repo_name for the corresponding repository, or None + @return: The repo_name for the corresponding repository, or None if the path does not correspond a known repository @rtype: String or None """ @@ -332,63 +325,33 @@ class portdbapi(dbapi): return (filename, x) return (None, 0) - def _metadata_process(self, cpv, ebuild_path, repo_path): - """ - Create an EbuildMetadataPhase instance to generate metadata for the - give ebuild. - @rtype: EbuildMetadataPhase - @returns: A new EbuildMetadataPhase instance, or None if the - metadata cache is already valid. 
- """ - metadata, st, emtime = self._pull_valid_cache(cpv, ebuild_path, repo_path) - if metadata is not None: - return None - - process = EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path, - ebuild_mtime=emtime, metadata_callback=self._metadata_callback, - portdb=self, repo_path=repo_path, settings=self.doebuild_settings) - return process - - def _metadata_callback(self, cpv, ebuild_path, repo_path, metadata, mtime): - - i = metadata - if hasattr(metadata, "items"): - i = iter(metadata.items()) - metadata = dict(i) - - if metadata.get("INHERITED", False): - metadata["_eclasses_"] = self._repo_info[repo_path - ].eclass_db.get_eclass_data(metadata["INHERITED"].split()) - else: - metadata["_eclasses_"] = {} - - metadata.pop("INHERITED", None) - metadata["_mtime_"] = mtime - - eapi = metadata.get("EAPI") - if not eapi or not eapi.strip(): - eapi = "0" - metadata["EAPI"] = eapi - if not eapi_is_supported(eapi): - for k in set(metadata).difference(("_mtime_", "_eclasses_")): - metadata[k] = "" - metadata["EAPI"] = "-" + eapi.lstrip("-") + def _write_cache(self, cpv, repo_path, metadata, ebuild_hash): try: - self.auxdb[repo_path][cpv] = metadata + cache = self.auxdb[repo_path] + chf = cache.validation_chf + metadata['_%s_' % chf] = getattr(ebuild_hash, chf) except CacheError: # Normally this shouldn't happen, so we'll show # a traceback for debugging purposes. traceback.print_exc() - return metadata + cache = None + + if cache is not None: + try: + cache[cpv] = metadata + except CacheError: + # Normally this shouldn't happen, so we'll show + # a traceback for debugging purposes. + traceback.print_exc() def _pull_valid_cache(self, cpv, ebuild_path, repo_path): try: - # Don't use unicode-wrapped os module, for better performance. - st = _os.stat(_unicode_encode(ebuild_path, - encoding=_encodings['fs'], errors='strict')) - emtime = st[stat.ST_MTIME] - except OSError: + ebuild_hash = eclass_cache.hashed_path(ebuild_path) + # snag mtime since we use it later, and to trigger stat failure + # if it doesn't exist + ebuild_hash.mtime + except FileNotFound: writemsg(_("!!! aux_get(): ebuild for " \ "'%s' does not exist at:\n") % (cpv,), noiselevel=-1) writemsg("!!! %s\n" % ebuild_path, noiselevel=-1) @@ -401,39 +364,39 @@ class portdbapi(dbapi): pregen_auxdb = self._pregen_auxdb.get(repo_path) if pregen_auxdb is not None: auxdbs.append(pregen_auxdb) + ro_auxdb = self._ro_auxdb.get(repo_path) + if ro_auxdb is not None: + auxdbs.append(ro_auxdb) auxdbs.append(self.auxdb[repo_path]) - eclass_db = self._repo_info[repo_path].eclass_db + eclass_db = self.repositories.get_repo_for_location(repo_path).eclass_db - doregen = True for auxdb in auxdbs: try: metadata = auxdb[cpv] except KeyError: - pass + continue except CacheError: - if auxdb is not pregen_auxdb: + if not auxdb.readonly: try: del auxdb[cpv] - except KeyError: - pass - except CacheError: + except (KeyError, CacheError): pass - else: - eapi = metadata.get('EAPI', '').strip() - if not eapi: - eapi = '0' - if not (eapi[:1] == '-' and eapi_is_supported(eapi[1:])) and \ - emtime == metadata['_mtime_'] and \ - eclass_db.is_eclass_data_valid(metadata['_eclasses_']): - doregen = False - - if not doregen: + continue + eapi = metadata.get('EAPI', '').strip() + if not eapi: + eapi = '0' + metadata['EAPI'] = eapi + if not eapi_is_supported(eapi): + # Since we're supposed to be able to efficiently obtain the + # EAPI from _parse_eapi_ebuild_head, we disregard cache entries + # for unsupported EAPIs. 
+ continue + if auxdb.validate_entry(metadata, ebuild_hash, eclass_db): break - - if doregen: + else: metadata = None - return (metadata, st, emtime) + return (metadata, ebuild_hash) def aux_get(self, mycpv, mylist, mytree=None, myrepo=None): "stub code for returning auxilliary db information, such as SLOT, DEPEND, etc." @@ -445,15 +408,22 @@ class portdbapi(dbapi): if mytree is None: raise KeyError(myrepo) - if not mytree: + if mytree is not None and len(self.porttrees) == 1 \ + and mytree == self.porttrees[0]: + # mytree matches our only tree, so it's safe to + # ignore mytree and cache the result + mytree = None + myrepo = None + + if mytree is None: cache_me = True - if not mytree and not self._known_keys.intersection( + if mytree is None and not self._known_keys.intersection( mylist).difference(self._aux_cache_keys): aux_cache = self._aux_cache.get(mycpv) if aux_cache is not None: return [aux_cache.get(x, "") for x in mylist] cache_me = True - global auxdbkeys, auxdbkeylen + try: cat, pkg = mycpv.split("/", 1) except ValueError: @@ -467,60 +437,35 @@ class portdbapi(dbapi): _("ebuild not found for '%s'") % mycpv, noiselevel=1) raise KeyError(mycpv) - mydata, st, emtime = self._pull_valid_cache(mycpv, myebuild, mylocation) + mydata, ebuild_hash = self._pull_valid_cache(mycpv, myebuild, mylocation) doregen = mydata is None if doregen: if myebuild in self._broken_ebuilds: raise KeyError(mycpv) - self.doebuild_settings.setcpv(mycpv) - eapi = None - - if eapi is None and \ - 'parse-eapi-ebuild-head' in self.doebuild_settings.features: - eapi = portage._parse_eapi_ebuild_head(io.open( - _unicode_encode(myebuild, - encoding=_encodings['fs'], errors='strict'), - mode='r', encoding=_encodings['repo.content'], - errors='replace')) - - if eapi is not None: - self.doebuild_settings.configdict['pkg']['EAPI'] = eapi - - if eapi is not None and not portage.eapi_is_supported(eapi): - mydata = self._metadata_callback( - mycpv, myebuild, mylocation, {'EAPI':eapi}, emtime) - else: - proc = EbuildMetadataPhase(cpv=mycpv, ebuild_path=myebuild, - ebuild_mtime=emtime, - metadata_callback=self._metadata_callback, portdb=self, - repo_path=mylocation, - scheduler=PollScheduler().sched_iface, - settings=self.doebuild_settings) + proc = EbuildMetadataPhase(cpv=mycpv, + ebuild_hash=ebuild_hash, portdb=self, + repo_path=mylocation, scheduler=self._scheduler, + settings=self.doebuild_settings) - proc.start() - proc.wait() + proc.start() + proc.wait() - if proc.returncode != os.EX_OK: - self._broken_ebuilds.add(myebuild) - raise KeyError(mycpv) + if proc.returncode != os.EX_OK: + self._broken_ebuilds.add(myebuild) + raise KeyError(mycpv) - mydata = proc.metadata + mydata = proc.metadata - # do we have a origin repository name for the current package mydata["repository"] = self.repositories.get_name_for_location(mylocation) - mydata["INHERITED"] = ' '.join(mydata.get("_eclasses_", [])) - mydata["_mtime_"] = st[stat.ST_MTIME] - + mydata["_mtime_"] = ebuild_hash.mtime eapi = mydata.get("EAPI") if not eapi: eapi = "0" mydata["EAPI"] = eapi - if not eapi_is_supported(eapi): - for k in set(mydata).difference(("_mtime_", "_eclasses_")): - mydata[k] = "" - mydata["EAPI"] = "-" + eapi.lstrip("-") + if eapi_is_supported(eapi): + mydata["INHERITED"] = " ".join(mydata.get("_eclasses_", [])) #finally, we look at our internal cache entry and return the requested data. 
returnme = [mydata.get(x, "") for x in mylist] @@ -546,7 +491,7 @@ class portdbapi(dbapi): @param mytree: The canonical path of the tree in which the ebuild is located, or None for automatic lookup @type mypkg: String - @returns: A dict which maps each file name to a set of alternative + @return: A dict which maps each file name to a set of alternative URIs. @rtype: dict """ @@ -565,7 +510,7 @@ class portdbapi(dbapi): # since callers already handle it. raise portage.exception.InvalidDependString( "getFetchMap(): '%s' has unsupported EAPI: '%s'" % \ - (mypkg, eapi.lstrip("-"))) + (mypkg, eapi)) return _parse_uri_map(mypkg, {'EAPI':eapi,'SRC_URI':myuris}, use=useflags) @@ -576,7 +521,9 @@ class portdbapi(dbapi): if myebuild is None: raise AssertionError(_("ebuild not found for '%s'") % mypkg) pkgdir = os.path.dirname(myebuild) - mf = Manifest(pkgdir, self.settings["DISTDIR"]) + mf = self.repositories.get_repo_for_location( + os.path.dirname(os.path.dirname(pkgdir))).load_manifest( + pkgdir, self.settings["DISTDIR"]) checksums = mf.getDigests() if not checksums: if debug: @@ -597,7 +544,7 @@ class portdbapi(dbapi): mystat = None try: mystat = os.stat(file_path) - except OSError as e: + except OSError: pass if mystat is None: existing_size = 0 @@ -644,7 +591,9 @@ class portdbapi(dbapi): if myebuild is None: raise AssertionError(_("ebuild not found for '%s'") % mypkg) pkgdir = os.path.dirname(myebuild) - mf = Manifest(pkgdir, self.settings["DISTDIR"]) + mf = self.repositories.get_repo_for_location( + os.path.dirname(os.path.dirname(pkgdir))) + mf = mf.load_manifest(pkgdir, self.settings["DISTDIR"]) mysums = mf.getDigests() failures = {} @@ -706,15 +655,22 @@ class portdbapi(dbapi): return l def cp_list(self, mycp, use_cache=1, mytree=None): + # NOTE: Cache can be safely shared with the match cache, since the + # match cache uses the result from dep_expand for the cache_key. + if self.frozen and mytree is not None \ + and len(self.porttrees) == 1 \ + and mytree == self.porttrees[0]: + # mytree matches our only tree, so it's safe to + # ignore mytree and cache the result + mytree = None + if self.frozen and mytree is None: cachelist = self.xcache["cp-list"].get(mycp) if cachelist is not None: # Try to propagate this to the match-all cache here for # repoman since he uses separate match-all caches for each - # profile (due to old-style virtuals). Do not propagate - # old-style virtuals since cp_list() doesn't expand them. - if not (not cachelist and mycp.startswith("virtual/")): - self.xcache["match-all"][mycp] = cachelist + # profile (due to differences in _get_implicit_iuse). + self.xcache["match-all"][(mycp, mycp)] = cachelist return cachelist[:] mysplit = mycp.split("/") invalid_category = mysplit[0] not in self._categories @@ -752,7 +708,7 @@ class portdbapi(dbapi): writemsg(_("\nInvalid ebuild version: %s\n") % \ os.path.join(oroot, mycp, x), noiselevel=-1) continue - d[mysplit[0]+"/"+pf] = None + d[_pkg_str(mysplit[0]+"/"+pf)] = None if invalid_category and d: writemsg(_("\n!!! '%s' has a category that is not listed in " \ "%setc/portage/categories\n") % \ @@ -766,14 +722,11 @@ class portdbapi(dbapi): if self.frozen and mytree is None: cachelist = mylist[:] self.xcache["cp-list"][mycp] = cachelist - # Do not propagate old-style virtuals since - # cp_list() doesn't expand them. 
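# --- Editor's note, on the removed condition below --------------------------
# The double negative `if not (not cachelist and mycp.startswith("virtual/"))`
# reads more naturally as:
#     if cachelist or not mycp.startswith("virtual/"):
#         self.xcache["match-all"][mycp] = cachelist
# i.e. propagate to the match-all cache unless the list is empty and the
# package is an old-style virtual. The replacement stores unconditionally
# under a (mycp, mycp) key, since old-style virtual expansion is no longer a
# concern here.
# ----------------------------------------------------------------------------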
- if not (not cachelist and mycp.startswith("virtual/")): - self.xcache["match-all"][mycp] = cachelist + self.xcache["match-all"][(mycp, mycp)] = cachelist return mylist def freeze(self): - for x in "bestmatch-visible", "cp-list", "list-visible", "match-all", \ + for x in "bestmatch-visible", "cp-list", "match-all", \ "match-all-cpv-only", "match-visible", "minimum-all", \ "minimum-visible": self.xcache[x]={} @@ -785,12 +738,12 @@ class portdbapi(dbapi): def xmatch(self,level,origdep,mydep=None,mykey=None,mylist=None): "caching match function; very trick stuff" - #if no updates are being made to the tree, we can consult our xcache... - if self.frozen: - try: - return self.xcache[level][origdep][:] - except KeyError: - pass + if level == "list-visible": + level = "match-visible" + warnings.warn("The 'list-visible' mode of " + "portage.dbapi.porttree.portdbapi.xmatch " + "has been renamed to match-visible", + DeprecationWarning, stacklevel=2) if mydep is None: #this stuff only runs on first call of xmatch() @@ -798,12 +751,24 @@ class portdbapi(dbapi): mydep = dep_expand(origdep, mydb=self, settings=self.settings) mykey = mydep.cp + #if no updates are being made to the tree, we can consult our xcache... + cache_key = None + if self.frozen: + cache_key = (mydep, mydep.unevaluated_atom) + try: + return self.xcache[level][cache_key][:] + except KeyError: + pass + myval = None mytree = None if mydep.repo is not None: mytree = self.treemap.get(mydep.repo) if mytree is None: - myval = [] + if level.startswith("match-"): + myval = [] + else: + myval = "" if myval is not None: # Unknown repo, empty result. @@ -822,27 +787,8 @@ class portdbapi(dbapi): myval = match_from_list(mydep, self.cp_list(mykey, mytree=mytree)) - elif level == "list-visible": - #a list of all visible packages, not called directly (just by xmatch()) - #myval = self.visible(self.cp_list(mykey)) - - myval = self.gvisible(self.visible( - self.cp_list(mykey, mytree=mytree))) - elif level == "minimum-all": - # Find the minimum matching version. This is optimized to - # minimize the number of metadata accesses (improves performance - # especially in cases where metadata needs to be generated). - if mydep == mykey: - cpv_iter = iter(self.cp_list(mykey, mytree=mytree)) - else: - cpv_iter = self._iter_match(mydep, - self.cp_list(mykey, mytree=mytree)) - try: - myval = next(cpv_iter) - except StopIteration: - myval = "" - - elif level in ("minimum-visible", "bestmatch-visible"): + elif level in ("bestmatch-visible", "match-all", "match-visible", + "minimum-all", "minimum-visible"): # Find the minimum matching visible version. This is optimized to # minimize the number of metadata accesses (improves performance # especially in cases where metadata needs to be generated). @@ -851,158 +797,172 @@ class portdbapi(dbapi): else: mylist = match_from_list(mydep, self.cp_list(mykey, mytree=mytree)) - myval = "" - settings = self.settings - local_config = settings.local_config + + visibility_filter = level not in ("match-all", "minimum-all") + single_match = level not in ("match-all", "match-visible") + myval = [] aux_keys = list(self._aux_cache_keys) - if level == "minimum-visible": + if level == "bestmatch-visible": + iterfunc = reversed + else: iterfunc = iter + + if mydep.repo is not None: + repos = [mydep.repo] else: - iterfunc = reversed + # We iterate over self.porttrees, since it's common to + # tweak this attribute in order to adjust match behavior. 
+ repos = [] + for tree in reversed(self.porttrees): + repos.append(self.repositories.get_name_for_location(tree)) + for cpv in iterfunc(mylist): - try: - metadata = dict(zip(aux_keys, - self.aux_get(cpv, aux_keys))) - except KeyError: - # ebuild masked by corruption - continue - if not eapi_is_supported(metadata["EAPI"]): - continue - if mydep.slot and mydep.slot != metadata["SLOT"]: - continue - if settings._getMissingKeywords(cpv, metadata): - continue - if settings._getMaskAtom(cpv, metadata): - continue - if settings._getProfileMaskAtom(cpv, metadata): - continue - if local_config: - metadata["USE"] = "" - if "?" in metadata["LICENSE"] or "?" in metadata["PROPERTIES"]: - self.doebuild_settings.setcpv(cpv, mydb=metadata) - metadata["USE"] = self.doebuild_settings.get("USE", "") + for repo in repos: try: - if settings._getMissingLicenses(cpv, metadata): - continue - if settings._getMissingProperties(cpv, metadata): - continue - except InvalidDependString: + metadata = dict(zip(aux_keys, + self.aux_get(cpv, aux_keys, myrepo=repo))) + except KeyError: + # ebuild not in this repo, or masked by corruption continue - if mydep.use: - has_iuse = False - for has_iuse in self._iter_match_use(mydep, [cpv]): - break - if not has_iuse: + + if visibility_filter and not self._visible(cpv, metadata): continue - myval = cpv - break + + if mydep.slot is not None and \ + mydep.slot != metadata["SLOT"]: + continue + + if mydep.unevaluated_atom.use is not None and \ + not self._match_use(mydep, cpv, metadata): + continue + + myval.append(cpv) + # only yield a given cpv once + break + + if myval and single_match: + break + + if single_match: + if myval: + myval = myval[0] + else: + myval = "" + elif level == "bestmatch-list": #dep match -- find best match but restrict search to sublist - #no point in calling xmatch again since we're not caching list deps - + warnings.warn("The 'bestmatch-list' mode of " + "portage.dbapi.porttree.portdbapi.xmatch is deprecated", + DeprecationWarning, stacklevel=2) myval = best(list(self._iter_match(mydep, mylist))) elif level == "match-list": #dep match -- find all matches but restrict search to sublist (used in 2nd half of visible()) - + warnings.warn("The 'match-list' mode of " + "portage.dbapi.porttree.portdbapi.xmatch is deprecated", + DeprecationWarning, stacklevel=2) myval = list(self._iter_match(mydep, mylist)) - elif level == "match-visible": - #dep match -- find all visible matches - #get all visible packages, then get the matching ones - myval = list(self._iter_match(mydep, - self.xmatch("list-visible", mykey, mydep=Atom(mykey), mykey=mykey))) - elif level == "match-all": - #match *all* visible *and* masked packages - if mydep == mykey: - myval = self.cp_list(mykey, mytree=mytree) - else: - myval = list(self._iter_match(mydep, - self.cp_list(mykey, mytree=mytree))) else: raise AssertionError( "Invalid level argument: '%s'" % level) - if self.frozen and (level not in ["match-list", "bestmatch-list"]): - self.xcache[level][mydep] = myval - if origdep and origdep != mydep: - self.xcache[level][origdep] = myval - return myval[:] + if self.frozen: + xcache_this_level = self.xcache.get(level) + if xcache_this_level is not None: + xcache_this_level[cache_key] = myval + if not isinstance(myval, _pkg_str): + myval = myval[:] + + return myval def match(self, mydep, use_cache=1): return self.xmatch("match-visible", mydep) - def visible(self, mylist): - """two functions in one. 
Accepts a list of cpv values and uses the package.mask *and* - packages file to remove invisible entries, returning remaining items. This function assumes - that all entries in mylist have the same category and package name.""" - if not mylist: - return [] - - db_keys = ["SLOT"] - visible = [] - getMaskAtom = self.settings._getMaskAtom - getProfileMaskAtom = self.settings._getProfileMaskAtom - for cpv in mylist: - try: - metadata = dict(zip(db_keys, self.aux_get(cpv, db_keys))) - except KeyError: - # masked by corruption - continue - if not metadata["SLOT"]: - continue - if getMaskAtom(cpv, metadata): - continue - if getProfileMaskAtom(cpv, metadata): - continue - visible.append(cpv) - return visible - - def gvisible(self,mylist): - "strip out group-masked (not in current group) entries" + def gvisible(self, mylist): + warnings.warn("The 'gvisible' method of " + "portage.dbapi.porttree.portdbapi " + "is deprecated", + DeprecationWarning, stacklevel=2) + return list(self._iter_visible(iter(mylist))) - if mylist is None: + def visible(self, cpv_iter): + warnings.warn("The 'visible' method of " + "portage.dbapi.porttree.portdbapi " + "is deprecated", + DeprecationWarning, stacklevel=2) + if cpv_iter is None: return [] - newlist=[] + return list(self._iter_visible(iter(cpv_iter))) + + def _iter_visible(self, cpv_iter, myrepo=None): + """ + Return a new list containing only visible packages. + """ aux_keys = list(self._aux_cache_keys) metadata = {} - local_config = self.settings.local_config - chost = self.settings.get('CHOST', '') - accept_chost = self.settings._accept_chost - for mycpv in mylist: - metadata.clear() - try: - metadata.update(zip(aux_keys, self.aux_get(mycpv, aux_keys))) - except KeyError: - continue - except PortageException as e: - writemsg("!!! Error: aux_get('%s', %s)\n" % (mycpv, aux_keys), - noiselevel=-1) - writemsg("!!! %s\n" % (e,), noiselevel=-1) - del e - continue - eapi = metadata["EAPI"] - if not eapi_is_supported(eapi): - continue - if _eapi_is_deprecated(eapi): - continue - if self.settings._getMissingKeywords(mycpv, metadata): - continue - if local_config: - metadata['CHOST'] = chost - if not accept_chost(mycpv, metadata): - continue - metadata["USE"] = "" - if "?" in metadata["LICENSE"] or "?" in metadata["PROPERTIES"]: - self.doebuild_settings.setcpv(mycpv, mydb=metadata) - metadata['USE'] = self.doebuild_settings['PORTAGE_USE'] + + if myrepo is not None: + repos = [myrepo] + else: + # We iterate over self.porttrees, since it's common to + # tweak this attribute in order to adjust match behavior. + repos = [] + for tree in reversed(self.porttrees): + repos.append(self.repositories.get_name_for_location(tree)) + + for mycpv in cpv_iter: + for repo in repos: + metadata.clear() try: - if self.settings._getMissingLicenses(mycpv, metadata): - continue - if self.settings._getMissingProperties(mycpv, metadata): - continue - except InvalidDependString: + metadata.update(zip(aux_keys, + self.aux_get(mycpv, aux_keys, myrepo=repo))) + except KeyError: + continue + except PortageException as e: + writemsg("!!! Error: aux_get('%s', %s)\n" % + (mycpv, aux_keys), noiselevel=-1) + writemsg("!!! 
%s\n" % (e,), noiselevel=-1) + del e continue - newlist.append(mycpv) - return newlist + + if not self._visible(mycpv, metadata): + continue + + yield mycpv + # only yield a given cpv once + break + + def _visible(self, cpv, metadata): + eapi = metadata["EAPI"] + if not eapi_is_supported(eapi): + return False + if _eapi_is_deprecated(eapi): + return False + if not metadata["SLOT"]: + return False + + settings = self.settings + if settings._getMaskAtom(cpv, metadata): + return False + if settings._getMissingKeywords(cpv, metadata): + return False + if settings.local_config: + metadata['CHOST'] = settings.get('CHOST', '') + if not settings._accept_chost(cpv, metadata): + return False + metadata["USE"] = "" + if "?" in metadata["LICENSE"] or \ + "?" in metadata["PROPERTIES"]: + self.doebuild_settings.setcpv(cpv, mydb=metadata) + metadata['USE'] = self.doebuild_settings['PORTAGE_USE'] + try: + if settings._getMissingLicenses(cpv, metadata): + return False + if settings._getMissingProperties(cpv, metadata): + return False + except InvalidDependString: + return False + + return True def close_portdbapi_caches(): for i in portdbapi.portdbapi_instances: @@ -1011,7 +971,7 @@ def close_portdbapi_caches(): portage.process.atexit_register(portage.portageexit) class portagetree(object): - def __init__(self, root=None, virtual=None, settings=None): + def __init__(self, root=None, virtual=DeprecationWarning, settings=None): """ Constructor for a PortageTree @@ -1034,8 +994,14 @@ class portagetree(object): "settings['ROOT'] instead.", DeprecationWarning, stacklevel=2) + if virtual is not DeprecationWarning: + warnings.warn("The 'virtual' parameter of the " + "portage.dbapi.porttree.portagetree" + " constructor is unused", + DeprecationWarning, stacklevel=2) + self.portroot = settings["PORTDIR"] - self.virtual = virtual + self.__virtual = virtual self.dbapi = portdbapi(mysettings=settings) @property @@ -1044,9 +1010,17 @@ class portagetree(object): "portage.dbapi.porttree.portagetree" + \ " is deprecated. Use " + \ "settings['ROOT'] instead.", - DeprecationWarning, stacklevel=2) + DeprecationWarning, stacklevel=3) return self.settings['ROOT'] + @property + def virtual(self): + warnings.warn("The 'virtual' attribute of " + \ + "portage.dbapi.porttree.portagetree" + \ + " is deprecated.", + DeprecationWarning, stacklevel=3) + return self.__virtual + def dep_bestmatch(self,mydep): "compatibility method" mymatch = self.dbapi.xmatch("bestmatch-visible",mydep) @@ -1077,17 +1051,14 @@ class portagetree(object): psplit = pkgsplit(mysplit[1]) return "/".join([self.portroot, mysplit[0], psplit[0], mysplit[1]])+".ebuild" - def depcheck(self, mycheck, use="yes", myusesplit=None): - return dep_check(mycheck, self.dbapi, use=use, myuse=myusesplit) - def getslot(self,mycatpkg): "Get a slot for a catpkg; assume it exists." 
myslot = "" try: myslot = self.dbapi.aux_get(mycatpkg, ["SLOT"])[0] - except SystemExit as e: + except SystemExit: raise - except Exception as e: + except Exception: pass return myslot @@ -1148,7 +1119,7 @@ def _parse_uri_map(cpv, metadata, use=None): while myuris: uri = myuris.pop() if myuris and myuris[-1] == "->": - operator = myuris.pop() + myuris.pop() distfile = myuris.pop() else: distfile = os.path.basename(uri) @@ -1163,6 +1134,5 @@ def _parse_uri_map(cpv, metadata, use=None): uri_map[distfile] = uri_set uri_set.add(uri) uri = None - operator = None return uri_map diff --git a/portage_with_autodep/pym/portage/dbapi/porttree.pyo b/portage_with_autodep/pym/portage/dbapi/porttree.pyo Binary files differnew file mode 100644 index 0000000..fb57919 --- /dev/null +++ b/portage_with_autodep/pym/portage/dbapi/porttree.pyo diff --git a/portage_with_autodep/pym/portage/dbapi/vartree.py b/portage_with_autodep/pym/portage/dbapi/vartree.py index 7f7873b..517c873 100644 --- a/portage_with_autodep/pym/portage/dbapi/vartree.py +++ b/portage_with_autodep/pym/portage/dbapi/vartree.py @@ -1,4 +1,4 @@ -# Copyright 1998-2011 Gentoo Foundation +# Copyright 1998-2012 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 __all__ = [ @@ -18,7 +18,7 @@ portage.proxy.lazyimport.lazyimport(globals(), 'portage.locks:lockdir,unlockdir,lockfile,unlockfile', 'portage.output:bold,colorize', 'portage.package.ebuild.doebuild:doebuild_environment,' + \ - '_spawn_phase', + '_merge_unicode_error', '_spawn_phase', 'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs', 'portage.update:fixdbentries', 'portage.util:apply_secpass_permissions,ConfigProtect,ensure_dirs,' + \ @@ -27,10 +27,13 @@ portage.proxy.lazyimport.lazyimport(globals(), 'portage.util.digraph:digraph', 'portage.util.env_update:env_update', 'portage.util.listdir:dircache,listdir', + 'portage.util.movefile:movefile', 'portage.util._dyn_libs.PreservedLibsRegistry:PreservedLibsRegistry', 'portage.util._dyn_libs.LinkageMapELF:LinkageMapELF@LinkageMap', - 'portage.versions:best,catpkgsplit,catsplit,cpv_getkey,pkgcmp,' + \ - '_pkgsplit@pkgsplit', + 'portage.versions:best,catpkgsplit,catsplit,cpv_getkey,vercmp,' + \ + '_pkgsplit@pkgsplit,_pkg_str', + 'subprocess', + 'tarfile', ) from portage.const import CACHE_PATH, CONFIG_MEMORY_FILE, \ @@ -41,12 +44,12 @@ from portage.exception import CommandNotFound, \ InvalidData, InvalidLocation, InvalidPackageName, \ FileNotFound, PermissionDenied, UnsupportedAPIException from portage.localization import _ -from portage.util.movefile import movefile from portage import abssymlink, _movefile, bsd_chflags # This is a special version of the os module, wrapped for unicode support. 
from portage import os +from portage import shutil from portage import _encodings from portage import _os_merge from portage import _selinux_merge @@ -60,13 +63,15 @@ from _emerge.PollScheduler import PollScheduler from _emerge.MiscFunctionsProcess import MiscFunctionsProcess import errno +import fnmatch import gc +import grp import io from itertools import chain import logging import os as _os +import pwd import re -import shutil import stat import sys import tempfile @@ -82,6 +87,9 @@ except ImportError: if sys.hexversion >= 0x3000000: basestring = str long = int + _unicode = str +else: + _unicode = unicode class vardbapi(dbapi): @@ -129,12 +137,11 @@ class vardbapi(dbapi): if settings is None: settings = portage.settings self.settings = settings - self.root = settings['ROOT'] - if _unused_param is not None and _unused_param != self.root: - warnings.warn("The first parameter of the " + \ - "portage.dbapi.vartree.vardbapi" + \ - " constructor is now unused. Use " + \ + if _unused_param is not None and _unused_param != settings['ROOT']: + warnings.warn("The first parameter of the " + "portage.dbapi.vartree.vardbapi" + " constructor is now unused. Use " "settings['ROOT'] instead.", DeprecationWarning, stacklevel=2) @@ -148,14 +155,14 @@ class vardbapi(dbapi): self._fs_lock_count = 0 if vartree is None: - vartree = portage.db[self.root]["vartree"] + vartree = portage.db[settings['EROOT']]['vartree'] self.vartree = vartree self._aux_cache_keys = set( ["BUILD_TIME", "CHOST", "COUNTER", "DEPEND", "DESCRIPTION", "EAPI", "HOMEPAGE", "IUSE", "KEYWORDS", "LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND", "repository", "RESTRICT" , "SLOT", "USE", "DEFINED_PHASES", - "REQUIRED_USE"]) + ]) self._aux_cache_obj = None self._aux_cache_filename = os.path.join(self._eroot, CACHE_PATH, "vdb_metadata.pickle") @@ -164,7 +171,7 @@ class vardbapi(dbapi): self._plib_registry = None if _ENABLE_PRESERVE_LIBS: - self._plib_registry = PreservedLibsRegistry(self.root, + self._plib_registry = PreservedLibsRegistry(settings["ROOT"], os.path.join(self._eroot, PRIVATE_PATH, "preserved_libs_registry")) @@ -175,6 +182,15 @@ class vardbapi(dbapi): self._cached_counter = None + @property + def root(self): + warnings.warn("The root attribute of " + "portage.dbapi.vartree.vardbapi" + " is deprecated. Use " + "settings['ROOT'] instead.", + DeprecationWarning, stacklevel=3) + return self.settings['ROOT'] + def getpath(self, mykey, filename=None): # This is an optimized hotspot, so don't use unicode-wrapped # os module and don't use os.path.join(). 
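[Editor's note] The hunks above and below repeatedly apply one refactoring: a plain instance attribute (vardbapi.root, vartree.root, portagetree.virtual) becomes a read-only property that emits a DeprecationWarning and defers to the settings object. A minimal sketch of that pattern, using a hypothetical class name:

import warnings

class _Sketch(object):
    def __init__(self, settings):
        self.settings = settings

    @property
    def root(self):
        # Warn at the caller's frame; stacklevel=3 as in the patch.
        warnings.warn("The root attribute is deprecated. "
            "Use settings['ROOT'] instead.",
            DeprecationWarning, stacklevel=3)
        return self.settings['ROOT']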
@@ -373,7 +389,7 @@ class vardbapi(dbapi): continue if len(mysplit) > 1: if ps[0] == mysplit[1]: - returnme.append(mysplit[0]+"/"+x) + returnme.append(_pkg_str(mysplit[0]+"/"+x)) self._cpv_sort_ascending(returnme) if use_cache: self.cpcache[mycp] = [mystat, returnme[:]] @@ -472,6 +488,7 @@ class vardbapi(dbapi): "caching match function" mydep = dep_expand( origdep, mydb=self, use_cache=use_cache, settings=self.settings) + cache_key = (mydep, mydep.unevaluated_atom) mykey = dep_getkey(mydep) mycat = catsplit(mykey)[0] if not use_cache: @@ -493,8 +510,8 @@ class vardbapi(dbapi): if mydep not in self.matchcache[mycat]: mymatch = list(self._iter_match(mydep, self.cp_list(mydep.cp, use_cache=use_cache))) - self.matchcache[mycat][mydep] = mymatch - return self.matchcache[mycat][mydep][:] + self.matchcache[mycat][cache_key] = mymatch + return self.matchcache[mycat][cache_key][:] def findname(self, mycpv, myrepo=None): return self.getpath(str(mycpv), filename=catsplit(mycpv)[1]+".ebuild") @@ -555,8 +572,11 @@ class vardbapi(dbapi): aux_cache = mypickle.load() f.close() del f - except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError) as e: - if isinstance(e, pickle.UnpicklingError): + except (AttributeError, EOFError, EnvironmentError, ValueError, pickle.UnpicklingError) as e: + if isinstance(e, EnvironmentError) and \ + getattr(e, 'errno', None) in (errno.ENOENT, errno.EACCES): + pass + else: writemsg(_unicode_decode(_("!!! Error loading '%s': %s\n")) % \ (self._aux_cache_filename, e), noiselevel=-1) del e @@ -610,7 +630,8 @@ class vardbapi(dbapi): cache_these_wants.add(x) if not cache_these_wants: - return self._aux_get(mycpv, wants) + mydata = self._aux_get(mycpv, wants) + return [mydata[x] for x in wants] cache_these = set(self._aux_cache_keys) cache_these.update(cache_these_wants) @@ -655,16 +676,15 @@ class vardbapi(dbapi): if pull_me: # pull any needed data and cache it aux_keys = list(pull_me) - for k, v in zip(aux_keys, - self._aux_get(mycpv, aux_keys, st=mydir_stat)): - mydata[k] = v + mydata.update(self._aux_get(mycpv, aux_keys, st=mydir_stat)) if not cache_valid or cache_these.difference(metadata): cache_data = {} if cache_valid and metadata: cache_data.update(metadata) for aux_key in cache_these: cache_data[aux_key] = mydata[aux_key] - self._aux_cache["packages"][mycpv] = (mydir_mtime, cache_data) + self._aux_cache["packages"][_unicode(mycpv)] = \ + (mydir_mtime, cache_data) self._aux_cache["modified"].add(mycpv) if _slot_re.match(mydata['SLOT']) is None: @@ -688,10 +708,11 @@ class vardbapi(dbapi): raise if not stat.S_ISDIR(st.st_mode): raise KeyError(mycpv) - results = [] + results = {} + env_keys = [] for x in wants: if x == "_mtime_": - results.append(st[stat.ST_MTIME]) + results[x] = st[stat.ST_MTIME] continue try: myf = io.open( @@ -703,16 +724,103 @@ class vardbapi(dbapi): myd = myf.read() finally: myf.close() - # Preserve \n for metadata that is known to - # contain multiple lines. - if self._aux_multi_line_re.match(x) is None: - myd = " ".join(myd.split()) except IOError: + if x not in self._aux_cache_keys and \ + self._aux_cache_keys_re.match(x) is None: + env_keys.append(x) + continue myd = _unicode_decode('') - if x == "EAPI" and not myd: - results.append(_unicode_decode('0')) - else: - results.append(myd) + + # Preserve \n for metadata that is known to + # contain multiple lines. 
+ if self._aux_multi_line_re.match(x) is None: + myd = " ".join(myd.split()) + + results[x] = myd + + if env_keys: + env_results = self._aux_env_search(mycpv, env_keys) + for k in env_keys: + v = env_results.get(k) + if v is None: + v = _unicode_decode('') + if self._aux_multi_line_re.match(k) is None: + v = " ".join(v.split()) + results[k] = v + + if results.get("EAPI") == "": + results[_unicode_decode("EAPI")] = _unicode_decode('0') + + return results + + def _aux_env_search(self, cpv, variables): + """ + Search environment.bz2 for the specified variables. Returns + a dict mapping variables to values, and any variables not + found in the environment will not be included in the dict. + This is useful for querying variables like ${SRC_URI} and + ${A}, which are not saved in separate files but are available + in environment.bz2 (see bug #395463). + """ + env_file = self.getpath(cpv, filename="environment.bz2") + if not os.path.isfile(env_file): + return {} + bunzip2_cmd = portage.util.shlex_split( + self.settings.get("PORTAGE_BUNZIP2_COMMAND", "")) + if not bunzip2_cmd: + bunzip2_cmd = portage.util.shlex_split( + self.settings["PORTAGE_BZIP2_COMMAND"]) + bunzip2_cmd.append("-d") + args = bunzip2_cmd + ["-c", env_file] + try: + proc = subprocess.Popen(args, stdout=subprocess.PIPE) + except EnvironmentError as e: + if e.errno != errno.ENOENT: + raise + raise portage.exception.CommandNotFound(args[0]) + + # Parts of the following code are borrowed from + # filter-bash-environment.py (keep them in sync). + var_assign_re = re.compile(r'(^|^declare\s+-\S+\s+|^declare\s+|^export\s+)([^=\s]+)=("|\')?(.*)$') + close_quote_re = re.compile(r'(\\"|"|\')\s*$') + def have_end_quote(quote, line): + close_quote_match = close_quote_re.search(line) + return close_quote_match is not None and \ + close_quote_match.group(1) == quote + + variables = frozenset(variables) + results = {} + for line in proc.stdout: + line = _unicode_decode(line, + encoding=_encodings['content'], errors='replace') + var_assign_match = var_assign_re.match(line) + if var_assign_match is not None: + key = var_assign_match.group(2) + quote = var_assign_match.group(3) + if quote is not None: + if have_end_quote(quote, + line[var_assign_match.end(2)+2:]): + value = var_assign_match.group(4) + else: + value = [var_assign_match.group(4)] + for line in proc.stdout: + line = _unicode_decode(line, + encoding=_encodings['content'], + errors='replace') + value.append(line) + if have_end_quote(quote, line): + break + value = ''.join(value) + # remove trailing quote and whitespace + value = value.rstrip()[:-1] + else: + value = var_assign_match.group(4).rstrip() + + if key in variables: + results[key] = value + + proc.wait() + proc.stdout.close() return results def aux_update(self, cpv, values): @@ -758,8 +866,7 @@ class vardbapi(dbapi): @param myroot: ignored, self._eroot is used instead """ - myroot = None - new_vdb = False + del myroot counter = -1 try: cfile = io.open( @@ -768,8 +875,9 @@ class vardbapi(dbapi): mode='r', encoding=_encodings['repo.content'], errors='replace') except EnvironmentError as e: - new_vdb = not bool(self.cpv_all()) - if not new_vdb: + # Silently allow ENOENT since files under + # /var/cache/ are allowed to disappear. + if e.errno != errno.ENOENT: writemsg(_("!!! Unable to read COUNTER file: '%s'\n") % \ self._counter_path, noiselevel=-1) writemsg("!!! 
%s\n" % str(e), noiselevel=-1) @@ -806,10 +914,6 @@ class vardbapi(dbapi): if pkg_counter > max_counter: max_counter = pkg_counter - if counter < 0 and not new_vdb: - writemsg(_("!!! Initializing COUNTER to " \ - "value of %d\n") % max_counter, noiselevel=-1) - return max_counter + 1 def counter_tick_core(self, myroot=None, incrementing=1, mycpv=None): @@ -823,7 +927,7 @@ class vardbapi(dbapi): @param myroot: ignored, self._eroot is used instead @param mycpv: ignored @rtype: int - @returns: new counter value + @return: new counter value """ myroot = None mycpv = None @@ -959,7 +1063,7 @@ class vardbapi(dbapi): counter = int(counter) except ValueError: counter = 0 - return (cpv, counter, mtime) + return (_unicode(cpv), counter, mtime) class _owners_db(object): @@ -1149,24 +1253,38 @@ class vardbapi(dbapi): class vartree(object): "this tree will scan a var/db/pkg database located at root (passed to init)" - def __init__(self, root=None, virtual=None, categories=None, + def __init__(self, root=None, virtual=DeprecationWarning, categories=None, settings=None): if settings is None: settings = portage.settings - self.root = settings['ROOT'] - if root is not None and root != self.root: - warnings.warn("The 'root' parameter of the " + \ - "portage.dbapi.vartree.vartree" + \ - " constructor is now unused. Use " + \ + if root is not None and root != settings['ROOT']: + warnings.warn("The 'root' parameter of the " + "portage.dbapi.vartree.vartree" + " constructor is now unused. Use " "settings['ROOT'] instead.", DeprecationWarning, stacklevel=2) + if virtual is not DeprecationWarning: + warnings.warn("The 'virtual' parameter of the " + "portage.dbapi.vartree.vartree" + " constructor is unused", + DeprecationWarning, stacklevel=2) + self.settings = settings self.dbapi = vardbapi(settings=settings, vartree=self) self.populated = 1 + @property + def root(self): + warnings.warn("The root attribute of " + "portage.dbapi.vartree.vartree" + " is deprecated. Use " + "settings['ROOT'] instead.", + DeprecationWarning, stacklevel=3) + return self.settings['ROOT'] + def getpath(self, mykey, filename=None): return self.dbapi.getpath(mykey, filename=filename) @@ -1276,6 +1394,20 @@ class dblink(object): r')$' ) + # These files are generated by emerge, so we need to remove + # them when they are the only thing left in a directory. 
+ _infodir_cleanup = frozenset(["dir", "dir.old"]) + + _ignored_unlink_errnos = ( + errno.EBUSY, errno.ENOENT, + errno.ENOTDIR, errno.EISDIR) + + _ignored_rmdir_errnos = ( + errno.EEXIST, errno.ENOTEMPTY, + errno.EBUSY, errno.ENOENT, + errno.ENOTDIR, errno.EISDIR, + errno.EPERM) + def __init__(self, cat, pkg, myroot=None, settings=None, treetype=None, vartree=None, blockers=None, scheduler=None, pipe=None): """ @@ -1300,22 +1432,23 @@ class dblink(object): raise TypeError("settings argument is required") mysettings = settings - myroot = settings['ROOT'] + self._eroot = mysettings['EROOT'] self.cat = cat self.pkg = pkg self.mycpv = self.cat + "/" + self.pkg - self.mysplit = list(catpkgsplit(self.mycpv)[1:]) - self.mysplit[0] = "%s/%s" % (self.cat, self.mysplit[0]) + if self.mycpv == settings.mycpv and \ + isinstance(settings.mycpv, _pkg_str): + self.mycpv = settings.mycpv + else: + self.mycpv = _pkg_str(self.mycpv) + self.mysplit = list(self.mycpv.cpv_split[1:]) + self.mysplit[0] = self.mycpv.cp self.treetype = treetype if vartree is None: - vartree = portage.db[myroot]["vartree"] + vartree = portage.db[self._eroot]["vartree"] self.vartree = vartree self._blockers = blockers self._scheduler = scheduler - - # WARNING: EROOT support is experimental and may be incomplete - # for cases in which EPREFIX is non-empty. - self._eroot = mysettings['EROOT'] self.dbroot = normalize_path(os.path.join(self._eroot, VDB_PATH)) self.dbcatdir = self.dbroot+"/"+cat self.dbpkgdir = self.dbcatdir+"/"+pkg @@ -1324,14 +1457,14 @@ class dblink(object): self.settings = mysettings self._verbose = self.settings.get("PORTAGE_VERBOSE") == "1" - self.myroot=myroot + self.myroot = self.settings['ROOT'] self._installed_instance = None self.contentscache = None self._contents_inodes = None self._contents_basenames = None self._linkmap_broken = False - self._md5_merge_map = {} - self._hash_key = (self.myroot, self.mycpv) + self._hardlink_merge_map = {} + self._hash_key = (self._eroot, self.mycpv) self._protect_obj = None self._pipe = pipe @@ -1610,7 +1743,7 @@ class dblink(object): PreservedLibsRegistry yet. @type preserve_paths: set @rtype: Integer - @returns: + @return: 1. os.EX_OK if everything went well. 2. return code of the failed phase (for prerm, postrm, cleanrm) """ @@ -1839,16 +1972,19 @@ class dblink(object): else: self.settings.pop("PORTAGE_LOG_FILE", None) - # Lock the config memory file to prevent symlink creation - # in merge_contents from overlapping with env-update. - self.vartree.dbapi._fs_lock() - try: - env_update(target_root=self.settings['ROOT'], - prev_mtimes=ldpath_mtimes, - contents=contents, env=self.settings.environ(), - writemsg_level=self._display_merge) - finally: - self.vartree.dbapi._fs_unlock() + env_update(target_root=self.settings['ROOT'], + prev_mtimes=ldpath_mtimes, + contents=contents, env=self.settings, + writemsg_level=self._display_merge, vardbapi=self.vartree.dbapi) + + unmerge_with_replacement = preserve_paths is not None + if not unmerge_with_replacement: + # When there's a replacement package which calls us via treewalk, + # treewalk will automatically call _prune_plib_registry for us. + # Otherwise, we need to call _prune_plib_registry ourselves. + # Don't pass in the "unmerge=True" flag here, since that flag + # is intended to be used _prior_ to unmerge, not after. 
+ self._prune_plib_registry() return os.EX_OK @@ -1871,6 +2007,10 @@ class dblink(object): log_path=log_path, background=background, level=level, noiselevel=noiselevel) + def _show_unmerge(self, zing, desc, file_type, file_name): + self._display_merge("%s %s %s %s\n" % \ + (zing, desc.ljust(8), file_type, file_name)) + def _unmerge_pkgfiles(self, pkgfiles, others_in_slot): """ @@ -1887,6 +2027,9 @@ class dblink(object): os = _os_merge perf_md5 = perform_md5 showMessage = self._display_merge + show_unmerge = self._show_unmerge + ignored_unlink_errnos = self._ignored_unlink_errnos + ignored_rmdir_errnos = self._ignored_rmdir_errnos if not pkgfiles: showMessage(_("No package files given... Grabbing a set.\n")) @@ -1904,9 +2047,6 @@ class dblink(object): settings=self.settings, vartree=self.vartree, treetype="vartree", pipe=self._pipe)) - dest_root = self._eroot - dest_root_len = len(dest_root) - 1 - cfgfiledict = grabdict(self.vartree.dbapi._conf_mem_file) stale_confmem = [] protected_symlinks = {} @@ -1922,14 +2062,6 @@ class dblink(object): #process symlinks second-to-last, directories last. mydirs = set() - ignored_unlink_errnos = ( - errno.EBUSY, errno.ENOENT, - errno.ENOTDIR, errno.EISDIR) - ignored_rmdir_errnos = ( - errno.EEXIST, errno.ENOTEMPTY, - errno.EBUSY, errno.ENOENT, - errno.ENOTDIR, errno.EISDIR, - errno.EPERM) modprotect = os.path.join(self._eroot, "lib/modules/") def unlink(file_name, lstatobj): @@ -1965,10 +2097,6 @@ class dblink(object): # Restore the parent flags we saved before unlinking bsd_chflags.chflags(parent_name, pflags) - def show_unmerge(zing, desc, file_type, file_name): - showMessage("%s %s %s %s\n" % \ - (zing, desc.ljust(8), file_type, file_name)) - unmerge_desc = {} unmerge_desc["cfgpro"] = _("cfgpro") unmerge_desc["replaced"] = _("replaced") @@ -1980,14 +2108,12 @@ class dblink(object): unmerge_desc["!mtime"] = _("!mtime") unmerge_desc["!obj"] = _("!obj") unmerge_desc["!sym"] = _("!sym") + unmerge_desc["!prefix"] = _("!prefix") real_root = self.settings['ROOT'] real_root_len = len(real_root) - 1 - eroot_split_len = len(self.settings["EROOT"].split(os.sep)) - 1 + eroot = self.settings["EROOT"] - # These files are generated by emerge, so we need to remove - # them when they are the only thing left in a directory. - infodir_cleanup = frozenset(["dir", "dir.old"]) infodirs = frozenset(infodir for infodir in chain( self.settings.get("INFOPATH", "").split(":"), self.settings.get("INFODIR", "").split(":")) if infodir) @@ -2023,6 +2149,12 @@ class dblink(object): file_data = pkgfiles[objkey] file_type = file_data[0] + + # don't try to unmerge the prefix offset itself + if len(obj) <= len(eroot) or not obj.startswith(eroot): + show_unmerge("---", unmerge_desc["!prefix"], file_type, obj) + continue + statobj = None try: statobj = os.stat(obj) @@ -2216,78 +2348,13 @@ class dblink(object): elif pkgfiles[objkey][0] == "dev": show_unmerge("---", "", file_type, obj) - mydirs = sorted(mydirs) - mydirs.reverse() + self._unmerge_dirs(mydirs, infodirs_inodes, + protected_symlinks, unmerge_desc, unlink, os) + mydirs.clear() - for obj, inode_key in mydirs: - # Treat any directory named "info" as a candidate here, - # since it might have been in INFOPATH previously even - # though it may not be there now. 
- if inode_key in infodirs_inodes or \ - os.path.basename(obj) == "info": - try: - remaining = os.listdir(obj) - except OSError: - pass - else: - cleanup_info_dir = () - if remaining and \ - len(remaining) <= len(infodir_cleanup): - if not set(remaining).difference(infodir_cleanup): - cleanup_info_dir = remaining - - for child in cleanup_info_dir: - child = os.path.join(obj, child) - try: - lstatobj = os.lstat(child) - if stat.S_ISREG(lstatobj.st_mode): - unlink(child, lstatobj) - show_unmerge("<<<", "", "obj", child) - except EnvironmentError as e: - if e.errno not in ignored_unlink_errnos: - raise - del e - show_unmerge("!!!", "", "obj", child) - try: - if bsd_chflags: - lstatobj = os.lstat(obj) - if lstatobj.st_flags != 0: - bsd_chflags.lchflags(obj, 0) - parent_name = os.path.dirname(obj) - # Use normal stat/chflags for the parent since we want to - # follow any symlinks to the real parent directory. - pflags = os.stat(parent_name).st_flags - if pflags != 0: - bsd_chflags.chflags(parent_name, 0) - try: - os.rmdir(obj) - finally: - if bsd_chflags and pflags != 0: - # Restore the parent flags we saved before unlinking - bsd_chflags.chflags(parent_name, pflags) - show_unmerge("<<<", "", "dir", obj) - except EnvironmentError as e: - if e.errno not in ignored_rmdir_errnos: - raise - if e.errno != errno.ENOENT: - show_unmerge("---", unmerge_desc["!empty"], "dir", obj) - del e - else: - # When a directory is successfully removed, there's - # no need to protect symlinks that point to it. - unmerge_syms = protected_symlinks.pop(inode_key, None) - if unmerge_syms is not None: - for relative_path in unmerge_syms: - obj = os.path.join(real_root, - relative_path.lstrip(os.sep)) - try: - unlink(obj, os.lstat(obj)) - show_unmerge("<<<", "", "sym", obj) - except (OSError, IOError) as e: - if e.errno not in ignored_unlink_errnos: - raise - del e - show_unmerge("!!!", "", "sym", obj) + if protected_symlinks: + self._unmerge_protected_symlinks(others_in_slot, infodirs_inodes, + protected_symlinks, unmerge_desc, unlink, os) if protected_symlinks: msg = "One or more symlinks to directories have been " + \ @@ -2313,6 +2380,168 @@ class dblink(object): #remove self from vartree database so that our own virtual gets zapped if we're the last node self.vartree.zap(self.mycpv) + def _unmerge_protected_symlinks(self, others_in_slot, infodirs_inodes, + protected_symlinks, unmerge_desc, unlink, os): + + real_root = self.settings['ROOT'] + show_unmerge = self._show_unmerge + ignored_unlink_errnos = self._ignored_unlink_errnos + + flat_list = set() + flat_list.update(*protected_symlinks.values()) + flat_list = sorted(flat_list) + + for f in flat_list: + for dblnk in others_in_slot: + if dblnk.isowner(f): + # If another package in the same slot installed + # a file via a protected symlink, return early + # and don't bother searching for any other owners. 
+ return + + msg = [] + msg.append("") + msg.append(_("Directory symlink(s) may need protection:")) + msg.append("") + + for f in flat_list: + msg.append("\t%s" % \ + os.path.join(real_root, f.lstrip(os.path.sep))) + + msg.append("") + msg.append(_("Searching all installed" + " packages for files installed via above symlink(s)...")) + msg.append("") + self._elog("elog", "postrm", msg) + + self.lockdb() + try: + owners = self.vartree.dbapi._owners.get_owners(flat_list) + self.vartree.dbapi.flush_cache() + finally: + self.unlockdb() + + for owner in list(owners): + if owner.mycpv == self.mycpv: + owners.pop(owner, None) + + if not owners: + msg = [] + msg.append(_("The above directory symlink(s) are all " + "safe to remove. Removing them now...")) + msg.append("") + self._elog("elog", "postrm", msg) + dirs = set() + for unmerge_syms in protected_symlinks.values(): + for relative_path in unmerge_syms: + obj = os.path.join(real_root, + relative_path.lstrip(os.sep)) + parent = os.path.dirname(obj) + while len(parent) > len(self._eroot): + try: + lstatobj = os.lstat(parent) + except OSError: + break + else: + dirs.add((parent, + (lstatobj.st_dev, lstatobj.st_ino))) + parent = os.path.dirname(parent) + try: + unlink(obj, os.lstat(obj)) + show_unmerge("<<<", "", "sym", obj) + except (OSError, IOError) as e: + if e.errno not in ignored_unlink_errnos: + raise + del e + show_unmerge("!!!", "", "sym", obj) + + protected_symlinks.clear() + self._unmerge_dirs(dirs, infodirs_inodes, + protected_symlinks, unmerge_desc, unlink, os) + dirs.clear() + + def _unmerge_dirs(self, dirs, infodirs_inodes, + protected_symlinks, unmerge_desc, unlink, os): + + show_unmerge = self._show_unmerge + infodir_cleanup = self._infodir_cleanup + ignored_unlink_errnos = self._ignored_unlink_errnos + ignored_rmdir_errnos = self._ignored_rmdir_errnos + real_root = self.settings['ROOT'] + + dirs = sorted(dirs) + dirs.reverse() + + for obj, inode_key in dirs: + # Treat any directory named "info" as a candidate here, + # since it might have been in INFOPATH previously even + # though it may not be there now. + if inode_key in infodirs_inodes or \ + os.path.basename(obj) == "info": + try: + remaining = os.listdir(obj) + except OSError: + pass + else: + cleanup_info_dir = () + if remaining and \ + len(remaining) <= len(infodir_cleanup): + if not set(remaining).difference(infodir_cleanup): + cleanup_info_dir = remaining + + for child in cleanup_info_dir: + child = os.path.join(obj, child) + try: + lstatobj = os.lstat(child) + if stat.S_ISREG(lstatobj.st_mode): + unlink(child, lstatobj) + show_unmerge("<<<", "", "obj", child) + except EnvironmentError as e: + if e.errno not in ignored_unlink_errnos: + raise + del e + show_unmerge("!!!", "", "obj", child) + try: + if bsd_chflags: + lstatobj = os.lstat(obj) + if lstatobj.st_flags != 0: + bsd_chflags.lchflags(obj, 0) + parent_name = os.path.dirname(obj) + # Use normal stat/chflags for the parent since we want to + # follow any symlinks to the real parent directory. 
+ pflags = os.stat(parent_name).st_flags + if pflags != 0: + bsd_chflags.chflags(parent_name, 0) + try: + os.rmdir(obj) + finally: + if bsd_chflags and pflags != 0: + # Restore the parent flags we saved before unlinking + bsd_chflags.chflags(parent_name, pflags) + show_unmerge("<<<", "", "dir", obj) + except EnvironmentError as e: + if e.errno not in ignored_rmdir_errnos: + raise + if e.errno != errno.ENOENT: + show_unmerge("---", unmerge_desc["!empty"], "dir", obj) + del e + else: + # When a directory is successfully removed, there's + # no need to protect symlinks that point to it. + unmerge_syms = protected_symlinks.pop(inode_key, None) + if unmerge_syms is not None: + for relative_path in unmerge_syms: + obj = os.path.join(real_root, + relative_path.lstrip(os.sep)) + try: + unlink(obj, os.lstat(obj)) + show_unmerge("<<<", "", "sym", obj) + except (OSError, IOError) as e: + if e.errno not in ignored_unlink_errnos: + raise + del e + show_unmerge("!!!", "", "sym", obj) + def isowner(self, filename, destroot=None): """ Check if a file belongs to this package. This may @@ -2328,7 +2557,7 @@ class dblink(object): @param destroot: @type destroot: @rtype: Boolean - @returns: + @return: 1. True if this package owns the file. 2. False if this package does not own the file. """ @@ -2857,9 +3086,13 @@ class dblink(object): os = _os_merge - collision_ignore = set([normalize_path(myignore) for myignore in \ - portage.util.shlex_split( - self.settings.get("COLLISION_IGNORE", ""))]) + collision_ignore = [] + for x in portage.util.shlex_split( + self.settings.get("COLLISION_IGNORE", "")): + if os.path.isdir(os.path.join(self._eroot, x.lstrip(os.sep))): + x = normalize_path(x) + x += "/*" + collision_ignore.append(x) # For collisions with preserved libraries, the current package # will assume ownership and the libraries will be unregistered. @@ -2960,15 +3193,12 @@ class dblink(object): if not isowned and self.isprotected(full_path): isowned = True if not isowned: + f_match = full_path[len(self._eroot)-1:] stopmerge = True - if collision_ignore: - if f in collision_ignore: + for pattern in collision_ignore: + if fnmatch.fnmatch(f_match, pattern): stopmerge = False - else: - for myignore in collision_ignore: - if f.startswith(myignore + os.path.sep): - stopmerge = False - break + break if stopmerge: collisions.append(f) return collisions, symlink_collisions, plib_collisions @@ -3121,9 +3351,10 @@ class dblink(object): if isinstance(lines, basestring): lines = [lines] for line in lines: - fields = (funcname, phase, cpv, line.rstrip('\n')) - str_buffer.append(' '.join(fields)) - str_buffer.append('\n') + for line in line.split('\n'): + fields = (funcname, phase, cpv, line) + str_buffer.append(' '.join(fields)) + str_buffer.append('\n') if str_buffer: os.write(self._pipe, _unicode_encode(''.join(str_buffer))) @@ -3157,7 +3388,7 @@ class dblink(object): @param prev_mtimes: { Filename:mtime } mapping for env_update @type prev_mtimes: Dictionary @rtype: Boolean - @returns: + @return: 1. 0 on success 2. 
1 on failure @@ -3192,17 +3423,22 @@ class dblink(object): pass continue + f = None try: - val = io.open(_unicode_encode( + f = io.open(_unicode_encode( os.path.join(inforoot, var_name), encoding=_encodings['fs'], errors='strict'), mode='r', encoding=_encodings['repo.content'], - errors='replace').readline().strip() + errors='replace') + val = f.readline().strip() except EnvironmentError as e: if e.errno != errno.ENOENT: raise del e val = '' + finally: + if f is not None: + f.close() if var_name == 'SLOT': slot = val @@ -3226,10 +3462,6 @@ class dblink(object): if not os.path.exists(self.dbcatdir): ensure_dirs(self.dbcatdir) - otherversions = [] - for v in self.vartree.dbapi.cp_list(self.mysplit[0]): - otherversions.append(v.split("/")[1]) - cp = self.mysplit[0] slot_atom = "%s:%s" % (cp, slot) @@ -3270,22 +3502,49 @@ class dblink(object): max_dblnk = dblnk self._installed_instance = max_dblnk + if self.settings.get("INSTALL_MASK") or \ + "nodoc" in self.settings.features or \ + "noinfo" in self.settings.features or \ + "noman" in self.settings.features: + # Apply INSTALL_MASK before collision-protect, since it may + # be useful to avoid collisions in some scenarios. + phase = MiscFunctionsProcess(background=False, + commands=["preinst_mask"], phase="preinst", + scheduler=self._scheduler, settings=self.settings) + phase.start() + phase.wait() + # We check for unicode encoding issues after src_install. However, # the check must be repeated here for binary packages (it's # inexpensive since we call os.walk() here anyway). unicode_errors = [] + line_ending_re = re.compile('[\n\r]') + srcroot_len = len(srcroot) + ed_len = len(self.settings["ED"]) while True: unicode_error = False + eagain_error = False myfilelist = [] mylinklist = [] paths_with_newlines = [] - srcroot_len = len(srcroot) def onerror(e): raise - for parent, dirs, files in os.walk(srcroot, onerror=onerror): + walk_iter = os.walk(srcroot, onerror=onerror) + while True: + try: + parent, dirs, files = next(walk_iter) + except StopIteration: + break + except OSError as e: + if e.errno != errno.EAGAIN: + raise + # Observed with PyPy 1.8. 
+ eagain_error = True + break + try: parent = _unicode_decode(parent, encoding=_encodings['merge'], errors='strict') @@ -3293,12 +3552,12 @@ class dblink(object): new_parent = _unicode_decode(parent, encoding=_encodings['merge'], errors='replace') new_parent = _unicode_encode(new_parent, - encoding=_encodings['merge'], errors='backslashreplace') + encoding='ascii', errors='backslashreplace') new_parent = _unicode_decode(new_parent, encoding=_encodings['merge'], errors='replace') os.rename(parent, new_parent) unicode_error = True - unicode_errors.append(new_parent[srcroot_len:]) + unicode_errors.append(new_parent[ed_len:]) break for fname in files: @@ -3311,13 +3570,13 @@ class dblink(object): new_fname = _unicode_decode(fname, encoding=_encodings['merge'], errors='replace') new_fname = _unicode_encode(new_fname, - encoding=_encodings['merge'], errors='backslashreplace') + encoding='ascii', errors='backslashreplace') new_fname = _unicode_decode(new_fname, encoding=_encodings['merge'], errors='replace') new_fpath = os.path.join(parent, new_fname) os.rename(fpath, new_fpath) unicode_error = True - unicode_errors.append(new_fpath[srcroot_len:]) + unicode_errors.append(new_fpath[ed_len:]) fname = new_fname fpath = new_fpath else: @@ -3325,7 +3584,7 @@ class dblink(object): relative_path = fpath[srcroot_len:] - if "\n" in relative_path: + if line_ending_re.search(relative_path) is not None: paths_with_newlines.append(relative_path) file_mode = os.lstat(fpath).st_mode @@ -3340,19 +3599,20 @@ class dblink(object): if unicode_error: break - if not unicode_error: + if not (unicode_error or eagain_error): break if unicode_errors: - eerror(portage._merge_unicode_error(unicode_errors)) + self._elog("eqawarn", "preinst", + _merge_unicode_error(unicode_errors)) if paths_with_newlines: msg = [] - msg.append(_("This package installs one or more files containing a newline (\\n) character:")) + msg.append(_("This package installs one or more files containing line ending characters:")) msg.append("") paths_with_newlines.sort() for f in paths_with_newlines: - msg.append("\t/%s" % (f.replace("\n", "\\n"))) + msg.append("\t/%s" % (f.replace("\n", "\\n").replace("\r", "\\r"))) msg.append("") msg.append(_("package %s NOT merged") % self.mycpv) msg.append("") @@ -3394,14 +3654,6 @@ class dblink(object): if installed_files: return 1 - # check for package collisions - blockers = self._blockers - if blockers is None: - blockers = [] - collisions, symlink_collisions, plib_collisions = \ - self._collision_protect(srcroot, destroot, - others_in_slot + blockers, myfilelist, mylinklist) - # Make sure the ebuild environment is initialized and that ${T}/elog # exists for logging of collision-protect eerror messages. if myebuild is None: @@ -3413,6 +3665,29 @@ class dblink(object): for other in others_in_slot]) prepare_build_dirs(settings=self.settings, cleanup=cleanup) + # check for package collisions + blockers = self._blockers + if blockers is None: + blockers = [] + collisions, symlink_collisions, plib_collisions = \ + self._collision_protect(srcroot, destroot, + others_in_slot + blockers, myfilelist, mylinklist) + + if symlink_collisions: + # Symlink collisions need to be distinguished from other types + # of collisions, in order to avoid confusion (see bug #409359). 
+ msg = _("Package '%s' has one or more collisions " + "between symlinks and directories, which is explicitly " + "forbidden by PMS section 13.4 (see bug #326685):") % \ + (self.settings.mycpv,) + msg = textwrap.wrap(msg, 70) + msg.append("") + for f in symlink_collisions: + msg.append("\t%s" % os.path.join(destroot, + f.lstrip(os.path.sep))) + msg.append("") + self._elog("eerror", "preinst", msg) + if collisions: collision_protect = "collision-protect" in self.settings.features protect_owned = "protect-owned" in self.settings.features @@ -3494,12 +3769,20 @@ class dblink(object): eerror([_("None of the installed" " packages claim the file(s)."), ""]) + symlink_abort_msg =_("Package '%s' NOT merged since it has " + "one or more collisions between symlinks and directories, " + "which is explicitly forbidden by PMS section 13.4 " + "(see bug #326685).") + # The explanation about the collision and how to solve # it may not be visible via a scrollback buffer, especially # if the number of file collisions is large. Therefore, # show a summary at the end. abort = False - if collision_protect: + if symlink_collisions: + abort = True + msg = symlink_abort_msg % (self.settings.mycpv,) + elif collision_protect: abort = True msg = _("Package '%s' NOT merged due to file collisions.") % \ self.settings.mycpv @@ -3507,12 +3790,6 @@ class dblink(object): abort = True msg = _("Package '%s' NOT merged due to file collisions.") % \ self.settings.mycpv - elif symlink_collisions: - abort = True - msg = _("Package '%s' NOT merged due to collision " + \ - "between a symlink and a directory which is explicitly " + \ - "forbidden by PMS (see bug #326685).") % \ - (self.settings.mycpv,) else: msg = _("Package '%s' merged despite file collisions.") % \ self.settings.mycpv @@ -3558,10 +3835,12 @@ class dblink(object): # write local package counter for recording if counter is None: counter = self.vartree.dbapi.counter_tick(mycpv=self.mycpv) - io.open(_unicode_encode(os.path.join(self.dbtmpdir, 'COUNTER'), + f = io.open(_unicode_encode(os.path.join(self.dbtmpdir, 'COUNTER'), encoding=_encodings['fs'], errors='strict'), mode='w', encoding=_encodings['repo.content'], - errors='backslashreplace').write(_unicode_decode(str(counter))) + errors='backslashreplace') + f.write(_unicode_decode(str(counter))) + f.close() self.updateprotect() @@ -3577,9 +3856,8 @@ class dblink(object): # Always behave like --noconfmem is enabled for downgrades # so that people who don't know about this option are less # likely to get confused when doing upgrade/downgrade cycles. - pv_split = catpkgsplit(self.mycpv)[1:] for other in others_in_slot: - if pkgcmp(pv_split, catpkgsplit(other.mycpv)[1:]) < 0: + if vercmp(self.mycpv.version, other.mycpv.version) < 0: cfgfiledict["IGNORE"] = 1 break @@ -3798,22 +4076,11 @@ class dblink(object): showMessage(_("!!! FAILED postinst: ")+str(a)+"\n", level=logging.ERROR, noiselevel=-1) - downgrade = False - for v in otherversions: - if pkgcmp(catpkgsplit(self.pkg)[1:], catpkgsplit(v)[1:]) < 0: - downgrade = True - - # Lock the config memory file to prevent symlink creation - # in merge_contents from overlapping with env-update. - self.vartree.dbapi._fs_lock() - try: - #update environment settings, library paths. DO NOT change symlinks. 
- env_update(makelinks=(not downgrade), - target_root=self.settings['ROOT'], prev_mtimes=prev_mtimes, - contents=contents, env=self.settings.environ(), - writemsg_level=self._display_merge) - finally: - self.vartree.dbapi._fs_unlock() + #update environment settings, library paths. DO NOT change symlinks. + env_update( + target_root=self.settings['ROOT'], prev_mtimes=prev_mtimes, + contents=contents, env=self.settings, + writemsg_level=self._display_merge, vardbapi=self.vartree.dbapi) # For gcc upgrades, preserved libs have to be removed after the # the library path has been updated. @@ -3867,7 +4134,8 @@ class dblink(object): # we do a first merge; this will recurse through all files in our srcroot but also build up a # "second hand" of symlinks to merge later - if self.mergeme(srcroot, destroot, outfile, secondhand, "", cfgfiledict, mymtime): + if self.mergeme(srcroot, destroot, outfile, secondhand, + self.settings["EPREFIX"].lstrip(os.sep), cfgfiledict, mymtime): return 1 # now, it's time for dealing our second hand; we'll loop until we can't merge anymore. The rest are @@ -3936,7 +4204,7 @@ class dblink(object): @param thismtime: The current time (typically long(time.time()) @type thismtime: Long @rtype: None or Boolean - @returns: + @return: 1. True on failure 2. None otherwise @@ -3952,6 +4220,10 @@ class dblink(object): destroot = normalize_path(destroot).rstrip(sep) + sep calc_prelink = "prelink-checksums" in self.settings.features + protect_if_modified = \ + "config-protect-if-modified" in self.settings.features and \ + self._installed_instance is not None + # this is supposed to merge a list of files. There will be 2 forms of argument passing. if isinstance(stufftomerge, basestring): #A directory is specified. Figure out protection paths, listdir() it and process it. @@ -3985,14 +4257,37 @@ class dblink(object): if stat.S_ISLNK(mymode): # we are merging a symbolic link - myabsto = abssymlink(mysrc) + # The file name of mysrc and the actual file that it points to + # will have earlier been forcefully converted to the 'merge' + # encoding if necessary, but the content of the symbolic link + # may need to be forcefully converted here. + myto = _os.readlink(_unicode_encode(mysrc, + encoding=_encodings['merge'], errors='strict')) + try: + myto = _unicode_decode(myto, + encoding=_encodings['merge'], errors='strict') + except UnicodeDecodeError: + myto = _unicode_decode(myto, encoding=_encodings['merge'], + errors='replace') + myto = _unicode_encode(myto, encoding='ascii', + errors='backslashreplace') + myto = _unicode_decode(myto, encoding=_encodings['merge'], + errors='replace') + os.unlink(mysrc) + os.symlink(myto, mysrc) + + # Pass in the symlink target in order to bypass the + # os.readlink() call inside abssymlink(), since that + # call is unsafe if the merge encoding is not ascii + # or utf_8 (see bug #382021). + myabsto = abssymlink(mysrc, target=myto) + if myabsto.startswith(srcroot): myabsto = myabsto[len(srcroot):] myabsto = myabsto.lstrip(sep) - myto = os.readlink(mysrc) if self.settings and self.settings["D"]: if myto.startswith(self.settings["D"]): - myto = myto[len(self.settings["D"]):] + myto = myto[len(self.settings["D"])-1:] # myrealto contains the path of the real file to which this symlink points. # we can simply test for existence of this file to see if the target has been merged yet myrealto = normalize_path(os.path.join(destroot, myabsto)) @@ -4170,9 +4465,18 @@ class dblink(object): # now, config file management may come into play. 
# we only need to tweak mydest if cfg file management is in play. if protected: + destmd5 = perform_md5(mydest, calc_prelink=calc_prelink) + if protect_if_modified: + contents_key = \ + self._installed_instance._match_contents(myrealdest) + if contents_key: + inst_info = self._installed_instance.getcontents()[contents_key] + if inst_info[0] == "obj" and inst_info[2] == destmd5: + protected = False + + if protected: # we have a protection path; enable config file management. cfgprot = 0 - destmd5 = perform_md5(mydest, calc_prelink=calc_prelink) if mymd5 == destmd5: #file already in place; simply update mtimes of destination moveme = 1 @@ -4207,10 +4511,10 @@ class dblink(object): # as hardlinks (having identical st_dev and st_ino). hardlink_key = (mystat.st_dev, mystat.st_ino) - hardlink_candidates = self._md5_merge_map.get(hardlink_key) + hardlink_candidates = self._hardlink_merge_map.get(hardlink_key) if hardlink_candidates is None: hardlink_candidates = [] - self._md5_merge_map[hardlink_key] = hardlink_candidates + self._hardlink_merge_map[hardlink_key] = hardlink_candidates mymtime = movefile(mysrc, mydest, newmtime=thismtime, sstat=mystat, mysettings=self.settings, @@ -4218,8 +4522,7 @@ class dblink(object): encoding=_encodings['merge']) if mymtime is None: return 1 - if hardlink_candidates is not None: - hardlink_candidates.append(mydest) + hardlink_candidates.append(mydest) zing = ">>>" if mymtime != None: @@ -4445,6 +4748,7 @@ def write_contents(contents, root, f): def tar_contents(contents, root, tar, protect=None, onProgress=None): os = _os_merge + encoding = _encodings['merge'] try: for x in contents: @@ -4464,7 +4768,9 @@ def tar_contents(contents, root, tar, protect=None, onProgress=None): pass else: os = portage.os + encoding = _encodings['fs'] + tar.encoding = encoding root = normalize_path(root).rstrip(os.path.sep) + os.path.sep id_strings = {} maxval = len(contents) @@ -4486,7 +4792,7 @@ def tar_contents(contents, root, tar, protect=None, onProgress=None): continue contents_type = contents[path][0] if path.startswith(root): - arcname = path[len(root):] + arcname = "./" + path[len(root):] else: raise ValueError("invalid root argument: '%s'" % root) live_path = path @@ -4498,7 +4804,51 @@ def tar_contents(contents, root, tar, protect=None, onProgress=None): # recorded as a real directory in the tar file to ensure that tar # can properly extract it's children. live_path = os.path.realpath(live_path) - tarinfo = tar.gettarinfo(live_path, arcname) + lst = os.lstat(live_path) + + # Since os.lstat() inside TarFile.gettarinfo() can trigger a + # UnicodeEncodeError when python has something other than utf_8 + # return from sys.getfilesystemencoding() (as in bug #388773), + # we implement the needed functionality here, using the result + # of our successful lstat call. An alternative to this would be + # to pass in the fileobj argument to TarFile.gettarinfo(), so + # that it could use fstat instead of lstat. However, that would + # have the unwanted effect of dereferencing symlinks. 
+ + tarinfo = tar.tarinfo() + tarinfo.name = arcname + tarinfo.mode = lst.st_mode + tarinfo.uid = lst.st_uid + tarinfo.gid = lst.st_gid + tarinfo.size = 0 + tarinfo.mtime = lst.st_mtime + tarinfo.linkname = "" + if stat.S_ISREG(lst.st_mode): + inode = (lst.st_ino, lst.st_dev) + if (lst.st_nlink > 1 and + inode in tar.inodes and + arcname != tar.inodes[inode]): + tarinfo.type = tarfile.LNKTYPE + tarinfo.linkname = tar.inodes[inode] + else: + tar.inodes[inode] = arcname + tarinfo.type = tarfile.REGTYPE + tarinfo.size = lst.st_size + elif stat.S_ISDIR(lst.st_mode): + tarinfo.type = tarfile.DIRTYPE + elif stat.S_ISLNK(lst.st_mode): + tarinfo.type = tarfile.SYMTYPE + tarinfo.linkname = os.readlink(live_path) + else: + continue + try: + tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0] + except KeyError: + pass + try: + tarinfo.gname = grp.getgrgid(tarinfo.gid)[0] + except KeyError: + pass if stat.S_ISREG(lst.st_mode): if protect and protect(path): @@ -4515,7 +4865,7 @@ def tar_contents(contents, root, tar, protect=None, onProgress=None): f.close() else: f = open(_unicode_encode(path, - encoding=object.__getattribute__(os, '_encoding'), + encoding=encoding, errors='strict'), 'rb') try: tar.addfile(tarinfo, f) diff --git a/portage_with_autodep/pym/portage/dbapi/vartree.pyo b/portage_with_autodep/pym/portage/dbapi/vartree.pyo Binary files differnew file mode 100644 index 0000000..7c186cf --- /dev/null +++ b/portage_with_autodep/pym/portage/dbapi/vartree.pyo diff --git a/portage_with_autodep/pym/portage/dbapi/virtual.py b/portage_with_autodep/pym/portage/dbapi/virtual.py index ec97ffe..da15983 100644 --- a/portage_with_autodep/pym/portage/dbapi/virtual.py +++ b/portage_with_autodep/pym/portage/dbapi/virtual.py @@ -1,9 +1,10 @@ -# Copyright 1998-2007 Gentoo Foundation +# Copyright 1998-2012 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 from portage.dbapi import dbapi -from portage import cpv_getkey +from portage.dbapi.dep_expand import dep_expand +from portage.versions import cpv_getkey, _pkg_str class fakedbapi(dbapi): """A fake dbapi that allows consumers to inject/remove packages to/from it @@ -31,27 +32,30 @@ class fakedbapi(dbapi): self._match_cache = {} def match(self, origdep, use_cache=1): - result = self._match_cache.get(origdep, None) + atom = dep_expand(origdep, mydb=self, settings=self.settings) + cache_key = (atom, atom.unevaluated_atom) + result = self._match_cache.get(cache_key) if result is not None: return result[:] - result = dbapi.match(self, origdep, use_cache=use_cache) - self._match_cache[origdep] = result + result = list(self._iter_match(atom, self.cp_list(atom.cp))) + self._match_cache[cache_key] = result return result[:] def cpv_exists(self, mycpv, myrepo=None): return mycpv in self.cpvdict def cp_list(self, mycp, use_cache=1, myrepo=None): - cachelist = self._match_cache.get(mycp) - # cp_list() doesn't expand old-style virtuals - if cachelist and cachelist[0].startswith(mycp): + # NOTE: Cache can be safely shared with the match cache, since the + # match cache uses the result from dep_expand for the cache_key. 
+ cache_key = (mycp, mycp) + cachelist = self._match_cache.get(cache_key) + if cachelist is not None: return cachelist[:] cpv_list = self.cpdict.get(mycp) if cpv_list is None: cpv_list = [] self._cpv_sort_ascending(cpv_list) - if not (not cpv_list and mycp.startswith("virtual/")): - self._match_cache[mycp] = cpv_list + self._match_cache[cache_key] = cpv_list return cpv_list[:] def cp_all(self): @@ -70,7 +74,13 @@ class fakedbapi(dbapi): @param metadata: dict """ self._clear_cache() - mycp = cpv_getkey(mycpv) + if not hasattr(mycpv, 'cp'): + if metadata is None: + mycpv = _pkg_str(mycpv) + else: + mycpv = _pkg_str(mycpv, slot=metadata.get('SLOT'), + repo=metadata.get('repository')) + mycp = mycpv.cp self.cpvdict[mycpv] = metadata myslot = None if self._exclusive_slots and metadata: diff --git a/portage_with_autodep/pym/portage/dbapi/virtual.pyo b/portage_with_autodep/pym/portage/dbapi/virtual.pyo Binary files differnew file mode 100644 index 0000000..9f7c667 --- /dev/null +++ b/portage_with_autodep/pym/portage/dbapi/virtual.pyo diff --git a/portage_with_autodep/pym/portage/debug.py b/portage_with_autodep/pym/portage/debug.py index ce642fe..ebf1a13 100644 --- a/portage_with_autodep/pym/portage/debug.py +++ b/portage_with_autodep/pym/portage/debug.py @@ -1,4 +1,4 @@ -# Copyright 1999-2011 Gentoo Foundation +# Copyright 1999-2012 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 import os @@ -26,7 +26,7 @@ class trace_handler(object): def __init__(self): python_system_paths = [] for x in sys.path: - if os.path.basename(x).startswith("python2."): + if os.path.basename(x) == "python%s.%s" % sys.version_info[:2]: python_system_paths.append(x) self.ignore_prefixes = [] diff --git a/portage_with_autodep/pym/portage/debug.pyo b/portage_with_autodep/pym/portage/debug.pyo Binary files differnew file mode 100644 index 0000000..82a5e8f --- /dev/null +++ b/portage_with_autodep/pym/portage/debug.pyo diff --git a/portage_with_autodep/pym/portage/dep/__init__.py b/portage_with_autodep/pym/portage/dep/__init__.py index fd5ad30..152af0a 100644 --- a/portage_with_autodep/pym/portage/dep/__init__.py +++ b/portage_with_autodep/pym/portage/dep/__init__.py @@ -1,5 +1,5 @@ # deps.py -- Portage dependency resolution functions -# Copyright 2003-2011 Gentoo Foundation +# Copyright 2003-2012 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 __all__ = [ @@ -30,13 +30,20 @@ __all__ = [ import re, sys import warnings from itertools import chain + +import portage +portage.proxy.lazyimport.lazyimport(globals(), + 'portage.util:cmp_sort_key,writemsg', +) + from portage import _unicode_decode from portage.eapi import eapi_has_slot_deps, eapi_has_src_uri_arrows, \ - eapi_has_use_deps, eapi_has_strong_blocks, eapi_has_use_dep_defaults + eapi_has_use_deps, eapi_has_strong_blocks, eapi_has_use_dep_defaults, \ + eapi_has_repo_deps, eapi_allows_dots_in_PN, eapi_allows_dots_in_use_flags from portage.exception import InvalidAtom, InvalidData, InvalidDependString from portage.localization import _ from portage.versions import catpkgsplit, catsplit, \ - pkgcmp, ververify, _cp, _cpv + vercmp, ververify, _cp, _cpv, _pkg_str, _unknown_repo import portage.cache.mappings if sys.hexversion >= 0x3000000: @@ -55,7 +62,7 @@ def cpvequal(cpv1, cpv2): @param cpv2: CategoryPackageVersion (no operators) Example: "sys-apps/portage-2.1" @type cpv2: String @rtype: Boolean - @returns: + @return: 1. True if cpv1 = cpv2 2. False Otherwise 3. 
Throws PortageException if cpv1 or cpv2 is not a CPV @@ -67,16 +74,27 @@ def cpvequal(cpv1, cpv2): """ - split1 = catpkgsplit(cpv1) - split2 = catpkgsplit(cpv2) - - if not split1 or not split2: + try: + try: + split1 = cpv1.cpv_split + except AttributeError: + cpv1 = _pkg_str(cpv1) + split1 = cpv1.cpv_split + + try: + split2 = cpv2.cpv_split + except AttributeError: + cpv2 = _pkg_str(cpv2) + split2 = cpv2.cpv_split + + except InvalidData: raise portage.exception.PortageException(_("Invalid data '%s, %s', parameter was not a CPV") % (cpv1, cpv2)) - - if split1[0] != split2[0]: + + if split1[0] != split2[0] or \ + split1[1] != split2[1]: return False - - return (pkgcmp(split1[1:], split2[1:]) == 0) + + return vercmp(cpv1.version, cpv2.version) == 0 def strip_empty(myarr): """ @@ -635,8 +653,8 @@ def flatten(mylist): _usedep_re = { - "0": re.compile("^(?P<prefix>[!-]?)(?P<flag>[A-Za-z0-9][A-Za-z0-9+_@-]*)(?P<default>(\(\+\)|\(\-\))?)(?P<suffix>[?=]?)$"), - "4-python": re.compile("^(?P<prefix>[!-]?)(?P<flag>[A-Za-z0-9][A-Za-z0-9+_@.-]*)(?P<default>(\(\+\)|\(\-\))?)(?P<suffix>[?=]?)$"), + "dots_disallowed_in_use_flags": re.compile("^(?P<prefix>[!-]?)(?P<flag>[A-Za-z0-9][A-Za-z0-9+_@-]*)(?P<default>(\(\+\)|\(\-\))?)(?P<suffix>[?=]?)$"), + "dots_allowed_in_use_flags": re.compile("^(?P<prefix>[!-]?)(?P<flag>[A-Za-z0-9][A-Za-z0-9+_@.-]*)(?P<default>(\(\+\)|\(\-\))?)(?P<suffix>[?=]?)$"), } def _get_usedep_re(eapi): @@ -649,10 +667,10 @@ def _get_usedep_re(eapi): @return: A regular expression object that matches valid USE deps for the given eapi. """ - if eapi in (None, "4-python",): - return _usedep_re["4-python"] + if eapi is None or eapi_allows_dots_in_use_flags(eapi): + return _usedep_re["dots_allowed_in_use_flags"] else: - return _usedep_re["0"] + return _usedep_re["dots_disallowed_in_use_flags"] class _use_dep(object): @@ -1068,6 +1086,10 @@ class Atom(_atom_base): _atom_base.__init__(s) + atom_re = _get_atom_re(eapi) + if eapi_has_repo_deps(eapi): + allow_repo = True + if "!" == s[:1]: blocker = self._blocker(forbid_overlap=("!" 
== s[1:2])) if blocker.overlap.forbid: @@ -1077,11 +1099,11 @@ class Atom(_atom_base): else: blocker = False self.__dict__['blocker'] = blocker - m = _atom_re.match(s) + m = atom_re.match(s) extended_syntax = False if m is None: if allow_wildcard: - m = _atom_wildcard_re.match(s) + m = _get_atom_wildcard_re(eapi).match(s) if m is None: raise InvalidAtom(self) op = None @@ -1096,38 +1118,44 @@ class Atom(_atom_base): else: raise InvalidAtom(self) elif m.group('op') is not None: - base = _atom_re.groupindex['op'] + base = atom_re.groupindex['op'] op = m.group(base + 1) cpv = m.group(base + 2) cp = m.group(base + 3) - slot = m.group(_atom_re.groups - 2) - repo = m.group(_atom_re.groups - 1) - use_str = m.group(_atom_re.groups) + slot = m.group(atom_re.groups - 2) + repo = m.group(atom_re.groups - 1) + use_str = m.group(atom_re.groups) if m.group(base + 4) is not None: raise InvalidAtom(self) elif m.group('star') is not None: - base = _atom_re.groupindex['star'] + base = atom_re.groupindex['star'] op = '=*' cpv = m.group(base + 1) cp = m.group(base + 2) - slot = m.group(_atom_re.groups - 2) - repo = m.group(_atom_re.groups - 1) - use_str = m.group(_atom_re.groups) + slot = m.group(atom_re.groups - 2) + repo = m.group(atom_re.groups - 1) + use_str = m.group(atom_re.groups) if m.group(base + 3) is not None: raise InvalidAtom(self) elif m.group('simple') is not None: op = None - cpv = cp = m.group(_atom_re.groupindex['simple'] + 1) - slot = m.group(_atom_re.groups - 2) - repo = m.group(_atom_re.groups - 1) - use_str = m.group(_atom_re.groups) - if m.group(_atom_re.groupindex['simple'] + 2) is not None: + cpv = cp = m.group(atom_re.groupindex['simple'] + 1) + slot = m.group(atom_re.groups - 2) + repo = m.group(atom_re.groups - 1) + use_str = m.group(atom_re.groups) + if m.group(atom_re.groupindex['simple'] + 2) is not None: raise InvalidAtom(self) else: raise AssertionError(_("required group not found in atom: '%s'") % self) self.__dict__['cp'] = cp - self.__dict__['cpv'] = cpv + try: + self.__dict__['cpv'] = _pkg_str(cpv) + self.__dict__['version'] = self.cpv.version + except InvalidData: + # plain cp, wildcard, or something + self.__dict__['cpv'] = cpv + self.__dict__['version'] = None self.__dict__['repo'] = repo self.__dict__['slot'] = slot self.__dict__['operator'] = op @@ -1216,6 +1244,23 @@ class Atom(_atom_base): return Atom(self.replace(_slot_separator + self.slot, '', 1), allow_repo=True, allow_wildcard=True) + def with_repo(self, repo): + atom = remove_slot(self) + if self.slot is not None: + atom += _slot_separator + self.slot + atom += _repo_separator + repo + if self.use is not None: + atom += str(self.use) + return Atom(atom, allow_repo=True, allow_wildcard=True) + + def with_slot(self, slot): + atom = remove_slot(self) + _slot_separator + slot + if self.repo is not None: + atom += _repo_separator + self.repo + if self.use is not None: + atom += str(self.use) + return Atom(atom, allow_repo=True, allow_wildcard=True) + def __setattr__(self, name, value): raise AttributeError("Atom instances are immutable", self.__class__, name, value) @@ -1353,10 +1398,13 @@ class ExtendedAtomDict(portage.cache.mappings.MutableMapping): yield k def iteritems(self): - for item in self._normal.items(): - yield item - for item in self._extended.items(): - yield item + try: + for item in self._normal.items(): + yield item + for item in self._extended.items(): + yield item + except AttributeError: + pass # FEATURES=python-trace def __delitem__(self, cp): if "*" in cp: @@ -1610,20 +1658,45 @@ 
_repo_separator = "::" _repo_name = r'[\w][\w-]*' _repo = r'(?:' + _repo_separator + '(' + _repo_name + ')' + ')?' -_atom_re = re.compile('^(?P<without_use>(?:' + - '(?P<op>' + _op + _cpv + ')|' + - '(?P<star>=' + _cpv + r'\*)|' + - '(?P<simple>' + _cp + '))' + - '(' + _slot_separator + _slot + ')?' + _repo + ')(' + _use + ')?$', re.VERBOSE) +_atom_re = { + "dots_disallowed_in_PN": re.compile('^(?P<without_use>(?:' + + '(?P<op>' + _op + _cpv['dots_disallowed_in_PN'] + ')|' + + '(?P<star>=' + _cpv['dots_disallowed_in_PN'] + r'\*)|' + + '(?P<simple>' + _cp['dots_disallowed_in_PN'] + '))' + + '(' + _slot_separator + _slot + ')?' + _repo + ')(' + _use + ')?$', re.VERBOSE), + "dots_allowed_in_PN": re.compile('^(?P<without_use>(?:' + + '(?P<op>' + _op + _cpv['dots_allowed_in_PN'] + ')|' + + '(?P<star>=' + _cpv['dots_allowed_in_PN'] + r'\*)|' + + '(?P<simple>' + _cp['dots_allowed_in_PN'] + '))' + + '(' + _slot_separator + _slot + ')?' + _repo + ')(' + _use + ')?$', re.VERBOSE), +} + +def _get_atom_re(eapi): + if eapi is None or eapi_allows_dots_in_PN(eapi): + return _atom_re["dots_allowed_in_PN"] + else: + return _atom_re["dots_disallowed_in_PN"] _extended_cat = r'[\w+*][\w+.*-]*' -_extended_pkg = r'[\w+*][\w+*-]*?' +_extended_pkg = { + "dots_disallowed_in_PN": r'[\w+*][\w+*-]*?', + "dots_allowed_in_PN": r'[\w+*][\w+.*-]*?', +} -_atom_wildcard_re = re.compile('(?P<simple>(' + _extended_cat + ')/(' + _extended_pkg + '))(:(?P<slot>' + _slot + '))?(' + _repo_separator + '(?P<repo>' + _repo_name + '))?$') +_atom_wildcard_re = { + "dots_disallowed_in_PN": re.compile('(?P<simple>(' + _extended_cat + ')/(' + _extended_pkg['dots_disallowed_in_PN'] + '))(:(?P<slot>' + _slot + '))?(' + _repo_separator + '(?P<repo>' + _repo_name + '))?$'), + "dots_allowed_in_PN": re.compile('(?P<simple>(' + _extended_cat + ')/(' + _extended_pkg['dots_allowed_in_PN'] + '))(:(?P<slot>' + _slot + '))?(' + _repo_separator + '(?P<repo>' + _repo_name + '))?$'), +} + +def _get_atom_wildcard_re(eapi): + if eapi is None or eapi_allows_dots_in_PN(eapi): + return _atom_wildcard_re["dots_allowed_in_PN"] + else: + return _atom_wildcard_re["dots_disallowed_in_PN"] _useflag_re = { - "0": re.compile(r'^[A-Za-z0-9][A-Za-z0-9+_@-]*$'), - "4-python": re.compile(r'^[A-Za-z0-9][A-Za-z0-9+_@.-]*$'), + "dots_disallowed_in_use_flags": re.compile(r'^[A-Za-z0-9][A-Za-z0-9+_@-]*$'), + "dots_allowed_in_use_flags": re.compile(r'^[A-Za-z0-9][A-Za-z0-9+_@.-]*$'), } def _get_useflag_re(eapi): @@ -1636,10 +1709,10 @@ def _get_useflag_re(eapi): @return: A regular expression object that matches valid USE flags for the given eapi. 
""" - if eapi in (None, "4-python",): - return _useflag_re["4-python"] + if eapi is None or eapi_allows_dots_in_use_flags(eapi): + return _useflag_re["dots_allowed_in_use_flags"] else: - return _useflag_re["0"] + return _useflag_re["dots_disallowed_in_use_flags"] def isvalidatom(atom, allow_blockers=False, allow_wildcard=False, allow_repo=False): """ @@ -1753,7 +1826,14 @@ def match_to_list(mypkg, mylist): @rtype: List @return: A unique list of package atoms that match the given package atom """ - return [ x for x in set(mylist) if match_from_list(x, [mypkg]) ] + matches = set() + result = [] + pkgs = [mypkg] + for x in mylist: + if x not in matches and match_from_list(x, pkgs): + matches.add(x) + result.append(x) + return result def best_match_to_list(mypkg, mylist): """ @@ -1781,6 +1861,7 @@ def best_match_to_list(mypkg, mylist): '>':2, '<':2, '>=':2, '<=':2, None:1} maxvalue = -2 bestm = None + mypkg_cpv = None for x in match_to_list(mypkg, mylist): if x.extended_syntax: if dep_getslot(x) is not None: @@ -1800,6 +1881,31 @@ def best_match_to_list(mypkg, mylist): if op_val > maxvalue: maxvalue = op_val bestm = x + elif op_val == maxvalue and op_val == 2: + # For >, <, >=, and <=, the one with the version + # closest to mypkg is the best match. + if mypkg_cpv is None: + try: + mypkg_cpv = mypkg.cpv + except AttributeError: + mypkg_cpv = _pkg_str(remove_slot(mypkg)) + if bestm.cpv == mypkg_cpv or bestm.cpv == x.cpv: + pass + elif x.cpv == mypkg_cpv: + bestm = x + else: + # Sort the cpvs to find the one closest to mypkg_cpv + cpv_list = [bestm.cpv, mypkg_cpv, x.cpv] + def cmp_cpv(cpv1, cpv2): + return vercmp(cpv1.version, cpv2.version) + cpv_list.sort(key=cmp_sort_key(cmp_cpv)) + if cpv_list[0] is mypkg_cpv or cpv_list[-1] is mypkg_cpv: + if cpv_list[1] is x.cpv: + bestm = x + else: + # TODO: handle the case where mypkg_cpv is in the middle + pass + return bestm def match_from_list(mydep, candidate_list): @@ -1817,7 +1923,6 @@ def match_from_list(mydep, candidate_list): if not candidate_list: return [] - from portage.util import writemsg if "!" == mydep[:1]: if "!" 
== mydep[1:2]: mydep = mydep[2:] @@ -1882,7 +1987,7 @@ def match_from_list(mydep, candidate_list): myver = mysplit[2].lstrip("0") if not myver or not myver[0].isdigit(): myver = "0"+myver - mycpv = mysplit[0]+"/"+mysplit[1]+"-"+myver + mycpv_cmp = mysplit[0]+"/"+mysplit[1]+"-"+myver for x in candidate_list: xs = getattr(x, "cpv_split", None) if xs is None: @@ -1891,7 +1996,7 @@ def match_from_list(mydep, candidate_list): if not myver or not myver[0].isdigit(): myver = "0"+myver xcpv = xs[0]+"/"+xs[1]+"-"+myver - if xcpv.startswith(mycpv): + if xcpv.startswith(mycpv_cmp): mylist.append(x) elif operator == "~": # version, any revision, match @@ -1908,15 +2013,19 @@ def match_from_list(mydep, candidate_list): mylist.append(x) elif operator in [">", ">=", "<", "<="]: - mysplit = ["%s/%s" % (cat, pkg), ver, rev] for x in candidate_list: - xs = getattr(x, "cpv_split", None) - if xs is None: - xs = catpkgsplit(remove_slot(x)) - xcat, xpkg, xver, xrev = xs - xs = ["%s/%s" % (xcat, xpkg), xver, xrev] + if hasattr(x, 'cp'): + pkg = x + else: + try: + pkg = _pkg_str(remove_slot(x)) + except InvalidData: + continue + + if pkg.cp != mydep.cp: + continue try: - result = pkgcmp(xs, mysplit) + result = vercmp(pkg.version, mydep.version) except ValueError: # pkgcmp may return ValueError during int() conversion writemsg(_("\nInvalid package name: %s\n") % x, noiselevel=-1) raise @@ -1993,7 +2102,8 @@ def match_from_list(mydep, candidate_list): repo = getattr(x, "repo", False) if repo is False: repo = dep_getrepo(x) - if repo is not None and repo != mydep.repo: + if repo is not None and repo != _unknown_repo and \ + repo != mydep.repo: continue mylist.append(x) diff --git a/portage_with_autodep/pym/portage/dep/__init__.pyo b/portage_with_autodep/pym/portage/dep/__init__.pyo Binary files differnew file mode 100644 index 0000000..c78bb23 --- /dev/null +++ b/portage_with_autodep/pym/portage/dep/__init__.pyo diff --git a/portage_with_autodep/pym/portage/dep/_slot_operator.py b/portage_with_autodep/pym/portage/dep/_slot_operator.py new file mode 100644 index 0000000..7b64444 --- /dev/null +++ b/portage_with_autodep/pym/portage/dep/_slot_operator.py @@ -0,0 +1,97 @@ +# Copyright 2012-2013 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + +from __future__ import unicode_literals + +from portage.dep import Atom, paren_enclose, use_reduce +from portage.eapi import _get_eapi_attrs +from portage.exception import InvalidData +from _emerge.Package import Package + +def find_built_slot_operator_atoms(pkg): + atoms = {} + for k in Package._dep_keys: + atom_list = list(_find_built_slot_operator(use_reduce(pkg._metadata[k], + uselist=pkg.use.enabled, eapi=pkg.eapi, + token_class=Atom))) + if atom_list: + atoms[k] = atom_list + return atoms + +def _find_built_slot_operator(dep_struct): + for x in dep_struct: + if isinstance(x, list): + for atom in _find_built_slot_operator(x): + yield atom + elif isinstance(x, Atom) and x.slot_operator_built: + yield x + +def ignore_built_slot_operator_deps(dep_struct): + for i, x in enumerate(dep_struct): + if isinstance(x, list): + ignore_built_slot_operator_deps(x) + elif isinstance(x, Atom) and x.slot_operator_built: + # There's no way of knowing here whether the SLOT + # part of the slot/sub-slot pair should be kept, so we + # ignore both parts. 
+ dep_struct[i] = x.without_slot + +def evaluate_slot_operator_equal_deps(settings, use, trees): + + metadata = settings.configdict['pkg'] + eapi = metadata['EAPI'] + eapi_attrs = _get_eapi_attrs(eapi) + running_vardb = trees[trees._running_eroot]["vartree"].dbapi + target_vardb = trees[trees._target_eroot]["vartree"].dbapi + vardbs = [target_vardb] + deps = {} + for k in Package._dep_keys: + deps[k] = use_reduce(metadata[k], + uselist=use, eapi=eapi, token_class=Atom) + + for k in Package._runtime_keys: + _eval_deps(deps[k], vardbs) + + if eapi_attrs.hdepend: + _eval_deps(deps["HDEPEND"], [running_vardb]) + _eval_deps(deps["DEPEND"], [target_vardb]) + else: + if running_vardb is not target_vardb: + vardbs.append(running_vardb) + _eval_deps(deps["DEPEND"], vardbs) + + result = {} + for k, v in deps.items(): + result[k] = paren_enclose(v) + + return result + +def _eval_deps(dep_struct, vardbs): + for i, x in enumerate(dep_struct): + if isinstance(x, list): + _eval_deps(x, vardbs) + elif isinstance(x, Atom) and x.slot_operator == "=": + for vardb in vardbs: + best_version = vardb.match(x) + if best_version: + best_version = best_version[-1] + try: + best_version = \ + vardb._pkg_str(best_version, None) + except (KeyError, InvalidData): + pass + else: + slot_part = "%s/%s=" % \ + (best_version.slot, best_version.sub_slot) + x = x.with_slot(slot_part) + dep_struct[i] = x + break + else: + # this dep could not be resolved, so remove the operator + # (user may be using package.provided and managing rebuilds + # manually) + if x.slot: + x = x.with_slot(x.slot) + else: + x = x.without_slot + dep_struct[i] = x diff --git a/portage_with_autodep/pym/portage/dep/dep_check.py b/portage_with_autodep/pym/portage/dep/dep_check.py index 01d5021..99a5eb0 100644 --- a/portage_with_autodep/pym/portage/dep/dep_check.py +++ b/portage_with_autodep/pym/portage/dep/dep_check.py @@ -1,4 +1,4 @@ -# Copyright 2010-2011 Gentoo Foundation +# Copyright 2010-2012 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 __all__ = ['dep_check', 'dep_eval', 'dep_wordreduce', 'dep_zapdeps'] @@ -11,7 +11,7 @@ from portage.dep import Atom, match_from_list, use_reduce from portage.exception import InvalidDependString, ParseError from portage.localization import _ from portage.util import writemsg, writemsg_level -from portage.versions import catpkgsplit, cpv_getkey, pkgcmp +from portage.versions import vercmp, _pkg_str def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/", trees=None, use_mask=None, use_force=None, **kwargs): @@ -39,14 +39,12 @@ def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/", parent = mytrees.get("parent") virt_parent = mytrees.get("virt_parent") graph_parent = None - eapi = None if parent is not None: if virt_parent is not None: graph_parent = virt_parent parent = virt_parent else: graph_parent = parent - eapi = parent.metadata["EAPI"] repoman = not mysettings.local_config if kwargs["use_binaries"]: portdb = trees[myroot]["bintree"].dbapi @@ -352,8 +350,14 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None): avail_pkg = mydbapi.match(atom.without_use) if avail_pkg: avail_pkg = avail_pkg[-1] # highest (ascending order) - avail_slot = Atom("%s:%s" % (atom.cp, - mydbapi.aux_get(avail_pkg, ["SLOT"])[0])) + try: + slot = avail_pkg.slot + except AttributeError: + eapi, slot, repo = mydbapi.aux_get(avail_pkg, + ["EAPI", "SLOT", "repository"]) + avail_pkg = _pkg_str(avail_pkg, eapi=eapi, + slot=slot, repo=repo) + 
avail_slot = Atom("%s:%s" % (atom.cp, slot)) if not avail_pkg: all_available = False all_use_satisfied = False @@ -368,16 +372,19 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None): avail_pkg_use = avail_pkg_use[-1] if avail_pkg_use != avail_pkg: avail_pkg = avail_pkg_use - avail_slot = Atom("%s:%s" % (atom.cp, - mydbapi.aux_get(avail_pkg, ["SLOT"])[0])) + try: + slot = avail_pkg.slot + except AttributeError: + eapi, slot, repo = mydbapi.aux_get(avail_pkg, + ["EAPI", "SLOT", "repository"]) + avail_pkg = _pkg_str(avail_pkg, + eapi=eapi, slot=slot, repo=repo) slot_map[avail_slot] = avail_pkg - pkg_cp = cpv_getkey(avail_pkg) - highest_cpv = cp_map.get(pkg_cp) + highest_cpv = cp_map.get(avail_pkg.cp) if highest_cpv is None or \ - pkgcmp(catpkgsplit(avail_pkg)[1:], - catpkgsplit(highest_cpv)[1:]) > 0: - cp_map[pkg_cp] = avail_pkg + vercmp(avail_pkg.version, highest_cpv.version) > 0: + cp_map[avail_pkg.cp] = avail_pkg this_choice = (atoms, slot_map, cp_map, all_available) if all_available: @@ -515,8 +522,7 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None): for cp in intersecting_cps: version_1 = cp_map_1[cp] version_2 = cp_map_2[cp] - difference = pkgcmp(catpkgsplit(version_1)[1:], - catpkgsplit(version_2)[1:]) + difference = vercmp(version_1.version, version_2.version) if difference != 0: if difference > 0: has_upgrade = True @@ -539,8 +545,12 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None): assert(False) # This point should not be reachable def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None, - use_cache=1, use_binaries=0, myroot="/", trees=None): - """Takes a depend string and parses the condition.""" + use_cache=1, use_binaries=0, myroot=None, trees=None): + """ + Takes a depend string, parses it, and selects atoms. + The myroot parameter is unused (use mysettings['EROOT'] instead). + """ + myroot = mysettings['EROOT'] edebug = mysettings.get("PORTAGE_DEBUG", None) == "1" #check_config_instance(mysettings) if trees is None: diff --git a/portage_with_autodep/pym/portage/dep/dep_check.pyo b/portage_with_autodep/pym/portage/dep/dep_check.pyo Binary files differnew file mode 100644 index 0000000..1b9e03f --- /dev/null +++ b/portage_with_autodep/pym/portage/dep/dep_check.pyo diff --git a/portage_with_autodep/pym/portage/dispatch_conf.py b/portage_with_autodep/pym/portage/dispatch_conf.py index 4991020..4c68dfc 100644 --- a/portage_with_autodep/pym/portage/dispatch_conf.py +++ b/portage_with_autodep/pym/portage/dispatch_conf.py @@ -1,5 +1,5 @@ # archive_conf.py -- functionality common to archive-conf and dispatch-conf -# Copyright 2003-2011 Gentoo Foundation +# Copyright 2003-2012 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 @@ -8,11 +8,12 @@ from __future__ import print_function -import os, sys, shutil +import os, shutil, subprocess, sys import portage from portage.env.loaders import KeyValuePairFileLoader from portage.localization import _ +from portage.util import shlex_split, varexpand RCS_BRANCH = '1.1.1' RCS_LOCK = 'rcs -ko -M -l' @@ -22,24 +23,29 @@ RCS_MERGE = "rcsmerge -p -r" + RCS_BRANCH + " '%s' > '%s'" DIFF3_MERGE = "diff3 -mE '%s' '%s' '%s' > '%s'" -def diffstatusoutput_len(cmd): +def diffstatusoutput(cmd, file1, file2): """ Execute the string cmd in a shell with getstatusoutput() and return a - 2-tuple (status, output_length). If getstatusoutput() raises - UnicodeDecodeError (known to happen with python3.1), return a - 2-tuple (1, 1). 
This provides a simple way to check for non-zero - output length of diff commands, while providing simple handling of - UnicodeDecodeError when necessary. + 2-tuple (status, output). """ - try: - status, output = portage.subprocess_getstatusoutput(cmd) - return (status, len(output)) - except UnicodeDecodeError: - return (1, 1) + # Use Popen to emulate getstatusoutput(), since getstatusoutput() may + # raise a UnicodeDecodeError which makes the output inaccessible. + args = shlex_split(cmd % (file1, file2)) + if sys.hexversion < 0x3000000 or sys.hexversion >= 0x3020000: + # Python 3.1 does not support bytes in Popen args. + args = [portage._unicode_encode(x, errors='strict') for x in args] + proc = subprocess.Popen(args, + stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + output = portage._unicode_decode(proc.communicate()[0]) + if output and output[-1] == "\n": + # getstatusoutput strips one newline + output = output[:-1] + return (proc.wait(), output) def read_config(mandatory_opts): - loader = KeyValuePairFileLoader( - '/etc/dispatch-conf.conf', None) + eprefix = portage.const.EPREFIX + config_path = os.path.join(eprefix or os.sep, "etc/dispatch-conf.conf") + loader = KeyValuePairFileLoader(config_path, None) opts, errors = loader.load() if not opts: print(_('dispatch-conf: Error reading /etc/dispatch-conf.conf; fatal'), file=sys.stderr) @@ -58,6 +64,10 @@ def read_config(mandatory_opts): else: print(_('dispatch-conf: Missing option "%s" in /etc/dispatch-conf.conf; fatal') % (key,), file=sys.stderr) + # archive-dir supports ${EPREFIX} expansion, in order to avoid hardcoding + variables = {"EPREFIX": eprefix} + opts['archive-dir'] = varexpand(opts['archive-dir'], mydict=variables) + if not os.path.exists(opts['archive-dir']): os.mkdir(opts['archive-dir']) # Use restrictive permissions by default, in order to protect @@ -132,7 +142,7 @@ def file_archive(archive, curconf, newconf, mrgconf): # Archive the current config file if it isn't already saved if os.path.exists(archive) \ - and diffstatusoutput_len("diff -aq '%s' '%s'" % (curconf,archive))[1] != 0: + and len(diffstatusoutput("diff -aq '%s' '%s'", curconf, archive)[1]) != 0: suf = 1 while suf < 9 and os.path.exists(archive + '.' 
+ str(suf)): suf += 1 diff --git a/portage_with_autodep/pym/portage/dispatch_conf.pyo b/portage_with_autodep/pym/portage/dispatch_conf.pyo Binary files differnew file mode 100644 index 0000000..6239859 --- /dev/null +++ b/portage_with_autodep/pym/portage/dispatch_conf.pyo diff --git a/portage_with_autodep/pym/portage/eapi.py b/portage_with_autodep/pym/portage/eapi.py index da5fd8c..79cf891 100644 --- a/portage_with_autodep/pym/portage/eapi.py +++ b/portage_with_autodep/pym/portage/eapi.py @@ -1,4 +1,4 @@ -# Copyright 2010 Gentoo Foundation +# Copyright 2010-2012 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 def eapi_has_iuse_defaults(eapi): @@ -34,6 +34,9 @@ def eapi_exports_merge_type(eapi): def eapi_exports_replace_vars(eapi): return eapi not in ("0", "1", "2", "3") +def eapi_exports_REPOSITORY(eapi): + return eapi in ("4-python",) + def eapi_has_pkg_pretend(eapi): return eapi not in ("0", "1", "2", "3") @@ -48,3 +51,12 @@ def eapi_has_required_use(eapi): def eapi_has_use_dep_defaults(eapi): return eapi not in ("0", "1", "2", "3") + +def eapi_has_repo_deps(eapi): + return eapi in ("4-python",) + +def eapi_allows_dots_in_PN(eapi): + return eapi in ("4-python",) + +def eapi_allows_dots_in_use_flags(eapi): + return eapi in ("4-python",) diff --git a/portage_with_autodep/pym/portage/eapi.pyo b/portage_with_autodep/pym/portage/eapi.pyo Binary files differnew file mode 100644 index 0000000..ce67ab1 --- /dev/null +++ b/portage_with_autodep/pym/portage/eapi.pyo diff --git a/portage_with_autodep/pym/portage/eclass_cache.py b/portage_with_autodep/pym/portage/eclass_cache.py index 1374f1d..cb2cf8a 100644 --- a/portage_with_autodep/pym/portage/eclass_cache.py +++ b/portage_with_autodep/pym/portage/eclass_cache.py @@ -6,21 +6,59 @@ __all__ = ["cache"] import stat import sys +import operator from portage.util import normalize_path import errno -from portage.exception import PermissionDenied +from portage.exception import FileNotFound, PermissionDenied from portage import os +from portage import checksum if sys.hexversion >= 0x3000000: long = int + +class hashed_path(object): + + def __init__(self, location): + self.location = location + + def __getattr__(self, attr): + if attr == 'mtime': + # use stat.ST_MTIME; accessing .st_mtime gets you a float + # depending on the python version, and long(float) introduces + # some rounding issues that aren't present for people using + # the straight c api. + # thus use the defacto python compatibility work around; + # access via index, which guarantees you get the raw long. + try: + self.mtime = obj = os.stat(self.location)[stat.ST_MTIME] + except OSError as e: + if e.errno in (errno.ENOENT, errno.ESTALE): + raise FileNotFound(self.location) + elif e.errno == PermissionDenied.errno: + raise PermissionDenied(self.location) + raise + return obj + if not attr.islower(): + # we don't care to allow .mD5 as an alias for .md5 + raise AttributeError(attr) + hashname = attr.upper() + if hashname not in checksum.hashfunc_map: + raise AttributeError(attr) + val = checksum.perform_checksum(self.location, hashname)[0] + setattr(self, attr, val) + return val + + def __repr__(self): + return "<portage.eclass_cache.hashed_path('%s')>" % (self.location,) + class cache(object): """ Maintains the cache information about eclasses used in ebuild. 
""" def __init__(self, porttree_root, overlays=[]): - self.eclasses = {} # {"Name": ("location","_mtime_")} + self.eclasses = {} # {"Name": hashed_path} self._eclass_locations = {} # screw with the porttree ordering, w/out having bash inherit match it, and I'll hurt you. @@ -80,14 +118,16 @@ class cache(object): for y in eclass_filenames: if not y.endswith(".eclass"): continue + obj = hashed_path(os.path.join(x, y)) + obj.eclass_dir = x try: - mtime = os.stat(os.path.join(x, y))[stat.ST_MTIME] - except OSError: + mtime = obj.mtime + except FileNotFound: continue ys=y[:-eclass_len] if x == self._master_eclass_root: master_eclasses[ys] = mtime - self.eclasses[ys] = (x, mtime) + self.eclasses[ys] = obj self._eclass_locations[ys] = x continue @@ -98,22 +138,30 @@ class cache(object): # so prefer the master entry. continue - self.eclasses[ys] = (x, mtime) + self.eclasses[ys] = obj self._eclass_locations[ys] = x - def is_eclass_data_valid(self, ec_dict): + def validate_and_rewrite_cache(self, ec_dict, chf_type, stores_paths): + """ + This will return an empty dict if the ec_dict parameter happens + to be empty, therefore callers must take care to distinguish + between empty dict and None return values. + """ if not isinstance(ec_dict, dict): - return False - for eclass, tup in ec_dict.items(): - cached_data = self.eclasses.get(eclass, None) - """ Only use the mtime for validation since the probability of a - collision is small and, depending on the cache implementation, the - path may not be specified (cache from rsync mirrors, for example). - """ - if cached_data is None or tup[1] != cached_data[1]: - return False - - return True + return None + our_getter = operator.attrgetter(chf_type) + cache_getter = lambda x:x + if stores_paths: + cache_getter = operator.itemgetter(1) + d = {} + for eclass, ec_data in ec_dict.items(): + cached_data = self.eclasses.get(eclass) + if cached_data is None: + return None + if cache_getter(ec_data) != our_getter(cached_data): + return None + d[eclass] = cached_data + return d def get_eclass_data(self, inherits): ec_dict = {} diff --git a/portage_with_autodep/pym/portage/eclass_cache.pyo b/portage_with_autodep/pym/portage/eclass_cache.pyo Binary files differnew file mode 100644 index 0000000..ebe3463 --- /dev/null +++ b/portage_with_autodep/pym/portage/eclass_cache.pyo diff --git a/portage_with_autodep/pym/portage/elog/__init__.py b/portage_with_autodep/pym/portage/elog/__init__.py index 1a8309d..33dac17 100644 --- a/portage_with_autodep/pym/portage/elog/__init__.py +++ b/portage_with_autodep/pym/portage/elog/__init__.py @@ -1,7 +1,11 @@ # elog/__init__.py - elog core functions -# Copyright 2006-2009 Gentoo Foundation +# Copyright 2006-2011 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 +import sys +if sys.hexversion >= 0x3000000: + basestring = str + import portage portage.proxy.lazyimport.lazyimport(globals(), 'portage.util:writemsg', @@ -52,11 +56,15 @@ def _combine_logentries(logentries): for msgtype, msgcontent in logentries[phase]: if previous_type != msgtype: previous_type = msgtype - rValue.append("%s: %s\n" % (msgtype, phase)) - for line in msgcontent: - rValue.append(line) - rValue.append("\n") - return "".join(rValue) + rValue.append("%s: %s" % (msgtype, phase)) + if isinstance(msgcontent, basestring): + rValue.append(msgcontent.rstrip("\n")) + else: + for line in msgcontent: + rValue.append(line.rstrip("\n")) + if rValue: + rValue.append("") + return "\n".join(rValue) _elog_mod_imports = {} def _load_mod(name): 
diff --git a/portage_with_autodep/pym/portage/elog/__init__.pyo b/portage_with_autodep/pym/portage/elog/__init__.pyo Binary files differnew file mode 100644 index 0000000..39dc595 --- /dev/null +++ b/portage_with_autodep/pym/portage/elog/__init__.pyo diff --git a/portage_with_autodep/pym/portage/elog/filtering.pyo b/portage_with_autodep/pym/portage/elog/filtering.pyo Binary files differnew file mode 100644 index 0000000..3a040cc --- /dev/null +++ b/portage_with_autodep/pym/portage/elog/filtering.pyo diff --git a/portage_with_autodep/pym/portage/elog/messages.py b/portage_with_autodep/pym/portage/elog/messages.py index 6c1580a..a4897d8 100644 --- a/portage_with_autodep/pym/portage/elog/messages.py +++ b/portage_with_autodep/pym/portage/elog/messages.py @@ -18,6 +18,14 @@ from portage import _unicode_decode import io import sys +_log_levels = frozenset([ + "ERROR", + "INFO", + "LOG", + "QA", + "WARN", +]) + def collect_ebuild_messages(path): """ Collect elog messages generated by the bash logging function stored at 'path'. @@ -43,16 +51,21 @@ def collect_ebuild_messages(path): logentries[msgfunction] = [] lastmsgtype = None msgcontent = [] - for l in io.open(_unicode_encode(filename, + f = io.open(_unicode_encode(filename, encoding=_encodings['fs'], errors='strict'), - mode='r', encoding=_encodings['repo.content'], errors='replace'): + mode='r', encoding=_encodings['repo.content'], errors='replace') + # Use split('\n') since normal line iteration or readlines() will + # split on \r characters as shown in bug #390833. + for l in f.read().split('\n'): if not l: continue try: msgtype, msg = l.split(" ", 1) + if msgtype not in _log_levels: + raise ValueError(msgtype) except ValueError: writemsg(_("!!! malformed entry in " - "log file: '%s'\n") % filename, noiselevel=-1) + "log file: '%s': %s\n") % (filename, l), noiselevel=-1) continue if lastmsgtype is None: @@ -65,6 +78,7 @@ def collect_ebuild_messages(path): logentries[msgfunction].append((lastmsgtype, msgcontent)) msgcontent = [msg] lastmsgtype = msgtype + f.close() if msgcontent: logentries[msgfunction].append((lastmsgtype, msgcontent)) @@ -159,13 +173,17 @@ _functions = { "einfo": ("INFO", "GOOD"), "eerror": ("ERROR", "BAD"), } -def _make_msgfunction(level, color): - def _elog(msg, phase="other", key=None, out=None): - """ Display and log a message assigned to the given key/cpv - (or unassigned if no key is given). +class _make_msgfunction(object): + __slots__ = ('_color', '_level') + def __init__(self, level, color): + self._level = level + self._color = color + def __call__(self, msg, phase="other", key=None, out=None): + """ + Display and log a message assigned to the given key/cpv. 
""" - _elog_base(level, msg, phase=phase, key=key, color=color, out=out) - return _elog + _elog_base(self._level, msg, phase=phase, + key=key, color=self._color, out=out) for f in _functions: setattr(sys.modules[__name__], f, _make_msgfunction(_functions[f][0], _functions[f][1])) diff --git a/portage_with_autodep/pym/portage/elog/messages.pyo b/portage_with_autodep/pym/portage/elog/messages.pyo Binary files differnew file mode 100644 index 0000000..c033f55 --- /dev/null +++ b/portage_with_autodep/pym/portage/elog/messages.pyo diff --git a/portage_with_autodep/pym/portage/elog/mod_custom.pyo b/portage_with_autodep/pym/portage/elog/mod_custom.pyo Binary files differnew file mode 100644 index 0000000..317fab4 --- /dev/null +++ b/portage_with_autodep/pym/portage/elog/mod_custom.pyo diff --git a/portage_with_autodep/pym/portage/elog/mod_echo.py b/portage_with_autodep/pym/portage/elog/mod_echo.py index 5de25bf..59117be 100644 --- a/portage_with_autodep/pym/portage/elog/mod_echo.py +++ b/portage_with_autodep/pym/portage/elog/mod_echo.py @@ -18,6 +18,19 @@ def process(mysettings, key, logentries, fulltext): _items.append((mysettings["ROOT"], key, logentries)) def finalize(): + # For consistency, send all message types to stdout. + sys.stdout.flush() + sys.stderr.flush() + stderr = sys.stderr + try: + sys.stderr = sys.stdout + _finalize() + finally: + sys.stderr = stderr + sys.stdout.flush() + sys.stderr.flush() + +def _finalize(): global _items printer = EOutput() for root, key, logentries in _items: diff --git a/portage_with_autodep/pym/portage/elog/mod_echo.pyo b/portage_with_autodep/pym/portage/elog/mod_echo.pyo Binary files differnew file mode 100644 index 0000000..6a00d4c --- /dev/null +++ b/portage_with_autodep/pym/portage/elog/mod_echo.pyo diff --git a/portage_with_autodep/pym/portage/elog/mod_mail.pyo b/portage_with_autodep/pym/portage/elog/mod_mail.pyo Binary files differnew file mode 100644 index 0000000..5d87aa6 --- /dev/null +++ b/portage_with_autodep/pym/portage/elog/mod_mail.pyo diff --git a/portage_with_autodep/pym/portage/elog/mod_mail_summary.pyo b/portage_with_autodep/pym/portage/elog/mod_mail_summary.pyo Binary files differnew file mode 100644 index 0000000..d7306b5 --- /dev/null +++ b/portage_with_autodep/pym/portage/elog/mod_mail_summary.pyo diff --git a/portage_with_autodep/pym/portage/elog/mod_save.py b/portage_with_autodep/pym/portage/elog/mod_save.py index 9350a6e..c69f4a3 100644 --- a/portage_with_autodep/pym/portage/elog/mod_save.py +++ b/portage_with_autodep/pym/portage/elog/mod_save.py @@ -4,20 +4,22 @@ import io import time +import portage from portage import os from portage import _encodings from portage import _unicode_decode from portage import _unicode_encode from portage.data import portage_gid, portage_uid from portage.package.ebuild.prepare_build_dirs import _ensure_log_subdirs -from portage.util import ensure_dirs, normalize_path +from portage.util import apply_permissions, ensure_dirs, normalize_path def process(mysettings, key, logentries, fulltext): if mysettings.get("PORT_LOGDIR"): logdir = normalize_path(mysettings["PORT_LOGDIR"]) else: - logdir = os.path.join(os.sep, "var", "log", "portage") + logdir = os.path.join(os.sep, mysettings["EPREFIX"].lstrip(os.sep), + "var", "log", "portage") if not os.path.isdir(logdir): # Only initialize group/mode if the directory doesn't @@ -25,7 +27,10 @@ def process(mysettings, key, logentries, fulltext): # were previously set by the administrator. 
# NOTE: These permissions should be compatible with our # default logrotate config as discussed in bug 374287. - ensure_dirs(logdir, uid=portage_uid, gid=portage_gid, mode=0o2770) + uid = -1 + if portage.data.secpass >= 2: + uid = portage_uid + ensure_dirs(logdir, uid=uid, gid=portage_gid, mode=0o2770) cat = mysettings['CATEGORY'] pf = mysettings['PF'] @@ -48,4 +53,21 @@ def process(mysettings, key, logentries, fulltext): elogfile.write(_unicode_decode(fulltext)) elogfile.close() + # Copy group permission bits from parent directory. + elogdir_st = os.stat(log_subdir) + elogdir_gid = elogdir_st.st_gid + elogdir_grp_mode = 0o060 & elogdir_st.st_mode + + # Copy the uid from the parent directory if we have privileges + # to do so, for compatibility with our default logrotate + # config (see bug 378451). With the "su portage portage" + # directive and logrotate-3.8.0, logrotate's chown call during + # the compression phase will only succeed if the log file's uid + # is portage_uid. + logfile_uid = -1 + if portage.data.secpass >= 2: + logfile_uid = elogdir_st.st_uid + apply_permissions(elogfilename, uid=logfile_uid, gid=elogdir_gid, + mode=elogdir_grp_mode, mask=0) + return elogfilename diff --git a/portage_with_autodep/pym/portage/elog/mod_save.pyo b/portage_with_autodep/pym/portage/elog/mod_save.pyo Binary files differnew file mode 100644 index 0000000..fb28b76 --- /dev/null +++ b/portage_with_autodep/pym/portage/elog/mod_save.pyo diff --git a/portage_with_autodep/pym/portage/elog/mod_save_summary.py b/portage_with_autodep/pym/portage/elog/mod_save_summary.py index 4adc6f3..347f66e 100644 --- a/portage_with_autodep/pym/portage/elog/mod_save_summary.py +++ b/portage_with_autodep/pym/portage/elog/mod_save_summary.py @@ -4,6 +4,7 @@ import io import time +import portage from portage import os from portage import _encodings from portage import _unicode_decode @@ -17,7 +18,8 @@ def process(mysettings, key, logentries, fulltext): if mysettings.get("PORT_LOGDIR"): logdir = normalize_path(mysettings["PORT_LOGDIR"]) else: - logdir = os.path.join(os.sep, "var", "log", "portage") + logdir = os.path.join(os.sep, mysettings["EPREFIX"].lstrip(os.sep), + "var", "log", "portage") if not os.path.isdir(logdir): # Only initialize group/mode if the directory doesn't @@ -25,7 +27,10 @@ def process(mysettings, key, logentries, fulltext): # were previously set by the administrator. # NOTE: These permissions should be compatible with our # default logrotate config as discussed in bug 374287. - ensure_dirs(logdir, uid=portage_uid, gid=portage_gid, mode=0o2770) + logdir_uid = -1 + if portage.data.secpass >= 2: + logdir_uid = portage_uid + ensure_dirs(logdir, uid=logdir_uid, gid=portage_gid, mode=0o2770) elogdir = os.path.join(logdir, "elog") _ensure_log_subdirs(logdir, elogdir) @@ -40,7 +45,17 @@ def process(mysettings, key, logentries, fulltext): elogdir_st = os.stat(elogdir) elogdir_gid = elogdir_st.st_gid elogdir_grp_mode = 0o060 & elogdir_st.st_mode - apply_permissions(elogfilename, gid=elogdir_gid, + + # Copy the uid from the parent directory if we have privileges + # to do so, for compatibility with our default logrotate + # config (see bug 378451). With the "su portage portage" + # directive and logrotate-3.8.0, logrotate's chown call during + # the compression phase will only succeed if the log file's uid + # is portage_uid. 
+		logfile_uid = -1
+		if portage.data.secpass >= 2:
+			logfile_uid = elogdir_st.st_uid
+		apply_permissions(elogfilename, uid=logfile_uid, gid=elogdir_gid,
 			mode=elogdir_grp_mode, mask=0)
 	time_str = time.strftime("%Y-%m-%d %H:%M:%S %Z",
diff --git a/portage_with_autodep/pym/portage/elog/mod_save_summary.pyo b/portage_with_autodep/pym/portage/elog/mod_save_summary.pyo
Binary files differ
new file mode 100644
index 0000000..8f99c51
--- /dev/null
+++ b/portage_with_autodep/pym/portage/elog/mod_save_summary.pyo
diff --git a/portage_with_autodep/pym/portage/elog/mod_syslog.py b/portage_with_autodep/pym/portage/elog/mod_syslog.py
index d71dab4..c8bf441 100644
--- a/portage_with_autodep/pym/portage/elog/mod_syslog.py
+++ b/portage_with_autodep/pym/portage/elog/mod_syslog.py
@@ -1,5 +1,5 @@
 # elog/mod_syslog.py - elog dispatch module
-# Copyright 2006-2007 Gentoo Foundation
+# Copyright 2006-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 import sys
@@ -7,6 +7,9 @@
 import syslog
 from portage.const import EBUILD_PHASES
 from portage import _encodings
+if sys.hexversion >= 0x3000000:
+	basestring = str
+
 _pri = {
 	"INFO"   : syslog.LOG_INFO,
 	"WARN"   : syslog.LOG_WARNING,
@@ -21,12 +24,13 @@ def process(mysettings, key, logentries, fulltext):
 		if not phase in logentries:
 			continue
 		for msgtype,msgcontent in logentries[phase]:
-			msgtext = "".join(msgcontent)
-			for line in msgtext.splitlines():
+			if isinstance(msgcontent, basestring):
+				msgcontent = [msgcontent]
+			for line in msgcontent:
 				line = "%s: %s: %s" % (key, phase, line)
-				if sys.hexversion < 0x3000000 and isinstance(msgtext, unicode):
+				if sys.hexversion < 0x3000000 and not isinstance(line, bytes):
 					# Avoid TypeError from syslog.syslog()
 					line = line.encode(_encodings['content'], 'backslashreplace')
-				syslog.syslog(_pri[msgtype], line)
+				syslog.syslog(_pri[msgtype], line.rstrip("\n"))
 	syslog.closelog()
diff --git a/portage_with_autodep/pym/portage/elog/mod_syslog.pyo b/portage_with_autodep/pym/portage/elog/mod_syslog.pyo
Binary files differ
new file mode 100644
index 0000000..c7b4248
--- /dev/null
+++ b/portage_with_autodep/pym/portage/elog/mod_syslog.pyo
diff --git a/portage_with_autodep/pym/portage/emaint/__init__.py b/portage_with_autodep/pym/portage/emaint/__init__.py
new file mode 100644
index 0000000..48bc6e2
--- /dev/null
+++ b/portage_with_autodep/pym/portage/emaint/__init__.py
@@ -0,0 +1,5 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""System health checks and maintenance utilities.
+""" diff --git a/portage_with_autodep/pym/portage/emaint/defaults.py b/portage_with_autodep/pym/portage/emaint/defaults.py new file mode 100644 index 0000000..30f36af --- /dev/null +++ b/portage_with_autodep/pym/portage/emaint/defaults.py @@ -0,0 +1,25 @@ +# Copyright 2005-2013 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + +# parser option data +CHECK = {"short": "-c", "long": "--check", + "help": "Check for problems (a default option for most modules)", + 'status': "Checking %s for problems", + 'action': 'store_true', + 'func': 'check' + } + +FIX = {"short": "-f", "long": "--fix", + "help": "Attempt to fix problems (a default option for most modules)", + 'status': "Attempting to fix %s", + 'action': 'store_true', + 'func': 'fix' + } + +VERSION = {"long": "--version", + "help": "show program's version number and exit", + 'action': 'store_true', + } + +# parser options +DEFAULT_OPTIONS = {'check': CHECK, 'fix': FIX, 'version': VERSION} diff --git a/portage_with_autodep/pym/portage/emaint/main.py b/portage_with_autodep/pym/portage/emaint/main.py new file mode 100644 index 0000000..9f987fa --- /dev/null +++ b/portage_with_autodep/pym/portage/emaint/main.py @@ -0,0 +1,222 @@ +# Copyright 2005-2013 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + +from __future__ import print_function + + +import sys +import textwrap + +import portage +from portage import os +from portage.emaint.module import Modules +from portage.emaint.progress import ProgressBar +from portage.emaint.defaults import DEFAULT_OPTIONS +from portage.util._argparse import ArgumentParser + +class OptionItem(object): + """class to hold module ArgumentParser options data + """ + + def __init__(self, opt): + """ + @type opt: dictionary + @param opt: options parser options + """ + self.short = opt.get('short') + self.long = opt.get('long') + self.help = opt.get('help') + self.status = opt.get('status') + self.func = opt.get('func') + self.action = opt.get('action') + self.type = opt.get('type') + self.dest = opt.get('dest') + + @property + def pargs(self): + pargs = [] + if self.short is not None: + pargs.append(self.short) + if self.long is not None: + pargs.append(self.long) + return pargs + + @property + def kwargs(self): + # Support for keyword arguments varies depending on the action, + # so only pass in the keywords that are needed, in order + # to avoid a TypeError. + kwargs = {} + if self.help is not None: + kwargs['help'] = self.help + if self.action is not None: + kwargs['action'] = self.action + if self.type is not None: + kwargs['type'] = self.type + if self.dest is not None: + kwargs['dest'] = self.dest + return kwargs + +def usage(module_controller): + _usage = "usage: emaint [options] COMMAND" + + desc = "The emaint program provides an interface to system health " + \ + "checks and maintenance. 
See the emaint(1) man page " + \ + "for additional information about the following commands:" + + _usage += "\n\n" + for line in textwrap.wrap(desc, 65): + _usage += "%s\n" % line + _usage += "\nCommands:\n" + _usage += " %s" % "all".ljust(15) + \ + "Perform all supported commands\n" + textwrap.subsequent_indent = ' '.ljust(17) + for mod in module_controller.module_names: + desc = textwrap.wrap(module_controller.get_description(mod), 65) + _usage += " %s%s\n" % (mod.ljust(15), desc[0]) + for d in desc[1:]: + _usage += " %s%s\n" % (' '.ljust(15), d) + return _usage + + +def module_opts(module_controller, module): + _usage = " %s module options:\n" % module + opts = module_controller.get_func_descriptions(module) + if opts == {}: + opts = DEFAULT_OPTIONS + for opt in sorted(opts): + optd = opts[opt] + opto = " %s, %s" %(optd['short'], optd['long']) + _usage += '%s %s\n' % (opto.ljust(15),optd['help']) + _usage += '\n' + return _usage + + +class TaskHandler(object): + """Handles the running of the tasks it is given + """ + + def __init__(self, show_progress_bar=True, verbose=True, callback=None): + self.show_progress_bar = show_progress_bar + self.verbose = verbose + self.callback = callback + self.isatty = os.environ.get('TERM') != 'dumb' and sys.stdout.isatty() + self.progress_bar = ProgressBar(self.isatty, title="Emaint", max_desc_length=27) + + + def run_tasks(self, tasks, func, status=None, verbose=True, options=None): + """Runs the module tasks""" + if tasks is None or func is None: + return + for task in tasks: + inst = task() + show_progress = self.show_progress_bar and self.isatty + # check if the function is capable of progressbar + # and possibly override it off + if show_progress and hasattr(inst, 'can_progressbar'): + show_progress = inst.can_progressbar(func) + if show_progress: + self.progress_bar.reset() + self.progress_bar.set_label(func + " " + inst.name()) + onProgress = self.progress_bar.start() + else: + onProgress = None + kwargs = { + 'onProgress': onProgress, + # pass in a copy of the options so a module can not pollute or change + # them for other tasks if there is more to do. + 'options': options.copy() + } + result = getattr(inst, func)(**kwargs) + if show_progress: + # make sure the final progress is displayed + self.progress_bar.display() + print() + self.progress_bar.stop() + if self.callback: + self.callback(result) + + +def print_results(results): + if results: + print() + print("\n".join(results)) + print("\n") + + +def emaint_main(myargv): + + # Similar to emerge, emaint needs a default umask so that created + # files (such as the world file) have sane permissions. 
+ os.umask(0o22) + + module_controller = Modules(namepath="portage.emaint.modules") + module_names = module_controller.module_names[:] + module_names.insert(0, "all") + + + parser = ArgumentParser(usage=usage(module_controller)) + # add default options + parser_options = [] + for opt in DEFAULT_OPTIONS: + parser_options.append(OptionItem(DEFAULT_OPTIONS[opt])) + for mod in module_names[1:]: + desc = module_controller.get_func_descriptions(mod) + if desc: + for opt in desc: + parser_options.append(OptionItem(desc[opt])) + for opt in parser_options: + parser.add_argument(*opt.pargs, **opt.kwargs) + + options, args = parser.parse_known_args(args=myargv) + + if options.version: + print(portage.VERSION) + return os.EX_OK + + if len(args) != 1: + parser.error("Incorrect number of arguments") + if args[0] not in module_names: + parser.error("%s target is not a known target" % args[0]) + + check_opt = None + func = status = long_action = None + for opt in parser_options: + if opt.long == '--check': + # Default action + check_opt = opt + if opt.status and getattr(options, opt.long.lstrip("-"), False): + if long_action is not None: + parser.error("--%s and %s are exclusive options" % + (long_action, opt.long)) + status = opt.status + func = opt.func + long_action = opt.long.lstrip('-') + + if long_action is None: + long_action = 'check' + func = check_opt.func + status = check_opt.status + + if args[0] == "all": + tasks = [] + for m in module_names[1:]: + #print("DEBUG: module: %s, functions: " %(m, str(module_controller.get_functions(m)))) + if long_action in module_controller.get_functions(m): + tasks.append(module_controller.get_class(m)) + elif long_action in module_controller.get_functions(args[0]): + tasks = [module_controller.get_class(args[0] )] + else: + portage.util.writemsg( + "\nERROR: module '%s' does not have option '--%s'\n\n" % + (args[0], long_action), noiselevel=-1) + portage.util.writemsg(module_opts(module_controller, args[0]), + noiselevel=-1) + sys.exit(1) + + # need to pass the parser options dict to the modules + # so they are available if needed. 
+ task_opts = options.__dict__ + taskmaster = TaskHandler(callback=print_results) + taskmaster.run_tasks(tasks, func, status, options=task_opts) + diff --git a/portage_with_autodep/pym/portage/emaint/module.py b/portage_with_autodep/pym/portage/emaint/module.py new file mode 100644 index 0000000..64b0c64 --- /dev/null +++ b/portage_with_autodep/pym/portage/emaint/module.py @@ -0,0 +1,194 @@ +# Copyright 2005-2012 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + + +from __future__ import print_function + +from portage import os +from portage.exception import PortageException +from portage.cache.mappings import ProtectedDict + + +class InvalidModuleName(PortageException): + """An invalid or unknown module name.""" + + +class Module(object): + """Class to define and hold our plug-in module + + @type name: string + @param name: the module name + @type path: the path to the new module + """ + + def __init__(self, name, namepath): + """Some variables initialization""" + self.name = name + self._namepath = namepath + self.kids_names = [] + self.kids = {} + self.initialized = self._initialize() + + def _initialize(self): + """Initialize the plug-in module + + @rtype: boolean + """ + self.valid = False + try: + mod_name = ".".join([self._namepath, self.name]) + self._module = __import__(mod_name, [],[], ["not empty"]) + self.valid = True + except ImportError as e: + print("MODULE; failed import", mod_name, " error was:",e) + return False + self.module_spec = self._module.module_spec + for submodule in self.module_spec['provides']: + kid = self.module_spec['provides'][submodule] + kidname = kid['name'] + kid['module_name'] = '.'.join([mod_name, self.name]) + kid['is_imported'] = False + self.kids[kidname] = kid + self.kids_names.append(kidname) + return True + + def get_class(self, name): + if not name or name not in self.kids_names: + raise InvalidModuleName("Module name '%s' was invalid or not" + %name + "part of the module '%s'" %self.name) + kid = self.kids[name] + if kid['is_imported']: + module = kid['instance'] + else: + try: + module = __import__(kid['module_name'], [],[], ["not empty"]) + kid['instance'] = module + kid['is_imported'] = True + except ImportError: + raise + mod_class = getattr(module, kid['class']) + return mod_class + + +class Modules(object): + """Dynamic modules system for loading and retrieving any of the + installed emaint modules and/or provided class's + + @param path: Optional path to the "modules" directory or + defaults to the directory of this file + '/modules' + @param namepath: Optional python import path to the "modules" directory or + defaults to the directory name of this file + '.modules' + """ + + def __init__(self, path=None, namepath=None): + if path: + self._module_path = path + else: + self._module_path = os.path.join(( + os.path.dirname(os.path.realpath(__file__))), "modules") + if namepath: + self._namepath = namepath + else: + self._namepath = '.'.join(os.path.dirname( + os.path.realpath(__file__)), "modules") + self._modules = self._get_all_modules() + self.modules = ProtectedDict(self._modules) + self.module_names = sorted(self._modules) + #self.modules = {} + #for mod in self.module_names: + #self.module[mod] = LazyLoad( + + def _get_all_modules(self): + """scans the emaint modules dir for loadable modules + + @rtype: dictionary of module_plugins + """ + module_dir = self._module_path + importables = [] + names = os.listdir(module_dir) + for entry in names: + # skip any __init__ or __pycache__ files or 
directories
+			if entry.startswith('__'):
+				continue
+			try:
+				# test for statinfo to ensure it is a real module;
+				# it will bail if it errors
+				os.lstat(os.path.join(module_dir, entry, '__init__.py'))
+				importables.append(entry)
+			except EnvironmentError:
+				pass
+		kids = {}
+		for entry in importables:
+			new_module = Module(entry, self._namepath)
+			for module_name in new_module.kids:
+				kid = new_module.kids[module_name]
+				kid['parent'] = new_module
+				kids[kid['name']] = kid
+		return kids
+
+	def get_module_names(self):
+		"""Convenience function to return the list of installed modules
+		available
+
+		@rtype: list
+		@return: the installed module names available
+		"""
+		return self.module_names
+
+	def get_class(self, modname):
+		"""Retrieves the desired module class
+
+		@type modname: string
+		@param modname: the module class name
+		"""
+		if modname and modname in self.module_names:
+			mod = self._modules[modname]['parent'].get_class(modname)
+		else:
+			raise InvalidModuleName("Module name '%s' was invalid or not " % modname + "found")
+		return mod
+
+	def get_description(self, modname):
+		"""Retrieves the module class description
+
+		@type modname: string
+		@param modname: the module class name
+		@rtype: string
+		@return: the module class description
+		"""
+		if modname and modname in self.module_names:
+			mod = self._modules[modname]['description']
+		else:
+			raise InvalidModuleName("Module name '%s' was invalid or not " % modname + "found")
+		return mod
+
+	def get_functions(self, modname):
+		"""Retrieves the module class exported function names
+
+		@type modname: string
+		@param modname: the module class name
+		@rtype: list
+		@return: the module class exported function names
+		"""
+		if modname and modname in self.module_names:
+			mod = self._modules[modname]['functions']
+		else:
+			raise InvalidModuleName("Module name '%s' was invalid or not " % modname + "found")
+		return mod
+
+	def get_func_descriptions(self, modname):
+		"""Retrieves the module class exported functions descriptions
+
+		@type modname: string
+		@param modname: the module class name
+		@rtype: dictionary
+		@return: the module class exported functions descriptions
+		"""
+		if modname and modname in self.module_names:
+			desc = self._modules[modname]['func_desc']
+		else:
+			raise InvalidModuleName("Module name '%s' was invalid or not " % modname + "found")
+		return desc
diff --git a/portage_with_autodep/pym/portage/emaint/modules/__init__.py b/portage_with_autodep/pym/portage/emaint/modules/__init__.py
new file mode 100644
index 0000000..f67197d
--- /dev/null
+++ b/portage_with_autodep/pym/portage/emaint/modules/__init__.py
@@ -0,0 +1,5 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""Plug-in modules for system health checks and maintenance.
+"""
diff --git a/portage_with_autodep/pym/portage/emaint/modules/binhost/__init__.py b/portage_with_autodep/pym/portage/emaint/modules/binhost/__init__.py
new file mode 100644
index 0000000..c60e8bc
--- /dev/null
+++ b/portage_with_autodep/pym/portage/emaint/modules/binhost/__init__.py
@@ -0,0 +1,20 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""Scan and generate metadata indexes for binary packages.
+""" + + +module_spec = { + 'name': 'binhost', + 'description': __doc__, + 'provides':{ + 'module1': { + 'name': "binhost", + 'class': "BinhostHandler", + 'description': __doc__, + 'functions': ['check', 'fix'], + 'func_desc': {} + } + } + } diff --git a/portage_with_autodep/pym/portage/emaint/modules/binhost/binhost.py b/portage_with_autodep/pym/portage/emaint/modules/binhost/binhost.py new file mode 100644 index 0000000..c297545 --- /dev/null +++ b/portage_with_autodep/pym/portage/emaint/modules/binhost/binhost.py @@ -0,0 +1,163 @@ +# Copyright 2005-2012 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + +import errno +import stat + +import portage +from portage import os +from portage.util import writemsg + +import sys +if sys.hexversion >= 0x3000000: + long = int + +class BinhostHandler(object): + + short_desc = "Generate a metadata index for binary packages" + + def name(): + return "binhost" + name = staticmethod(name) + + def __init__(self): + eroot = portage.settings['EROOT'] + self._bintree = portage.db[eroot]["bintree"] + self._bintree.populate() + self._pkgindex_file = self._bintree._pkgindex_file + self._pkgindex = self._bintree._load_pkgindex() + + def _need_update(self, cpv, data): + + if "MD5" not in data: + return True + + size = data.get("SIZE") + if size is None: + return True + + mtime = data.get("MTIME") + if mtime is None: + return True + + pkg_path = self._bintree.getname(cpv) + try: + s = os.lstat(pkg_path) + except OSError as e: + if e.errno not in (errno.ENOENT, errno.ESTALE): + raise + # We can't update the index for this one because + # it disappeared. + return False + + try: + if long(mtime) != s[stat.ST_MTIME]: + return True + if long(size) != long(s.st_size): + return True + except ValueError: + return True + + return False + + def check(self, **kwargs): + onProgress = kwargs.get('onProgress', None) + missing = [] + cpv_all = self._bintree.dbapi.cpv_all() + cpv_all.sort() + maxval = len(cpv_all) + if onProgress: + onProgress(maxval, 0) + pkgindex = self._pkgindex + missing = [] + metadata = {} + for d in pkgindex.packages: + metadata[d["CPV"]] = d + for i, cpv in enumerate(cpv_all): + d = metadata.get(cpv) + if not d or self._need_update(cpv, d): + missing.append(cpv) + if onProgress: + onProgress(maxval, i+1) + errors = ["'%s' is not in Packages" % cpv for cpv in missing] + stale = set(metadata).difference(cpv_all) + for cpv in stale: + errors.append("'%s' is not in the repository" % cpv) + return errors + + def fix(self, **kwargs): + onProgress = kwargs.get('onProgress', None) + bintree = self._bintree + cpv_all = self._bintree.dbapi.cpv_all() + cpv_all.sort() + missing = [] + maxval = 0 + if onProgress: + onProgress(maxval, 0) + pkgindex = self._pkgindex + missing = [] + metadata = {} + for d in pkgindex.packages: + metadata[d["CPV"]] = d + + for i, cpv in enumerate(cpv_all): + d = metadata.get(cpv) + if not d or self._need_update(cpv, d): + missing.append(cpv) + + stale = set(metadata).difference(cpv_all) + if missing or stale: + from portage import locks + pkgindex_lock = locks.lockfile( + self._pkgindex_file, wantnewlockfile=1) + try: + # Repopulate with lock held. + bintree._populate() + cpv_all = self._bintree.dbapi.cpv_all() + cpv_all.sort() + + pkgindex = bintree._load_pkgindex() + self._pkgindex = pkgindex + + metadata = {} + for d in pkgindex.packages: + metadata[d["CPV"]] = d + + # Recount missing packages, with lock held. 
+ del missing[:] + for i, cpv in enumerate(cpv_all): + d = metadata.get(cpv) + if not d or self._need_update(cpv, d): + missing.append(cpv) + + maxval = len(missing) + for i, cpv in enumerate(missing): + try: + metadata[cpv] = bintree._pkgindex_entry(cpv) + except portage.exception.InvalidDependString: + writemsg("!!! Invalid binary package: '%s'\n" % \ + bintree.getname(cpv), noiselevel=-1) + + if onProgress: + onProgress(maxval, i+1) + + for cpv in set(metadata).difference( + self._bintree.dbapi.cpv_all()): + del metadata[cpv] + + # We've updated the pkgindex, so set it to + # repopulate when necessary. + bintree.populated = False + + del pkgindex.packages[:] + pkgindex.packages.extend(metadata.values()) + bintree._pkgindex_write(self._pkgindex) + + finally: + locks.unlockfile(pkgindex_lock) + + if onProgress: + if maxval == 0: + maxval = 1 + onProgress(maxval, maxval) + return None diff --git a/portage_with_autodep/pym/portage/emaint/modules/config/__init__.py b/portage_with_autodep/pym/portage/emaint/modules/config/__init__.py new file mode 100644 index 0000000..f0585b3 --- /dev/null +++ b/portage_with_autodep/pym/portage/emaint/modules/config/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2005-2012 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + +"""Check and clean the config tracker list for uninstalled packages. +""" + + +module_spec = { + 'name': 'config', + 'description': __doc__, + 'provides':{ + 'module1': { + 'name': "cleanconfmem", + 'class': "CleanConfig", + 'description': __doc__, + 'functions': ['check', 'fix'], + 'func_desc': {} + } + } + } diff --git a/portage_with_autodep/pym/portage/emaint/modules/config/config.py b/portage_with_autodep/pym/portage/emaint/modules/config/config.py new file mode 100644 index 0000000..dad024b --- /dev/null +++ b/portage_with_autodep/pym/portage/emaint/modules/config/config.py @@ -0,0 +1,79 @@ +# Copyright 2005-2012 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + +import portage +from portage import os +from portage.const import PRIVATE_PATH +from portage.util import grabdict, writedict + +class CleanConfig(object): + + short_desc = "Discard any no longer installed configs from emerge's tracker list" + + def __init__(self): + self._root = portage.settings["ROOT"] + self.target = os.path.join(portage.settings["EROOT"], PRIVATE_PATH, 'config') + + def name(): + return "cleanconfmem" + name = staticmethod(name) + + def load_configlist(self): + return grabdict(self.target) + + def check(self, **kwargs): + onProgress = kwargs.get('onProgress', None) + configs = self.load_configlist() + messages = [] + maxval = len(configs) + if onProgress: + onProgress(maxval, 0) + i = 0 + keys = sorted(configs) + for config in keys: + if not os.path.exists(config): + messages.append(" %s" % config) + if onProgress: + onProgress(maxval, i+1) + i += 1 + return self._format_output(messages) + + def fix(self, **kwargs): + onProgress = kwargs.get('onProgress', None) + configs = self.load_configlist() + messages = [] + maxval = len(configs) + if onProgress: + onProgress(maxval, 0) + i = 0 + + root = self._root + if root == "/": + root = None + modified = False + for config in sorted(configs): + if root is None: + full_path = config + else: + full_path = os.path.join(root, config.lstrip(os.sep)) + if not os.path.exists(full_path): + modified = True + configs.pop(config) + messages.append(" %s" % config) + if onProgress: + onProgress(maxval, i+1) + i += 1 + if modified: + writedict(configs, 
self.target) + return self._format_output(messages, True) + + def _format_output(self, messages=[], cleaned=False): + output = [] + if messages: + output.append('Not Installed:') + output += messages + tot = '------------------------------------\n Total %i Not installed' + if cleaned: + tot += ' ...Cleaned' + output.append(tot % len(messages)) + return output diff --git a/portage_with_autodep/pym/portage/emaint/modules/logs/__init__.py b/portage_with_autodep/pym/portage/emaint/modules/logs/__init__.py new file mode 100644 index 0000000..0407efe --- /dev/null +++ b/portage_with_autodep/pym/portage/emaint/modules/logs/__init__.py @@ -0,0 +1,45 @@ +# Copyright 2005-2013 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + +"""Check and clean old logs in the PORT_LOGDIR. +""" + + +module_spec = { + 'name': 'logs', + 'description': __doc__, + 'provides':{ + 'module1': { + 'name': "logs", + 'class': "CleanLogs", + 'description': __doc__, + 'functions': ['check','clean'], + 'func_desc': { + 'clean': { + "short": "-C", "long": "--clean", + "help": "Cleans out logs more than 7 days old (cleanlogs only)" + \ + " module-options: -t, -p", + 'status': "Cleaning %s", + 'action': 'store_true', + 'func': 'clean', + }, + 'time': { + "short": "-t", "long": "--time", + "help": "(cleanlogs only): -t, --time Delete logs older than NUM of days", + 'status': "", + 'type': int, + 'dest': 'NUM', + 'func': 'clean' + }, + 'pretend': { + "short": "-p", "long": "--pretend", + "help": "(cleanlogs only): -p, --pretend Output logs that would be deleted", + 'status': "", + 'action': 'store_true', + 'dest': 'pretend', + 'func': 'clean' + } + } + } + } + } diff --git a/portage_with_autodep/pym/portage/emaint/modules/logs/logs.py b/portage_with_autodep/pym/portage/emaint/modules/logs/logs.py new file mode 100644 index 0000000..fe65cf5 --- /dev/null +++ b/portage_with_autodep/pym/portage/emaint/modules/logs/logs.py @@ -0,0 +1,103 @@ +# Copyright 2005-2012 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + +import portage +from portage import os +from portage.util import shlex_split, varexpand + +## default clean command from make.globals +## PORT_LOGDIR_CLEAN = 'find "${PORT_LOGDIR}" -type f ! -name "summary.log*" -mtime +7 -delete' + +class CleanLogs(object): + + short_desc = "Clean PORT_LOGDIR logs" + + def name(): + return "logs" + name = staticmethod(name) + + + def can_progressbar(self, func): + return False + + + def check(self, **kwargs): + if kwargs: + options = kwargs.get('options', None) + if options: + options['pretend'] = True + return self.clean(**kwargs) + + + def clean(self, **kwargs): + """Log directory cleaning function + + @param **kwargs: optional dictionary of values used in this function are: + settings: portage settings instance: defaults to portage.settings + "PORT_LOGDIR": directory to clean + "PORT_LOGDIR_CLEAN": command for cleaning the logs. 
+ options: dict:
+ 'NUM': int: number of days
+ 'pretend': boolean
+ """
+ messages = []
+ num_of_days = None
+ pretend = False
+ if kwargs:
+ # convoluted, I know, but the 'settings' key may be missing from
+ # kwargs when this is called from _emerge.main.clean_logs()
+ settings = kwargs.get('settings', None)
+ if not settings:
+ settings = portage.settings
+ options = kwargs.get('options', None)
+ if options:
+ num_of_days = options.get('NUM', None)
+ pretend = options.get('pretend', False)
+
+ clean_cmd = settings.get("PORT_LOGDIR_CLEAN")
+ if clean_cmd:
+ clean_cmd = shlex_split(clean_cmd)
+ if '-mtime' in clean_cmd and num_of_days is not None:
+ if num_of_days == 0:
+ i = clean_cmd.index('-mtime')
+ clean_cmd.remove('-mtime')
+ clean_cmd.pop(i)
+ else:
+ clean_cmd[clean_cmd.index('-mtime') +1] = \
+ '+%s' % str(num_of_days)
+ if pretend:
+ if "-delete" in clean_cmd:
+ clean_cmd.remove("-delete")
+
+ if not clean_cmd:
+ return []
+ rval = self._clean_logs(clean_cmd, settings)
+ messages += self._convert_errors(rval)
+ return messages
+
+
+ @staticmethod
+ def _clean_logs(clean_cmd, settings):
+ logdir = settings.get("PORT_LOGDIR")
+ if logdir is None or not os.path.isdir(logdir):
+ return
+
+ variables = {"PORT_LOGDIR" : logdir}
+ cmd = [varexpand(x, mydict=variables) for x in clean_cmd]
+
+ try:
+ rval = portage.process.spawn(cmd, env=os.environ)
+ except portage.exception.CommandNotFound:
+ rval = 127
+ return rval
+
+
+ @staticmethod
+ def _convert_errors(rval):
+ msg = []
+ if rval != os.EX_OK:
+ msg.append("PORT_LOGDIR_CLEAN command returned %s"
+ % ("%d" % rval if rval else "None"))
+ msg.append("See the make.conf(5) man page for "
+ "PORT_LOGDIR_CLEAN usage instructions.")
+ return msg
diff --git a/portage_with_autodep/pym/portage/emaint/modules/move/__init__.py b/portage_with_autodep/pym/portage/emaint/modules/move/__init__.py
new file mode 100644
index 0000000..d31d7b3
--- /dev/null
+++ b/portage_with_autodep/pym/portage/emaint/modules/move/__init__.py
@@ -0,0 +1,30 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""Perform package move updates for installed and binary packages.
+""" + + +module_spec = { + 'name': 'move', + 'description': __doc__, + 'provides':{ + 'module1': { + 'name': "moveinst", + 'class': "MoveInstalled", + 'description': __doc__, + 'options': ['check', 'fix'], + 'functions': ['check', 'fix'], + 'func_desc': { + } + }, + 'module2':{ + 'name': "movebin", + 'class': "MoveBinary", + 'description': "Perform package move updates for binary packages", + 'functions': ['check', 'fix'], + 'func_desc': { + } + } + } + } diff --git a/portage_with_autodep/pym/portage/emaint/modules/move/move.py b/portage_with_autodep/pym/portage/emaint/modules/move/move.py new file mode 100644 index 0000000..ef674d4 --- /dev/null +++ b/portage_with_autodep/pym/portage/emaint/modules/move/move.py @@ -0,0 +1,180 @@ +# Copyright 2005-2012 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + +import portage +from portage import os +from portage.exception import InvalidData +from _emerge.Package import Package +from portage.versions import _pkg_str + +class MoveHandler(object): + + def __init__(self, tree, porttree): + self._tree = tree + self._portdb = porttree.dbapi + self._update_keys = Package._dep_keys + ("PROVIDE",) + self._master_repo = \ + self._portdb.getRepositoryName(self._portdb.porttree_root) + + def _grab_global_updates(self): + from portage.update import grab_updates, parse_updates + retupdates = {} + errors = [] + + for repo_name in self._portdb.getRepositories(): + repo = self._portdb.getRepositoryPath(repo_name) + updpath = os.path.join(repo, "profiles", "updates") + if not os.path.isdir(updpath): + continue + + try: + rawupdates = grab_updates(updpath) + except portage.exception.DirectoryNotFound: + rawupdates = [] + upd_commands = [] + for mykey, mystat, mycontent in rawupdates: + commands, errors = parse_updates(mycontent) + upd_commands.extend(commands) + errors.extend(errors) + retupdates[repo_name] = upd_commands + + if self._master_repo in retupdates: + retupdates['DEFAULT'] = retupdates[self._master_repo] + + return retupdates, errors + + def check(self, **kwargs): + onProgress = kwargs.get('onProgress', None) + allupdates, errors = self._grab_global_updates() + # Matching packages and moving them is relatively fast, so the + # progress bar is updated in indeterminate mode. + match = self._tree.dbapi.match + aux_get = self._tree.dbapi.aux_get + pkg_str = self._tree.dbapi._pkg_str + settings = self._tree.dbapi.settings + if onProgress: + onProgress(0, 0) + for repo, updates in allupdates.items(): + if repo == 'DEFAULT': + continue + if not updates: + continue + + def repo_match(repository): + return repository == repo or \ + (repo == self._master_repo and \ + repository not in allupdates) + + for i, update_cmd in enumerate(updates): + if update_cmd[0] == "move": + origcp, newcp = update_cmd[1:] + for cpv in match(origcp): + try: + cpv = pkg_str(cpv, origcp.repo) + except (KeyError, InvalidData): + continue + if repo_match(cpv.repo): + errors.append("'%s' moved to '%s'" % (cpv, newcp)) + elif update_cmd[0] == "slotmove": + pkg, origslot, newslot = update_cmd[1:] + atom = pkg.with_slot(origslot) + for cpv in match(atom): + try: + cpv = pkg_str(cpv, atom.repo) + except (KeyError, InvalidData): + continue + if repo_match(cpv.repo): + errors.append("'%s' slot moved from '%s' to '%s'" % \ + (cpv, origslot, newslot)) + if onProgress: + onProgress(0, 0) + + # Searching for updates in all the metadata is relatively slow, so this + # is where the progress bar comes out of indeterminate mode. 
+ cpv_all = self._tree.dbapi.cpv_all() + cpv_all.sort() + maxval = len(cpv_all) + meta_keys = self._update_keys + self._portdb._pkg_str_aux_keys + if onProgress: + onProgress(maxval, 0) + for i, cpv in enumerate(cpv_all): + try: + metadata = dict(zip(meta_keys, aux_get(cpv, meta_keys))) + except KeyError: + continue + try: + pkg = _pkg_str(cpv, metadata=metadata, settings=settings) + except InvalidData: + continue + metadata = dict((k, metadata[k]) for k in self._update_keys) + try: + updates = allupdates[pkg.repo] + except KeyError: + try: + updates = allupdates['DEFAULT'] + except KeyError: + continue + if not updates: + continue + metadata_updates = \ + portage.update_dbentries(updates, metadata, parent=pkg) + if metadata_updates: + errors.append("'%s' has outdated metadata" % cpv) + if onProgress: + onProgress(maxval, i+1) + return errors + + def fix(self, **kwargs): + onProgress = kwargs.get('onProgress', None) + allupdates, errors = self._grab_global_updates() + # Matching packages and moving them is relatively fast, so the + # progress bar is updated in indeterminate mode. + move = self._tree.dbapi.move_ent + slotmove = self._tree.dbapi.move_slot_ent + if onProgress: + onProgress(0, 0) + for repo, updates in allupdates.items(): + if repo == 'DEFAULT': + continue + if not updates: + continue + + def repo_match(repository): + return repository == repo or \ + (repo == self._master_repo and \ + repository not in allupdates) + + for i, update_cmd in enumerate(updates): + if update_cmd[0] == "move": + move(update_cmd, repo_match=repo_match) + elif update_cmd[0] == "slotmove": + slotmove(update_cmd, repo_match=repo_match) + if onProgress: + onProgress(0, 0) + + # Searching for updates in all the metadata is relatively slow, so this + # is where the progress bar comes out of indeterminate mode. + self._tree.dbapi.update_ents(allupdates, onProgress=onProgress) + return errors + +class MoveInstalled(MoveHandler): + + short_desc = "Perform package move updates for installed packages" + + def name(): + return "moveinst" + name = staticmethod(name) + def __init__(self): + eroot = portage.settings['EROOT'] + MoveHandler.__init__(self, portage.db[eroot]["vartree"], portage.db[eroot]["porttree"]) + +class MoveBinary(MoveHandler): + + short_desc = "Perform package move updates for binary packages" + + def name(): + return "movebin" + name = staticmethod(name) + def __init__(self): + eroot = portage.settings['EROOT'] + MoveHandler.__init__(self, portage.db[eroot]["bintree"], portage.db[eroot]['porttree']) diff --git a/portage_with_autodep/pym/portage/emaint/modules/resume/__init__.py b/portage_with_autodep/pym/portage/emaint/modules/resume/__init__.py new file mode 100644 index 0000000..965e8f9 --- /dev/null +++ b/portage_with_autodep/pym/portage/emaint/modules/resume/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2005-2012 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + +"""Check and fix problems in the resume and/or resume_backup files. 
+""" + + +module_spec = { + 'name': 'resume', + 'description': __doc__, + 'provides':{ + 'module1': { + 'name': "cleanresume", + 'class': "CleanResume", + 'description': "Discard emerge --resume merge lists", + 'functions': ['check', 'fix'], + 'func_desc': {} + } + } + } diff --git a/portage_with_autodep/pym/portage/emaint/modules/resume/resume.py b/portage_with_autodep/pym/portage/emaint/modules/resume/resume.py new file mode 100644 index 0000000..1bada52 --- /dev/null +++ b/portage_with_autodep/pym/portage/emaint/modules/resume/resume.py @@ -0,0 +1,58 @@ +# Copyright 2005-2012 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + +import portage + + +class CleanResume(object): + + short_desc = "Discard emerge --resume merge lists" + + def name(): + return "cleanresume" + name = staticmethod(name) + + def check(self, **kwargs): + onProgress = kwargs.get('onProgress', None) + messages = [] + mtimedb = portage.mtimedb + resume_keys = ("resume", "resume_backup") + maxval = len(resume_keys) + if onProgress: + onProgress(maxval, 0) + for i, k in enumerate(resume_keys): + try: + d = mtimedb.get(k) + if d is None: + continue + if not isinstance(d, dict): + messages.append("unrecognized resume list: '%s'" % k) + continue + mergelist = d.get("mergelist") + if mergelist is None or not hasattr(mergelist, "__len__"): + messages.append("unrecognized resume list: '%s'" % k) + continue + messages.append("resume list '%s' contains %d packages" % \ + (k, len(mergelist))) + finally: + if onProgress: + onProgress(maxval, i+1) + return messages + + def fix(self, **kwargs): + onProgress = kwargs.get('onProgress', None) + delete_count = 0 + mtimedb = portage.mtimedb + resume_keys = ("resume", "resume_backup") + maxval = len(resume_keys) + if onProgress: + onProgress(maxval, 0) + for i, k in enumerate(resume_keys): + try: + if mtimedb.pop(k, None) is not None: + delete_count += 1 + finally: + if onProgress: + onProgress(maxval, i+1) + if delete_count: + mtimedb.commit() diff --git a/portage_with_autodep/pym/portage/emaint/modules/world/__init__.py b/portage_with_autodep/pym/portage/emaint/modules/world/__init__.py new file mode 100644 index 0000000..3f62270 --- /dev/null +++ b/portage_with_autodep/pym/portage/emaint/modules/world/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2005-2012 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + +"""Check and fix problems in the world file. 
+""" + + +module_spec = { + 'name': 'world', + 'description': __doc__, + 'provides':{ + 'module1':{ + 'name': "world", + 'class': "WorldHandler", + 'description': __doc__, + 'functions': ['check', 'fix'], + 'func_desc': {} + } + } + } diff --git a/portage_with_autodep/pym/portage/emaint/modules/world/world.py b/portage_with_autodep/pym/portage/emaint/modules/world/world.py new file mode 100644 index 0000000..2c9dbff --- /dev/null +++ b/portage_with_autodep/pym/portage/emaint/modules/world/world.py @@ -0,0 +1,89 @@ +# Copyright 2005-2012 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + +import portage +from portage import os + + +class WorldHandler(object): + + short_desc = "Fix problems in the world file" + + def name(): + return "world" + name = staticmethod(name) + + def __init__(self): + self.invalid = [] + self.not_installed = [] + self.okay = [] + from portage._sets import load_default_config + setconfig = load_default_config(portage.settings, + portage.db[portage.settings['EROOT']]) + self._sets = setconfig.getSets() + + def _check_world(self, onProgress): + eroot = portage.settings['EROOT'] + self.world_file = os.path.join(eroot, portage.const.WORLD_FILE) + self.found = os.access(self.world_file, os.R_OK) + vardb = portage.db[eroot]["vartree"].dbapi + + from portage._sets import SETPREFIX + sets = self._sets + world_atoms = list(sets["selected"]) + maxval = len(world_atoms) + if onProgress: + onProgress(maxval, 0) + for i, atom in enumerate(world_atoms): + if not isinstance(atom, portage.dep.Atom): + if atom.startswith(SETPREFIX): + s = atom[len(SETPREFIX):] + if s in sets: + self.okay.append(atom) + else: + self.not_installed.append(atom) + else: + self.invalid.append(atom) + if onProgress: + onProgress(maxval, i+1) + continue + okay = True + if not vardb.match(atom): + self.not_installed.append(atom) + okay = False + if okay: + self.okay.append(atom) + if onProgress: + onProgress(maxval, i+1) + + def check(self, **kwargs): + onProgress = kwargs.get('onProgress', None) + self._check_world(onProgress) + errors = [] + if self.found: + errors += ["'%s' is not a valid atom" % x for x in self.invalid] + errors += ["'%s' is not installed" % x for x in self.not_installed] + else: + errors.append(self.world_file + " could not be opened for reading") + return errors + + def fix(self, **kwargs): + onProgress = kwargs.get('onProgress', None) + world_set = self._sets["selected"] + world_set.lock() + try: + world_set.load() # maybe it's changed on disk + before = set(world_set) + self._check_world(onProgress) + after = set(self.okay) + errors = [] + if before != after: + try: + world_set.replace(self.okay) + except portage.exception.PortageException: + errors.append("%s could not be opened for writing" % \ + self.world_file) + return errors + finally: + world_set.unlock() + diff --git a/portage_with_autodep/pym/portage/emaint/progress.py b/portage_with_autodep/pym/portage/emaint/progress.py new file mode 100644 index 0000000..e43c2af --- /dev/null +++ b/portage_with_autodep/pym/portage/emaint/progress.py @@ -0,0 +1,61 @@ +# Copyright 2005-2012 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + +import time +import signal + +import portage + + +class ProgressHandler(object): + def __init__(self): + self.reset() + + def reset(self): + self.curval = 0 + self.maxval = 0 + self.last_update = 0 + self.min_display_latency = 0.2 + + def onProgress(self, maxval, curval): + self.maxval = maxval + self.curval = curval + cur_time = 
time.time() + if cur_time - self.last_update >= self.min_display_latency: + self.last_update = cur_time + self.display() + + def display(self): + raise NotImplementedError(self) + + +class ProgressBar(ProgressHandler): + """Class to set up and return a Progress Bar""" + + def __init__(self, isatty, **kwargs): + self.isatty = isatty + self.kwargs = kwargs + ProgressHandler.__init__(self) + self.progressBar = None + + def start(self): + if self.isatty: + self.progressBar = portage.output.TermProgressBar(**self.kwargs) + signal.signal(signal.SIGWINCH, self.sigwinch_handler) + else: + self.onProgress = None + return self.onProgress + + def set_label(self, _label): + self.kwargs['label'] = _label + + def display(self): + self.progressBar.set(self.curval, self.maxval) + + def sigwinch_handler(self, signum, frame): + lines, self.progressBar.term_columns = \ + portage.output.get_term_size() + + def stop(self): + signal.signal(signal.SIGWINCH, signal.SIG_DFL) + diff --git a/portage_with_autodep/pym/portage/env/__init__.pyo b/portage_with_autodep/pym/portage/env/__init__.pyo Binary files differnew file mode 100644 index 0000000..846aea3 --- /dev/null +++ b/portage_with_autodep/pym/portage/env/__init__.pyo diff --git a/portage_with_autodep/pym/portage/env/config.pyo b/portage_with_autodep/pym/portage/env/config.pyo Binary files differnew file mode 100644 index 0000000..13c2e86 --- /dev/null +++ b/portage_with_autodep/pym/portage/env/config.pyo diff --git a/portage_with_autodep/pym/portage/env/loaders.py b/portage_with_autodep/pym/portage/env/loaders.py index b540fbb..372bc12 100644 --- a/portage_with_autodep/pym/portage/env/loaders.py +++ b/portage_with_autodep/pym/portage/env/loaders.py @@ -40,7 +40,7 @@ def RecursiveFileLoader(filename): @param filename: name of a file/directory to traverse @rtype: list - @returns: List of files to process + @return: List of files to process """ try: @@ -139,7 +139,7 @@ class FileLoader(DataLoader): load all files in self.fname @type: Boolean @rtype: tuple - @returns: + @return: Returns (data,errors), both may be empty dicts or populated. 
""" data = {} diff --git a/portage_with_autodep/pym/portage/env/loaders.pyo b/portage_with_autodep/pym/portage/env/loaders.pyo Binary files differnew file mode 100644 index 0000000..2622a9f --- /dev/null +++ b/portage_with_autodep/pym/portage/env/loaders.pyo diff --git a/portage_with_autodep/pym/portage/env/validators.pyo b/portage_with_autodep/pym/portage/env/validators.pyo Binary files differnew file mode 100644 index 0000000..cd18adb --- /dev/null +++ b/portage_with_autodep/pym/portage/env/validators.pyo diff --git a/portage_with_autodep/pym/portage/exception.py b/portage_with_autodep/pym/portage/exception.py index 7891120..5ccd750 100644 --- a/portage_with_autodep/pym/portage/exception.py +++ b/portage_with_autodep/pym/portage/exception.py @@ -78,6 +78,10 @@ class OperationNotPermitted(PortageException): from errno import EPERM as errno """An operation was not permitted operating system""" +class OperationNotSupported(PortageException): + from errno import EOPNOTSUPP as errno + """Operation not supported""" + class PermissionDenied(PortageException): from errno import EACCES as errno """Permission denied""" diff --git a/portage_with_autodep/pym/portage/exception.pyo b/portage_with_autodep/pym/portage/exception.pyo Binary files differnew file mode 100644 index 0000000..3a60e7c --- /dev/null +++ b/portage_with_autodep/pym/portage/exception.pyo diff --git a/portage_with_autodep/pym/portage/getbinpkg.py b/portage_with_autodep/pym/portage/getbinpkg.py index a511f51..212f788 100644 --- a/portage_with_autodep/pym/portage/getbinpkg.py +++ b/portage_with_autodep/pym/portage/getbinpkg.py @@ -1,5 +1,5 @@ # getbinpkg.py -- Portage binary-package helper functions -# Copyright 2003-2011 Gentoo Foundation +# Copyright 2003-2012 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 from portage.output import colorize @@ -8,7 +8,10 @@ from portage.localization import _ import portage from portage import os from portage import _encodings +from portage import _unicode_decode from portage import _unicode_encode +from portage.package.ebuild.fetch import _hide_url_passwd +from _emerge.Package import _all_metadata_keys import sys import socket @@ -65,8 +68,15 @@ def make_metadata_dict(data): myid,myglob = data mydict = {} - for x in portage.xpak.getindex_mem(myid): - mydict[x] = portage.xpak.getitem(data,x) + for k_bytes in portage.xpak.getindex_mem(myid): + k = _unicode_decode(k_bytes, + encoding=_encodings['repo.content'], errors='replace') + if k not in _all_metadata_keys and \ + k != "CATEGORY": + continue + v = _unicode_decode(portage.xpak.getitem(data, k_bytes), + encoding=_encodings['repo.content'], errors='replace') + mydict[k] = v return mydict @@ -149,11 +159,16 @@ def create_conn(baseurl,conn=None): http_headers = {} http_params = {} if username and password: + try: + encodebytes = base64.encodebytes + except AttributeError: + # Python 2 + encodebytes = base64.encodestring http_headers = { - "Authorization": "Basic %s" % - base64.encodestring("%s:%s" % (username, password)).replace( - "\012", - "" + b"Authorization": "Basic %s" % \ + encodebytes(_unicode_encode("%s:%s" % (username, password))).replace( + b"\012", + b"" ), } @@ -354,7 +369,7 @@ def dir_get_list(baseurl,conn=None): if page: parser = ParseLinks() - parser.feed(page) + parser.feed(_unicode_decode(page)) del page listing = parser.get_anchors() else: @@ -542,7 +557,7 @@ def dir_get_metadata(baseurl, conn=None, chunk_size=3000, verbose=1, usingcache= out.write(_("Loaded metadata pickle.\n")) out.flush() 
metadatafile.close() - except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError): + except (AttributeError, EOFError, EnvironmentError, ValueError, pickle.UnpicklingError): metadata = {} if baseurl not in metadata: metadata[baseurl]={} @@ -564,7 +579,8 @@ def dir_get_metadata(baseurl, conn=None, chunk_size=3000, verbose=1, usingcache= try: filelist = dir_get_list(baseurl, conn) except portage.exception.PortageException as e: - sys.stderr.write(_("!!! Error connecting to '%s'.\n") % baseurl) + sys.stderr.write(_("!!! Error connecting to '%s'.\n") % + _hide_url_passwd(baseurl)) sys.stderr.write("!!! %s\n" % str(e)) del e return metadata[baseurl]["data"] diff --git a/portage_with_autodep/pym/portage/getbinpkg.pyo b/portage_with_autodep/pym/portage/getbinpkg.pyo Binary files differnew file mode 100644 index 0000000..53ec2e9 --- /dev/null +++ b/portage_with_autodep/pym/portage/getbinpkg.pyo diff --git a/portage_with_autodep/pym/portage/glsa.py b/portage_with_autodep/pym/portage/glsa.py index a784d14..1857695 100644 --- a/portage_with_autodep/pym/portage/glsa.py +++ b/portage_with_autodep/pym/portage/glsa.py @@ -1,4 +1,4 @@ -# Copyright 2003-2011 Gentoo Foundation +# Copyright 2003-2012 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 from __future__ import absolute_import @@ -17,7 +17,7 @@ from portage import os from portage import _encodings from portage import _unicode_decode from portage import _unicode_encode -from portage.versions import pkgsplit, catpkgsplit, pkgcmp, best +from portage.versions import pkgsplit, vercmp, best from portage.util import grabfile from portage.const import CACHE_PATH from portage.localization import _ @@ -372,17 +372,14 @@ def getMinUpgrade(vulnerableList, unaffectedList, portdbapi, vardbapi, minimize= for u in unaffectedList: mylist = match(u, portdbapi, match_type="match-all") for c in mylist: - c_pv = catpkgsplit(c) - i_pv = catpkgsplit(best(v_installed)) - if pkgcmp(c_pv[1:], i_pv[1:]) > 0 \ + i = best(v_installed) + if vercmp(c.version, i.version) > 0 \ and (rValue == None \ or not match("="+rValue, portdbapi) \ - or (minimize ^ (pkgcmp(c_pv[1:], catpkgsplit(rValue)[1:]) > 0)) \ + or (minimize ^ (vercmp(c.version, rValue.version) > 0)) \ and match("="+c, portdbapi)) \ and portdbapi.aux_get(c, ["SLOT"]) == vardbapi.aux_get(best(v_installed), ["SLOT"]): - rValue = c_pv[0]+"/"+c_pv[1]+"-"+c_pv[2] - if c_pv[3] != "r0": # we don't like -r0 for display - rValue += "-"+c_pv[3] + rValue = c return rValue def format_date(datestr): @@ -488,7 +485,7 @@ class Glsa: @type myfile: String @param myfile: Filename to grab the XML data from @rtype: None - @returns: None + @return: None """ self.DOM = xml.dom.minidom.parse(myfile) if not self.DOM.doctype: @@ -634,7 +631,7 @@ class Glsa: architectures. @rtype: Boolean - @returns: True if the system is affected, False if not + @return: True if the system is affected, False if not """ rValue = False for k in self.packages: @@ -654,7 +651,7 @@ class Glsa: GLSA was already applied. @rtype: Boolean - @returns: True if the GLSA was applied, False if not + @return: True if the GLSA was applied, False if not """ return (self.nr in get_applied_glsas(self.config)) @@ -665,7 +662,7 @@ class Glsa: applied or on explicit user request. 
@rtype: None - @returns: None + @return: None """ if not self.isApplied(): checkfile = io.open( diff --git a/portage_with_autodep/pym/portage/glsa.pyo b/portage_with_autodep/pym/portage/glsa.pyo Binary files differnew file mode 100644 index 0000000..65162f1 --- /dev/null +++ b/portage_with_autodep/pym/portage/glsa.pyo diff --git a/portage_with_autodep/pym/portage/localization.pyo b/portage_with_autodep/pym/portage/localization.pyo Binary files differnew file mode 100644 index 0000000..e992e3a --- /dev/null +++ b/portage_with_autodep/pym/portage/localization.pyo diff --git a/portage_with_autodep/pym/portage/locks.py b/portage_with_autodep/pym/portage/locks.py index 9ed1d6a..59fbc6e 100644 --- a/portage_with_autodep/pym/portage/locks.py +++ b/portage_with_autodep/pym/portage/locks.py @@ -1,5 +1,5 @@ # portage: Lock management code -# Copyright 2004-2010 Gentoo Foundation +# Copyright 2004-2012 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 __all__ = ["lockdir", "unlockdir", "lockfile", "unlockfile", \ @@ -8,13 +8,13 @@ __all__ = ["lockdir", "unlockdir", "lockfile", "unlockfile", \ import errno import fcntl -import stat +import platform import sys import time +import warnings import portage -from portage import os -from portage.const import PORTAGE_BIN_PATH +from portage import os, _encodings, _unicode_decode from portage.exception import DirectoryNotFound, FileNotFound, \ InvalidData, TryAgain, OperationNotPermitted, PermissionDenied from portage.data import portage_gid @@ -25,12 +25,30 @@ if sys.hexversion >= 0x3000000: basestring = str HARDLINK_FD = -2 +_HARDLINK_POLL_LATENCY = 3 # seconds _default_lock_fn = fcntl.lockf +if platform.python_implementation() == 'PyPy': + # workaround for https://bugs.pypy.org/issue747 + _default_lock_fn = fcntl.flock + # Used by emerge in order to disable the "waiting for lock" message # so that it doesn't interfere with the status display. _quiet = False + +_open_fds = set() + +def _close_fds(): + """ + This is intended to be called after a fork, in order to close file + descriptors for locks held by the parent process. This can be called + safely after a fork without exec, unlike the _setup_pipes close_fds + behavior. + """ + while _open_fds: + os.close(_open_fds.pop()) + def lockdir(mydir, flags=0): return lockfile(mydir, wantnewlockfile=1, flags=flags) def unlockdir(mylock): @@ -46,19 +64,31 @@ def lockfile(mypath, wantnewlockfile=0, unlinkfile=0, if not mypath: raise InvalidData(_("Empty path given")) + # Support for file object or integer file descriptor parameters is + # deprecated due to ambiguity in whether or not it's safe to close + # the file descriptor, making it prone to "Bad file descriptor" errors + # or file descriptor leaks. if isinstance(mypath, basestring) and mypath[-1] == '/': mypath = mypath[:-1] + lockfilename_path = mypath if hasattr(mypath, 'fileno'): + warnings.warn("portage.locks.lockfile() support for " + "file object parameters is deprecated. Use a file path instead.", + DeprecationWarning, stacklevel=2) + lockfilename_path = getattr(mypath, 'name', None) mypath = mypath.fileno() if isinstance(mypath, int): + warnings.warn("portage.locks.lockfile() support for integer file " + "descriptor parameters is deprecated. Use a file path instead.", + DeprecationWarning, stacklevel=2) lockfilename = mypath wantnewlockfile = 0 unlinkfile = 0 elif wantnewlockfile: base, tail = os.path.split(mypath) lockfilename = os.path.join(base, "." 
+ tail + ".portage_lockfile") - del base, tail + lockfilename_path = lockfilename unlinkfile = 1 else: lockfilename = mypath @@ -112,6 +142,8 @@ def lockfile(mypath, wantnewlockfile=0, unlinkfile=0, # we're waiting on lockfile and use a blocking attempt. locking_method = _default_lock_fn try: + if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ: + raise IOError(errno.ENOSYS, "Function not implemented") locking_method(myfd, fcntl.LOCK_EX|fcntl.LOCK_NB) except IOError as e: if not hasattr(e, "errno"): @@ -143,20 +175,22 @@ def lockfile(mypath, wantnewlockfile=0, unlinkfile=0, raise if out is not None: out.eend(os.EX_OK) - elif e.errno == errno.ENOLCK: + elif e.errno in (errno.ENOSYS, errno.ENOLCK): # We're not allowed to lock on this FS. - os.close(myfd) - link_success = False - if lockfilename == str(lockfilename): - if wantnewlockfile: - try: - if os.stat(lockfilename)[stat.ST_NLINK] == 1: - os.unlink(lockfilename) - except OSError: - pass - link_success = hardlink_lockfile(lockfilename) + if not isinstance(lockfilename, int): + # If a file object was passed in, it's not safe + # to close the file descriptor because it may + # still be in use. + os.close(myfd) + lockfilename_path = _unicode_decode(lockfilename_path, + encoding=_encodings['fs'], errors='strict') + if not isinstance(lockfilename_path, basestring): + raise + link_success = hardlink_lockfile(lockfilename_path, + waiting_msg=waiting_msg, flags=flags) if not link_success: raise + lockfilename = lockfilename_path locking_method = None myfd = HARDLINK_FD else: @@ -172,6 +206,9 @@ def lockfile(mypath, wantnewlockfile=0, unlinkfile=0, mypath, wantnewlockfile=wantnewlockfile, unlinkfile=unlinkfile, waiting_msg=waiting_msg, flags=flags) + if myfd != HARDLINK_FD: + _open_fds.add(myfd) + writemsg(str((lockfilename,myfd,unlinkfile))+"\n",1) return (lockfilename,myfd,unlinkfile,locking_method) @@ -203,7 +240,7 @@ def unlockfile(mytuple): raise InvalidData if(myfd == HARDLINK_FD): - unhardlink_lockfile(lockfilename) + unhardlink_lockfile(lockfilename, unlinkfile=unlinkfile) return True # myfd may be None here due to myfd = mypath in lockfile() @@ -212,6 +249,7 @@ def unlockfile(mytuple): writemsg(_("lockfile does not exist '%s'\n") % lockfilename,1) if myfd is not None: os.close(myfd) + _open_fds.remove(myfd) return False try: @@ -222,6 +260,7 @@ def unlockfile(mytuple): except OSError: if isinstance(lockfilename, basestring): os.close(myfd) + _open_fds.remove(myfd) raise IOError(_("Failed to unlock file '%s'\n") % lockfilename) try: @@ -243,6 +282,7 @@ def unlockfile(mytuple): else: writemsg(_("lockfile does not exist '%s'\n") % lockfilename, 1) os.close(myfd) + _open_fds.remove(myfd) return False except SystemExit: raise @@ -255,6 +295,7 @@ def unlockfile(mytuple): # open fd closed automatically on them. 
if isinstance(lockfilename, basestring): os.close(myfd) + _open_fds.remove(myfd) return True @@ -262,65 +303,148 @@ def unlockfile(mytuple): def hardlock_name(path): - return path+".hardlock-"+os.uname()[1]+"-"+str(os.getpid()) + base, tail = os.path.split(path) + return os.path.join(base, ".%s.hardlock-%s-%s" % + (tail, os.uname()[1], os.getpid())) def hardlink_is_mine(link,lock): try: - return os.stat(link).st_nlink == 2 + lock_st = os.stat(lock) + if lock_st.st_nlink == 2: + link_st = os.stat(link) + return lock_st.st_ino == link_st.st_ino and \ + lock_st.st_dev == link_st.st_dev except OSError: - return False + pass + return False -def hardlink_lockfile(lockfilename, max_wait=14400): +def hardlink_lockfile(lockfilename, max_wait=DeprecationWarning, + waiting_msg=None, flags=0): """Does the NFS, hardlink shuffle to ensure locking on the disk. - We create a PRIVATE lockfile, that is just a placeholder on the disk. - Then we HARDLINK the real lockfile to that private file. + We create a PRIVATE hardlink to the real lockfile, that is just a + placeholder on the disk. If our file can 2 references, then we have the lock. :) Otherwise we lather, rise, and repeat. - We default to a 4 hour timeout. """ - start_time = time.time() + if max_wait is not DeprecationWarning: + warnings.warn("The 'max_wait' parameter of " + "portage.locks.hardlink_lockfile() is now unused. Use " + "flags=os.O_NONBLOCK instead.", + DeprecationWarning, stacklevel=2) + + global _quiet + out = None + displayed_waiting_msg = False + preexisting = os.path.exists(lockfilename) myhardlock = hardlock_name(lockfilename) - reported_waiting = False - - while(time.time() < (start_time + max_wait)): - # We only need it to exist. - myfd = os.open(myhardlock, os.O_CREAT|os.O_RDWR,0o660) - os.close(myfd) - - if not os.path.exists(myhardlock): - raise FileNotFound( - _("Created lockfile is missing: %(filename)s") % \ - {"filename" : myhardlock}) - try: - res = os.link(myhardlock, lockfilename) - except OSError: + # myhardlock must not exist prior to our link() call, and we can + # safely unlink it since its file name is unique to our PID + try: + os.unlink(myhardlock) + except OSError as e: + if e.errno in (errno.ENOENT, errno.ESTALE): pass + else: + func_call = "unlink('%s')" % myhardlock + if e.errno == OperationNotPermitted.errno: + raise OperationNotPermitted(func_call) + elif e.errno == PermissionDenied.errno: + raise PermissionDenied(func_call) + else: + raise - if hardlink_is_mine(myhardlock, lockfilename): - # We have the lock. - if reported_waiting: - writemsg("\n", noiselevel=-1) - return True - - if reported_waiting: - writemsg(".", noiselevel=-1) + while True: + # create lockfilename if it doesn't exist yet + try: + myfd = os.open(lockfilename, os.O_CREAT|os.O_RDWR, 0o660) + except OSError as e: + func_call = "open('%s')" % lockfilename + if e.errno == OperationNotPermitted.errno: + raise OperationNotPermitted(func_call) + elif e.errno == PermissionDenied.errno: + raise PermissionDenied(func_call) + else: + raise else: - reported_waiting = True - msg = _("\nWaiting on (hardlink) lockfile: (one '.' 
per 3 seconds)\n" - "%(bin_path)s/clean_locks can fix stuck locks.\n" - "Lockfile: %(lockfilename)s\n") % \ - {"bin_path": PORTAGE_BIN_PATH, "lockfilename": lockfilename} - writemsg(msg, noiselevel=-1) - time.sleep(3) - - os.unlink(myhardlock) - return False + myfd_st = None + try: + myfd_st = os.fstat(myfd) + if not preexisting: + # Don't chown the file if it is preexisting, since we + # want to preserve existing permissions in that case. + if myfd_st.st_gid != portage_gid: + os.fchown(myfd, -1, portage_gid) + except OSError as e: + if e.errno not in (errno.ENOENT, errno.ESTALE): + writemsg("%s: fchown('%s', -1, %d)\n" % \ + (e, lockfilename, portage_gid), noiselevel=-1) + writemsg(_("Cannot chown a lockfile: '%s'\n") % \ + lockfilename, noiselevel=-1) + writemsg(_("Group IDs of current user: %s\n") % \ + " ".join(str(n) for n in os.getgroups()), + noiselevel=-1) + else: + # another process has removed the file, so we'll have + # to create it again + continue + finally: + os.close(myfd) + + # If fstat shows more than one hardlink, then it's extremely + # unlikely that the following link call will result in a lock, + # so optimize away the wasteful link call and sleep or raise + # TryAgain. + if myfd_st is not None and myfd_st.st_nlink < 2: + try: + os.link(lockfilename, myhardlock) + except OSError as e: + func_call = "link('%s', '%s')" % (lockfilename, myhardlock) + if e.errno == OperationNotPermitted.errno: + raise OperationNotPermitted(func_call) + elif e.errno == PermissionDenied.errno: + raise PermissionDenied(func_call) + elif e.errno in (errno.ESTALE, errno.ENOENT): + # another process has removed the file, so we'll have + # to create it again + continue + else: + raise + else: + if hardlink_is_mine(myhardlock, lockfilename): + if out is not None: + out.eend(os.EX_OK) + break + + try: + os.unlink(myhardlock) + except OSError as e: + # This should not happen, since the file name of + # myhardlock is unique to our host and PID, + # and the above link() call succeeded. + if e.errno not in (errno.ENOENT, errno.ESTALE): + raise + raise FileNotFound(myhardlock) + + if flags & os.O_NONBLOCK: + raise TryAgain(lockfilename) + + if out is None and not _quiet: + out = portage.output.EOutput() + if out is not None and not displayed_waiting_msg: + displayed_waiting_msg = True + if waiting_msg is None: + waiting_msg = _("waiting for lock on %s\n") % lockfilename + out.ebegin(waiting_msg) + + time.sleep(_HARDLINK_POLL_LATENCY) + + return True -def unhardlink_lockfile(lockfilename): +def unhardlink_lockfile(lockfilename, unlinkfile=True): myhardlock = hardlock_name(lockfilename) - if hardlink_is_mine(myhardlock, lockfilename): + if unlinkfile and hardlink_is_mine(myhardlock, lockfilename): # Make sure not to touch lockfilename unless we really have a lock. 
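# --- Editor's sketch (illustration only, not part of this patch) ---
# Release order for the hardlink-lock protocol shown here: the shared
# lock file may only be unlinked while hardlink_is_mine() still proves
# ownership (st_nlink == 2 with matching inode and device); the private
# per-host, per-PID link is always safe to remove. Schematically:
import os

def release_hardlink_lock(lockfile_path, private_link, i_hold_lock):
    if i_hold_lock:
        os.unlink(lockfile_path)  # safe: nobody else can hold it now
    os.unlink(private_link)       # named with our hostname and PID
# --- end sketch ---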
try: os.unlink(lockfilename) @@ -344,7 +468,7 @@ def hardlock_cleanup(path, remove_all_locks=False): if os.path.isfile(path+"/"+x): parts = x.split(".hardlock-") if len(parts) == 2: - filename = parts[0] + filename = parts[0][1:] hostpid = parts[1].split("-") host = "-".join(hostpid[:-1]) pid = hostpid[-1] @@ -368,7 +492,7 @@ def hardlock_cleanup(path, remove_all_locks=False): remove_all_locks: for y in mylist[x]: for z in mylist[x][y]: - filename = path+"/"+x+".hardlock-"+y+"-"+z + filename = path+"/."+x+".hardlock-"+y+"-"+z if filename == mylockname: continue try: diff --git a/portage_with_autodep/pym/portage/locks.pyo b/portage_with_autodep/pym/portage/locks.pyo Binary files differnew file mode 100644 index 0000000..9c90a2f --- /dev/null +++ b/portage_with_autodep/pym/portage/locks.pyo diff --git a/portage_with_autodep/pym/portage/mail.py b/portage_with_autodep/pym/portage/mail.py index 17dfcaf..3fcadd2 100644 --- a/portage_with_autodep/pym/portage/mail.py +++ b/portage_with_autodep/pym/portage/mail.py @@ -40,8 +40,7 @@ else: def TextMessage(_text): from email.mime.text import MIMEText mimetext = MIMEText(_text) - if sys.hexversion >= 0x3000000: - mimetext.set_charset("UTF-8") + mimetext.set_charset("UTF-8") return mimetext def create_message(sender, recipient, subject, body, attachments=None): diff --git a/portage_with_autodep/pym/portage/mail.pyo b/portage_with_autodep/pym/portage/mail.pyo Binary files differnew file mode 100644 index 0000000..bc3a76d --- /dev/null +++ b/portage_with_autodep/pym/portage/mail.pyo diff --git a/portage_with_autodep/pym/portage/manifest.py b/portage_with_autodep/pym/portage/manifest.py index 13efab7..90324ee 100644 --- a/portage_with_autodep/pym/portage/manifest.py +++ b/portage_with_autodep/pym/portage/manifest.py @@ -1,8 +1,10 @@ -# Copyright 1999-2011 Gentoo Foundation +# Copyright 1999-2012 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 import errno import io +import re +import warnings import portage portage.proxy.lazyimport.lazyimport(globals(), @@ -17,8 +19,13 @@ from portage import _unicode_encode from portage.exception import DigestException, FileNotFound, \ InvalidDataType, MissingParameter, PermissionDenied, \ PortageException, PortagePackageException +from portage.const import (MANIFEST1_HASH_FUNCTIONS, MANIFEST2_HASH_DEFAULTS, + MANIFEST2_HASH_FUNCTIONS, MANIFEST2_IDENTIFIERS, MANIFEST2_REQUIRED_HASH) from portage.localization import _ +# Characters prohibited by repoman's file.name check. 
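# --- Editor's sketch (illustration only, not part of this patch) ---
# The regex defined just below acts as a character blacklist: a file
# name is rejected as soon as re.search() finds any character outside
# a-zA-Z0-9._-+: . Usage, with the same pattern:
import re

_bad_char_re = re.compile(r'[^a-zA-Z0-9._\-+:]')

def filename_allowed(name):
    return _bad_char_re.search(name) is None

# filename_allowed("foo-1.0.patch") -> True
# filename_allowed("foo bar.patch") -> False (space is prohibited)
# --- end sketch ---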
+_prohibited_filename_chars_re = re.compile(r'[^a-zA-Z0-9._\-+:]') + class FileNotInManifestException(PortageException): pass @@ -30,10 +37,14 @@ def manifest2AuxfileFilter(filename): for x in mysplit: if x[:1] == '.': return False + if _prohibited_filename_chars_re.search(x) is not None: + return False return not filename[:7] == 'digest-' def manifest2MiscfileFilter(filename): filename = filename.strip(os.sep) + if _prohibited_filename_chars_re.search(filename) is not None: + return False return not (filename in ["CVS", ".svn", "files", "Manifest"] or filename.endswith(".ebuild")) def guessManifestFileType(filename): @@ -49,9 +60,15 @@ def guessManifestFileType(filename): else: return "DIST" +def guessThinManifestFileType(filename): + type = guessManifestFileType(filename) + if type != "DIST": + return None + return "DIST" + def parseManifest2(mysplit): myentry = None - if len(mysplit) > 4 and mysplit[0] in portage.const.MANIFEST2_IDENTIFIERS: + if len(mysplit) > 4 and mysplit[0] in MANIFEST2_IDENTIFIERS: mytype = mysplit[0] myname = mysplit[1] try: @@ -93,25 +110,33 @@ class Manifest2Entry(ManifestEntry): class Manifest(object): parsers = (parseManifest2,) def __init__(self, pkgdir, distdir, fetchlist_dict=None, - manifest1_compat=False, from_scratch=False): - """ create new Manifest instance for package in pkgdir - and add compability entries for old portage versions if manifest1_compat == True. + manifest1_compat=DeprecationWarning, from_scratch=False, thin=False, + allow_missing=False, allow_create=True, hashes=None): + """ Create new Manifest instance for package in pkgdir. Do not parse Manifest file if from_scratch == True (only for internal use) The fetchlist_dict parameter is required only for generation of - a Manifest (not needed for parsing and checking sums).""" + a Manifest (not needed for parsing and checking sums). 
+ If thin is specified, then the manifest carries only info for + distfiles.""" + + if manifest1_compat is not DeprecationWarning: + warnings.warn("The manifest1_compat parameter of the " + "portage.manifest.Manifest constructor is deprecated.", + DeprecationWarning, stacklevel=2) + self.pkgdir = _unicode_decode(pkgdir).rstrip(os.sep) + os.sep self.fhashdict = {} self.hashes = set() - self.hashes.update(portage.const.MANIFEST2_HASH_FUNCTIONS) - if manifest1_compat: - raise NotImplementedError("manifest1 support has been removed") + + if hashes is None: + hashes = MANIFEST2_HASH_DEFAULTS + + self.hashes.update(hashes.intersection(MANIFEST2_HASH_FUNCTIONS)) self.hashes.difference_update(hashname for hashname in \ list(self.hashes) if hashname not in hashfunc_map) self.hashes.add("size") - if manifest1_compat: - raise NotImplementedError("manifest1 support has been removed") - self.hashes.add(portage.const.MANIFEST2_REQUIRED_HASH) - for t in portage.const.MANIFEST2_IDENTIFIERS: + self.hashes.add(MANIFEST2_REQUIRED_HASH) + for t in MANIFEST2_IDENTIFIERS: self.fhashdict[t] = {} if not from_scratch: self._read() @@ -120,7 +145,13 @@ class Manifest(object): else: self.fetchlist_dict = {} self.distdir = distdir - self.guessType = guessManifestFileType + self.thin = thin + if thin: + self.guessType = guessThinManifestFileType + else: + self.guessType = guessManifestFileType + self.allow_missing = allow_missing + self.allow_create = allow_create def getFullname(self): """ Returns the absolute path to the Manifest file for this instance """ @@ -129,7 +160,7 @@ class Manifest(object): def getDigests(self): """ Compability function for old digest/manifest code, returns dict of filename:{hashfunction:hashvalue} """ rval = {} - for t in portage.const.MANIFEST2_IDENTIFIERS: + for t in MANIFEST2_IDENTIFIERS: rval.update(self.fhashdict[t]) return rval @@ -200,7 +231,7 @@ class Manifest(object): return myhashdict def _createManifestEntries(self): - valid_hashes = set(portage.const.MANIFEST2_HASH_FUNCTIONS) + valid_hashes = set(MANIFEST2_HASH_FUNCTIONS) valid_hashes.add('size') mytypes = list(self.fhashdict) mytypes.sort() @@ -218,16 +249,19 @@ class Manifest(object): def checkIntegrity(self): for t in self.fhashdict: for f in self.fhashdict[t]: - if portage.const.MANIFEST2_REQUIRED_HASH not in self.fhashdict[t][f]: - raise MissingParameter(_("Missing %s checksum: %s %s") % (portage.const.MANIFEST2_REQUIRED_HASH, t, f)) + if MANIFEST2_REQUIRED_HASH not in self.fhashdict[t][f]: + raise MissingParameter(_("Missing %s checksum: %s %s") % + (MANIFEST2_REQUIRED_HASH, t, f)) def write(self, sign=False, force=False): """ Write Manifest instance to disk, optionally signing it """ + if not self.allow_create: + return self.checkIntegrity() try: myentries = list(self._createManifestEntries()) update_manifest = True - if not force: + if myentries and not force: try: f = io.open(_unicode_encode(self.getFullname(), encoding=_encodings['fs'], errors='strict'), @@ -246,9 +280,24 @@ class Manifest(object): pass else: raise + if update_manifest: - write_atomic(self.getFullname(), - "".join("%s\n" % str(myentry) for myentry in myentries)) + if myentries or not (self.thin or self.allow_missing): + # If myentries is empty, don't write an empty manifest + # when thin or allow_missing is enabled. Except for + # thin manifests with no DIST entries, myentries is + # non-empty for all currently known use cases. 
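# --- Editor's sketch (illustration only, not part of this patch) ---
# Simplified shape of the branch that follows (the real code also
# consults self.thin and self.allow_missing): write the Manifest when
# there is something to record, otherwise make sure no stale file is
# left behind. write_func stands in for portage.util.write_atomic:
import errno
import os

def write_or_prune(path, entries, write_func):
    if entries:
        write_func(path, "".join("%s\n" % e for e in entries))
    else:
        try:
            os.unlink(path)  # e.g. a thin Manifest with no DIST entries
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
# --- end sketch ---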
+ write_atomic(self.getFullname(), "".join("%s\n" % + str(myentry) for myentry in myentries)) + else: + # With thin manifest, there's no need to have + # a Manifest file if there are no DIST entries. + try: + os.unlink(self.getFullname()) + except OSError as e: + if e.errno != errno.ENOENT: + raise + if sign: self.sign() except (IOError, OSError) as e: @@ -270,14 +319,14 @@ class Manifest(object): fname = os.path.join("files", fname) if not os.path.exists(self.pkgdir+fname) and not ignoreMissing: raise FileNotFound(fname) - if not ftype in portage.const.MANIFEST2_IDENTIFIERS: + if not ftype in MANIFEST2_IDENTIFIERS: raise InvalidDataType(ftype) if ftype == "AUX" and fname.startswith("files"): fname = fname[6:] self.fhashdict[ftype][fname] = {} if hashdict != None: self.fhashdict[ftype][fname].update(hashdict) - if not portage.const.MANIFEST2_REQUIRED_HASH in self.fhashdict[ftype][fname]: + if not MANIFEST2_REQUIRED_HASH in self.fhashdict[ftype][fname]: self.updateFileHashes(ftype, fname, checkExisting=False, ignoreMissing=ignoreMissing) def removeFile(self, ftype, fname): @@ -290,7 +339,7 @@ class Manifest(object): def findFile(self, fname): """ Return entrytype of the given file if present in Manifest or None if not present """ - for t in portage.const.MANIFEST2_IDENTIFIERS: + for t in MANIFEST2_IDENTIFIERS: if fname in self.fhashdict[t]: return t return None @@ -305,6 +354,8 @@ class Manifest(object): distfiles to raise a FileNotFound exception for (if no file or existing checksums are available), and defaults to all distfiles when not specified.""" + if not self.allow_create: + return if checkExisting: self.checkAllHashes() if assumeDistHashesSometimes or assumeDistHashesAlways: @@ -313,13 +364,88 @@ class Manifest(object): distfilehashes = {} self.__init__(self.pkgdir, self.distdir, fetchlist_dict=self.fetchlist_dict, from_scratch=True, - manifest1_compat=False) - cpvlist = [] + thin=self.thin, allow_missing=self.allow_missing, + allow_create=self.allow_create, hashes=self.hashes) pn = os.path.basename(self.pkgdir.rstrip(os.path.sep)) cat = self._pkgdir_category() pkgdir = self.pkgdir + if self.thin: + cpvlist = self._update_thin_pkgdir(cat, pn, pkgdir) + else: + cpvlist = self._update_thick_pkgdir(cat, pn, pkgdir) + + distlist = set() + for cpv in cpvlist: + distlist.update(self._getCpvDistfiles(cpv)) + + if requiredDistfiles is None: + # This allows us to force removal of stale digests for the + # ebuild --force digest option (no distfiles are required). + requiredDistfiles = set() + elif len(requiredDistfiles) == 0: + # repoman passes in an empty list, which implies that all distfiles + # are required. 
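# --- Editor's sketch (illustration only, not part of this patch) ---
# requiredDistfiles is effectively a three-way switch, as the code below
# implements it: None means no distfile is mandatory (ebuild --force
# digest), an empty list means every distfile is mandatory (repoman),
# and a non-empty list names the mandatory subset. Normalized:
def required_distfile_set(required, all_distfiles):
    if required is None:
        return set()               # nothing is mandatory
    if len(required) == 0:
        return set(all_distfiles)  # everything is mandatory
    return set(required)           # explicit subset
# --- end sketch ---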
+ requiredDistfiles = distlist.copy() + required_hash_types = set() + required_hash_types.add("size") + required_hash_types.add(MANIFEST2_REQUIRED_HASH) + for f in distlist: + fname = os.path.join(self.distdir, f) + mystat = None + try: + mystat = os.stat(fname) + except OSError: + pass + if f in distfilehashes and \ + not required_hash_types.difference(distfilehashes[f]) and \ + ((assumeDistHashesSometimes and mystat is None) or \ + (assumeDistHashesAlways and mystat is None) or \ + (assumeDistHashesAlways and mystat is not None and \ + set(distfilehashes[f]) == set(self.hashes) and \ + distfilehashes[f]["size"] == mystat.st_size)): + self.fhashdict["DIST"][f] = distfilehashes[f] + else: + try: + self.fhashdict["DIST"][f] = perform_multiple_checksums(fname, self.hashes) + except FileNotFound: + if f in requiredDistfiles: + raise + def _is_cpv(self, cat, pn, filename): + if not filename.endswith(".ebuild"): + return None + pf = filename[:-7] + ps = portage.versions._pkgsplit(pf) + cpv = "%s/%s" % (cat, pf) + if not ps: + raise PortagePackageException( + _("Invalid package name: '%s'") % cpv) + if ps[0] != pn: + raise PortagePackageException( + _("Package name does not " + "match directory name: '%s'") % cpv) + return cpv + + def _update_thin_pkgdir(self, cat, pn, pkgdir): + for pkgdir, pkgdir_dirs, pkgdir_files in os.walk(pkgdir): + break + cpvlist = [] + for f in pkgdir_files: + try: + f = _unicode_decode(f, + encoding=_encodings['fs'], errors='strict') + except UnicodeDecodeError: + continue + if f[:1] == '.': + continue + pf = self._is_cpv(cat, pn, f) + if pf is not None: + cpvlist.append(pf) + return cpvlist + + def _update_thick_pkgdir(self, cat, pn, pkgdir): + cpvlist = [] for pkgdir, pkgdir_dirs, pkgdir_files in os.walk(pkgdir): break for f in pkgdir_files: @@ -330,21 +456,10 @@ class Manifest(object): continue if f[:1] == ".": continue - pf = None - if f[-7:] == '.ebuild': - pf = f[:-7] + pf = self._is_cpv(cat, pn, f) if pf is not None: mytype = "EBUILD" - ps = portage.versions._pkgsplit(pf) - cpv = "%s/%s" % (cat, pf) - if not ps: - raise PortagePackageException( - _("Invalid package name: '%s'") % cpv) - if ps[0] != pn: - raise PortagePackageException( - _("Package name does not " - "match directory name: '%s'") % cpv) - cpvlist.append(cpv) + cpvlist.append(pf) elif manifest2MiscfileFilter(f): mytype = "MISC" else: @@ -368,41 +483,7 @@ class Manifest(object): continue self.fhashdict["AUX"][f] = perform_multiple_checksums( os.path.join(self.pkgdir, "files", f.lstrip(os.sep)), self.hashes) - distlist = set() - for cpv in cpvlist: - distlist.update(self._getCpvDistfiles(cpv)) - if requiredDistfiles is None: - # This allows us to force removal of stale digests for the - # ebuild --force digest option (no distfiles are required). - requiredDistfiles = set() - elif len(requiredDistfiles) == 0: - # repoman passes in an empty list, which implies that all distfiles - # are required. 
- requiredDistfiles = distlist.copy() - required_hash_types = set() - required_hash_types.add("size") - required_hash_types.add(portage.const.MANIFEST2_REQUIRED_HASH) - for f in distlist: - fname = os.path.join(self.distdir, f) - mystat = None - try: - mystat = os.stat(fname) - except OSError: - pass - if f in distfilehashes and \ - not required_hash_types.difference(distfilehashes[f]) and \ - ((assumeDistHashesSometimes and mystat is None) or \ - (assumeDistHashesAlways and mystat is None) or \ - (assumeDistHashesAlways and mystat is not None and \ - len(distfilehashes[f]) == len(self.hashes) and \ - distfilehashes[f]["size"] == mystat.st_size)): - self.fhashdict["DIST"][f] = distfilehashes[f] - else: - try: - self.fhashdict["DIST"][f] = perform_multiple_checksums(fname, self.hashes) - except FileNotFound: - if f in requiredDistfiles: - raise + return cpvlist def _pkgdir_category(self): return self.pkgdir.rstrip(os.sep).split(os.sep)[-2] @@ -417,7 +498,7 @@ class Manifest(object): return absname def checkAllHashes(self, ignoreMissingFiles=False): - for t in portage.const.MANIFEST2_IDENTIFIERS: + for t in MANIFEST2_IDENTIFIERS: self.checkTypeHashes(t, ignoreMissingFiles=ignoreMissingFiles) def checkTypeHashes(self, idtype, ignoreMissingFiles=False): @@ -481,7 +562,7 @@ class Manifest(object): def updateAllHashes(self, checkExisting=False, ignoreMissingFiles=True): """ Regenerate all hashes for all files in this Manifest. """ - for idtype in portage.const.MANIFEST2_IDENTIFIERS: + for idtype in MANIFEST2_IDENTIFIERS: self.updateTypeHashes(idtype, checkExisting=checkExisting, ignoreMissingFiles=ignoreMissingFiles) @@ -526,9 +607,11 @@ class Manifest(object): myfile.close() for l in lines: mysplit = l.split() - if len(mysplit) == 4 and mysplit[0] in portage.const.MANIFEST1_HASH_FUNCTIONS and not 1 in rVal: + if len(mysplit) == 4 and mysplit[0] in MANIFEST1_HASH_FUNCTIONS \ + and 1 not in rVal: rVal.append(1) - elif len(mysplit) > 4 and mysplit[0] in portage.const.MANIFEST2_IDENTIFIERS and ((len(mysplit) - 3) % 2) == 0 and not 2 in rVal: + elif len(mysplit) > 4 and mysplit[0] in MANIFEST2_IDENTIFIERS \ + and ((len(mysplit) - 3) % 2) == 0 and not 2 in rVal: rVal.append(2) return rVal diff --git a/portage_with_autodep/pym/portage/manifest.pyo b/portage_with_autodep/pym/portage/manifest.pyo Binary files differnew file mode 100644 index 0000000..d482bbd --- /dev/null +++ b/portage_with_autodep/pym/portage/manifest.pyo diff --git a/portage_with_autodep/pym/portage/news.py b/portage_with_autodep/pym/portage/news.py index 866e5b0..bbd9325 100644 --- a/portage_with_autodep/pym/portage/news.py +++ b/portage_with_autodep/pym/portage/news.py @@ -2,24 +2,30 @@ # Copyright 2006-2011 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 +from __future__ import print_function + __all__ = ["NewsManager", "NewsItem", "DisplayRestriction", "DisplayProfileRestriction", "DisplayKeywordRestriction", - "DisplayInstalledRestriction"] + "DisplayInstalledRestriction", + "count_unread_news", "display_news_notifications"] import io import logging import os as _os import re +from portage import OrderedDict from portage import os from portage import _encodings from portage import _unicode_decode from portage import _unicode_encode +from portage.const import NEWS_LIB_PATH from portage.util import apply_secpass_permissions, ensure_dirs, \ grabfile, normalize_path, write_atomic, writemsg_level from portage.data import portage_gid from portage.dep import isvalidatom from portage.localization 
import _
 from portage.locks import lockfile, unlockfile
+from portage.output import colorize
 from portage.exception import InvalidLocation, OperationNotPermitted, \
 PermissionDenied
@@ -39,7 +45,6 @@ class NewsManager(object):
 def __init__(self, portdb, vardb, news_path, unread_path, language_id='en'):
 self.news_path = news_path
 self.unread_path = unread_path
- self.target_root = vardb.root
 self.language_id = language_id
 self.config = vardb.settings
 self.vdb = vardb
@@ -114,7 +119,6 @@ class NewsManager(object):
 except PermissionDenied:
 return
- updates = []
 for itemid in news:
 try:
 itemid = _unicode_decode(itemid,
@@ -250,10 +254,11 @@ class NewsItem(object):
 return self._valid
 def parse(self):
- lines = io.open(_unicode_encode(self.path,
+ f = io.open(_unicode_encode(self.path,
 encoding=_encodings['fs'], errors='strict'),
- mode='r', encoding=_encodings['content'], errors='replace'
- ).readlines()
+ mode='r', encoding=_encodings['content'], errors='replace')
+ lines = f.readlines()
+ f.close()
 self.restrictions = {}
 invalids = []
 for i, line in enumerate(lines):
@@ -349,3 +354,67 @@ class DisplayInstalledRestriction(DisplayRestriction):
 if vdb.match(self.atom):
 return True
 return False
+
+def count_unread_news(portdb, vardb, repos=None, update=True):
+ """
+ Returns a dictionary mapping repos to integer counts of unread news items.
+ By default, this will scan all repos and check for new items that have
+ appeared since the last scan.
+
+ @param portdb: a portage tree database
+ @type portdb: portdbapi
+ @param vardb: an installed package database
+ @type vardb: vardbapi
+ @param repos: names of repos to scan (None means to scan all available repos)
+ @type repos: list or None
+ @param update: check for new items (default is True)
+ @type update: boolean
+ @rtype: dict
+ @return: dictionary mapping repos to integer counts of unread news items
+ """
+
+ NEWS_PATH = os.path.join("metadata", "news")
+ UNREAD_PATH = os.path.join(vardb.settings['EROOT'], NEWS_LIB_PATH, "news")
+ news_counts = OrderedDict()
+ if repos is None:
+ repos = portdb.getRepositories()
+
+ permission_msgs = set()
+ for repo in repos:
+ try:
+ manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
+ count = manager.getUnreadItems(repo, update=True)
+ except PermissionDenied as e:
+ # NOTE: The NewsManager typically handles permission errors by
+ # returning silently, so PermissionDenied won't necessarily be
+ # raised even if we do trigger a permission error above.
+ msg = _unicode_decode("Permission denied: '%s'\n") % (e,)
+ if msg in permission_msgs:
+ pass
+ else:
+ permission_msgs.add(msg)
+ writemsg_level(msg, level=logging.ERROR, noiselevel=-1)
+ news_counts[repo] = 0
+ else:
+ news_counts[repo] = count
+
+ return news_counts
+
+def display_news_notifications(news_counts):
+ """
+ Display a notification for unread news items, using a dictionary mapping
+ repos to integer counts, like that returned from count_unread_news().
+ """
+ newsReaderDisplay = False
+ for repo, count in news_counts.items():
+ if count > 0:
+ if not newsReaderDisplay:
+ newsReaderDisplay = True
+ print()
+ print(colorize("WARN", " * IMPORTANT:"), end=' ')
+ print("%s news items need reading for repository '%s'."
% (count, repo)) + + if newsReaderDisplay: + print(colorize("WARN", " *"), end=' ') + print("Use " + colorize("GOOD", "eselect news") + " to read news items.") + print() diff --git a/portage_with_autodep/pym/portage/news.pyo b/portage_with_autodep/pym/portage/news.pyo Binary files differnew file mode 100644 index 0000000..bbd247c --- /dev/null +++ b/portage_with_autodep/pym/portage/news.pyo diff --git a/portage_with_autodep/pym/portage/output.py b/portage_with_autodep/pym/portage/output.py index 0e8245f..98bec81 100644 --- a/portage_with_autodep/pym/portage/output.py +++ b/portage_with_autodep/pym/portage/output.py @@ -162,11 +162,14 @@ def _parse_color_map(config_root='/', onerror=None): if token[0] in quotes and token[0] == token[-1]: token = token[1:-1] return token + + f = None try: - lineno=0 - for line in io.open(_unicode_encode(myfile, + f = io.open(_unicode_encode(myfile, encoding=_encodings['fs'], errors='strict'), - mode='r', encoding=_encodings['content'], errors='replace'): + mode='r', encoding=_encodings['content'], errors='replace') + lineno = 0 + for line in f: lineno += 1 commenter_pos = line.find("#") @@ -226,6 +229,9 @@ def _parse_color_map(config_root='/', onerror=None): elif e.errno == errno.EACCES: raise PermissionDenied(myfile) raise + finally: + if f is not None: + f.close() def nc_len(mystr): tmp = re.sub(esc_seq + "^m]+m", "", mystr); @@ -319,6 +325,12 @@ def style_to_ansi_code(style): ret += codes.get(attr_name, attr_name) return ret +def colormap(): + mycolors = [] + for c in ("GOOD", "WARN", "BAD", "HILITE", "BRACKET", "NORMAL"): + mycolors.append("%s=$'%s'" % (c, style_to_ansi_code(c))) + return "\n".join(mycolors) + def colorize(color_key, text): global havecolor if havecolor: @@ -335,12 +347,12 @@ compat_functions_colors = ["bold","white","teal","turquoise","darkteal", "fuchsia","purple","blue","darkblue","green","darkgreen","yellow", "brown","darkyellow","red","darkred"] -def create_color_func(color_key): - def derived_func(*args): - newargs = list(args) - newargs.insert(0, color_key) - return colorize(*newargs) - return derived_func +class create_color_func(object): + __slots__ = ("_color_key",) + def __init__(self, color_key): + self._color_key = color_key + def __call__(self, text): + return colorize(self._color_key, text) for c in compat_functions_colors: globals()[c] = create_color_func(c) @@ -416,12 +428,14 @@ class StyleWriter(formatter.DumbWriter): def get_term_size(): """ Get the number of lines and columns of the tty that is connected to - stdout. Returns a tuple of (lines, columns) or (-1, -1) if an error + stdout. Returns a tuple of (lines, columns) or (0, 0) if an error occurs. The curses module is used if available, otherwise the output of - `stty size` is parsed. + `stty size` is parsed. The lines and columns values are guaranteed to be + greater than or equal to zero, since a negative COLUMNS variable is + known to prevent some commands from working (see bug #394091). 
""" if not sys.stdout.isatty(): - return -1, -1 + return (0, 0) try: import curses try: @@ -436,10 +450,13 @@ def get_term_size(): out = out.split() if len(out) == 2: try: - return int(out[0]), int(out[1]) + val = (int(out[0]), int(out[1])) except ValueError: pass - return -1, -1 + else: + if val[0] >= 0 and val[1] >= 0: + return val + return (0, 0) def set_term_size(lines, columns, fd): """ diff --git a/portage_with_autodep/pym/portage/output.pyo b/portage_with_autodep/pym/portage/output.pyo Binary files differnew file mode 100644 index 0000000..993a2de --- /dev/null +++ b/portage_with_autodep/pym/portage/output.pyo diff --git a/portage_with_autodep/pym/portage/package/__init__.pyo b/portage_with_autodep/pym/portage/package/__init__.pyo Binary files differnew file mode 100644 index 0000000..9d8f30c --- /dev/null +++ b/portage_with_autodep/pym/portage/package/__init__.pyo diff --git a/portage_with_autodep/pym/portage/package/ebuild/__init__.pyo b/portage_with_autodep/pym/portage/package/ebuild/__init__.pyo Binary files differnew file mode 100644 index 0000000..927b4bc --- /dev/null +++ b/portage_with_autodep/pym/portage/package/ebuild/__init__.pyo diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/KeywordsManager.py b/portage_with_autodep/pym/portage/package/ebuild/_config/KeywordsManager.py index cd22554..0c613ce 100644 --- a/portage_with_autodep/pym/portage/package/ebuild/_config/KeywordsManager.py +++ b/portage_with_autodep/pym/portage/package/ebuild/_config/KeywordsManager.py @@ -1,4 +1,4 @@ -# Copyright 2010-2011 Gentoo Foundation +# Copyright 2010-2012 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 __all__ = ( @@ -11,7 +11,7 @@ from portage.dep import ExtendedAtomDict, _repo_separator, _slot_separator from portage.localization import _ from portage.package.ebuild._config.helper import ordered_by_atom_specificity from portage.util import grabdict_package, stack_lists, writemsg -from portage.versions import cpv_getkey +from portage.versions import cpv_getkey, _pkg_str class KeywordsManager(object): """Manager class to handle keywords processing and validation""" @@ -20,7 +20,8 @@ class KeywordsManager(object): global_accept_keywords=""): self._pkeywords_list = [] rawpkeywords = [grabdict_package( - os.path.join(x, "package.keywords"), recursive=1, + os.path.join(x.location, "package.keywords"), + recursive=x.portage1_directories, verify_eapi=True) \ for x in profiles] for pkeyworddict in rawpkeywords: @@ -35,7 +36,8 @@ class KeywordsManager(object): self._p_accept_keywords = [] raw_p_accept_keywords = [grabdict_package( - os.path.join(x, "package.accept_keywords"), recursive=1, + os.path.join(x.location, "package.accept_keywords"), + recursive=x.portage1_directories, verify_eapi=True) \ for x in profiles] for d in raw_p_accept_keywords: @@ -75,10 +77,11 @@ class KeywordsManager(object): def getKeywords(self, cpv, slot, keywords, repo): - cp = cpv_getkey(cpv) - pkg = "".join((cpv, _slot_separator, slot)) - if repo and repo != Package.UNKNOWN_REPO: - pkg = "".join((pkg, _repo_separator, repo)) + if not hasattr(cpv, 'slot'): + pkg = _pkg_str(cpv, slot=slot, repo=repo) + else: + pkg = cpv + cp = pkg.cp keywords = [[x for x in keywords.split() if x != "-*"]] for pkeywords_dict in self._pkeywords_list: cpdict = pkeywords_dict.get(cp) @@ -206,12 +209,16 @@ class KeywordsManager(object): hasstable = False hastesting = False for gp in mygroups: - if gp == "*" or (gp == "-*" and len(mygroups) == 1): - writemsg(_("--- WARNING: Package 
'%(cpv)s' uses" - " '%(keyword)s' keyword.\n") % {"cpv": cpv, "keyword": gp}, - noiselevel=-1) - if gp == "*": - match = True + if gp == "*": + match = True + break + elif gp == "~*": + hastesting = True + for x in pgroups: + if x[:1] == "~": + match = True + break + if match: break elif gp in pgroups: match = True @@ -254,18 +261,19 @@ class KeywordsManager(object): """ pgroups = global_accept_keywords.split() + if not hasattr(cpv, 'slot'): + cpv = _pkg_str(cpv, slot=slot, repo=repo) cp = cpv_getkey(cpv) unmaskgroups = [] if self._p_accept_keywords: - cpv_slot = "%s:%s" % (cpv, slot) accept_keywords_defaults = tuple('~' + keyword for keyword in \ pgroups if keyword[:1] not in "~-") for d in self._p_accept_keywords: cpdict = d.get(cp) if cpdict: pkg_accept_keywords = \ - ordered_by_atom_specificity(cpdict, cpv_slot) + ordered_by_atom_specificity(cpdict, cpv) if pkg_accept_keywords: for x in pkg_accept_keywords: if not x: @@ -274,9 +282,8 @@ class KeywordsManager(object): pkgdict = self.pkeywordsdict.get(cp) if pkgdict: - cpv_slot = "%s:%s" % (cpv, slot) pkg_accept_keywords = \ - ordered_by_atom_specificity(pkgdict, cpv_slot, repo=repo) + ordered_by_atom_specificity(pkgdict, cpv) if pkg_accept_keywords: for x in pkg_accept_keywords: unmaskgroups.extend(x) diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/KeywordsManager.pyo b/portage_with_autodep/pym/portage/package/ebuild/_config/KeywordsManager.pyo Binary files differnew file mode 100644 index 0000000..15043f0 --- /dev/null +++ b/portage_with_autodep/pym/portage/package/ebuild/_config/KeywordsManager.pyo diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/LicenseManager.py b/portage_with_autodep/pym/portage/package/ebuild/_config/LicenseManager.py index effd55b..f76e7e2 100644 --- a/portage_with_autodep/pym/portage/package/ebuild/_config/LicenseManager.py +++ b/portage_with_autodep/pym/portage/package/ebuild/_config/LicenseManager.py @@ -1,4 +1,4 @@ -# Copyright 2010 Gentoo Foundation +# Copyright 201-2012 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 __all__ = ( @@ -10,7 +10,7 @@ from portage.dep import ExtendedAtomDict, use_reduce from portage.exception import InvalidDependString from portage.localization import _ from portage.util import grabdict, grabdict_package, writemsg -from portage.versions import cpv_getkey +from portage.versions import cpv_getkey, _pkg_str from portage.package.ebuild._config.helper import ordered_by_atom_specificity @@ -119,8 +119,9 @@ class LicenseManager(object): cp = cpv_getkey(cpv) cpdict = self._plicensedict.get(cp) if cpdict: - cpv_slot = "%s:%s" % (cpv, slot) - plicence_list = ordered_by_atom_specificity(cpdict, cpv_slot, repo) + if not hasattr(cpv, slot): + cpv = _pkg_str(cpv, slot=slot, repo=repo) + plicence_list = ordered_by_atom_specificity(cpdict, cpv) if plicence_list: accept_license = list(self._accept_license) for x in plicence_list: diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/LicenseManager.pyo b/portage_with_autodep/pym/portage/package/ebuild/_config/LicenseManager.pyo Binary files differnew file mode 100644 index 0000000..4a38298 --- /dev/null +++ b/portage_with_autodep/pym/portage/package/ebuild/_config/LicenseManager.pyo diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/LocationsManager.py b/portage_with_autodep/pym/portage/package/ebuild/_config/LocationsManager.py index c2b115b..f7a1177 100644 --- 
a/portage_with_autodep/pym/portage/package/ebuild/_config/LocationsManager.py +++ b/portage_with_autodep/pym/portage/package/ebuild/_config/LocationsManager.py @@ -5,7 +5,11 @@ __all__ = ( 'LocationsManager', ) +import collections import io +import warnings + +import portage from portage import os, eapi_is_supported, _encodings, _unicode_encode from portage.const import CUSTOM_PROFILE_PATH, GLOBAL_CONFIG_PATH, \ PROFILE_PATH, USER_CONFIG_PATH @@ -13,7 +17,20 @@ from portage.exception import DirectoryNotFound, ParseError from portage.localization import _ from portage.util import ensure_dirs, grabfile, \ normalize_path, shlex_split, writemsg +from portage.repository.config import parse_layout_conf, \ + _portage1_profiles_allow_directories + + +_PORTAGE1_DIRECTORIES = frozenset([ + 'package.mask', 'package.provided', + 'package.use', 'package.use.mask', 'package.use.force', + 'use.mask', 'use.force']) + +_profile_node = collections.namedtuple('_profile_node', + 'location portage1_directories') +_allow_parent_colon = frozenset( + ["portage-2"]) class LocationsManager(object): @@ -25,9 +42,9 @@ class LocationsManager(object): self.config_root = config_root self.target_root = target_root self._user_config = local_config - + if self.eprefix is None: - self.eprefix = "" + self.eprefix = portage.const.EPREFIX if self.config_root is None: self.config_root = self.eprefix + os.sep @@ -37,17 +54,33 @@ class LocationsManager(object): self._check_var_directory("PORTAGE_CONFIGROOT", self.config_root) self.abs_user_config = os.path.join(self.config_root, USER_CONFIG_PATH) + self.config_profile_path = config_profile_path + + def load_profiles(self, repositories, known_repository_paths): + known_repository_paths = set(os.path.realpath(x) + for x in known_repository_paths) - if config_profile_path is None: - config_profile_path = \ + known_repos = [] + for x in known_repository_paths: + try: + layout_data = {"profile-formats": + repositories.get_repo_for_location(x).profile_formats} + except KeyError: + layout_data = parse_layout_conf(x)[0] + # force a trailing '/' for ease of doing startswith checks + known_repos.append((x + '/', layout_data)) + known_repos = tuple(known_repos) + + if self.config_profile_path is None: + self.config_profile_path = \ os.path.join(self.config_root, PROFILE_PATH) - if os.path.isdir(config_profile_path): - self.profile_path = config_profile_path + if os.path.isdir(self.config_profile_path): + self.profile_path = self.config_profile_path else: - config_profile_path = \ + self.config_profile_path = \ os.path.join(self.abs_user_config, 'make.profile') - if os.path.isdir(config_profile_path): - self.profile_path = config_profile_path + if os.path.isdir(self.config_profile_path): + self.profile_path = self.config_profile_path else: self.profile_path = None else: @@ -55,19 +88,22 @@ class LocationsManager(object): # here, in order to create an empty profile # for checking dependencies of packages with # empty KEYWORDS. - self.profile_path = config_profile_path + self.profile_path = self.config_profile_path # The symlink might not exist or might not be a symlink. self.profiles = [] + self.profiles_complex = [] if self.profile_path: try: - self._addProfile(os.path.realpath(self.profile_path)) + self._addProfile(os.path.realpath(self.profile_path), + repositories, known_repos) except ParseError as e: writemsg(_("!!! Unable to parse profile: '%s'\n") % \ self.profile_path, noiselevel=-1) writemsg("!!! 
ParseError: %s\n" % str(e), noiselevel=-1) self.profiles = [] + self.profiles_complex = [] if self._user_config and self.profiles: custom_prof = os.path.join( @@ -75,9 +111,11 @@ class LocationsManager(object): if os.path.exists(custom_prof): self.user_profile_dir = custom_prof self.profiles.append(custom_prof) + self.profiles_complex.append(_profile_node(custom_prof, True)) del custom_prof self.profiles = tuple(self.profiles) + self.profiles_complex = tuple(self.profiles_complex) def _check_var_directory(self, varname, var): if not os.path.isdir(var): @@ -86,14 +124,45 @@ class LocationsManager(object): noiselevel=-1) raise DirectoryNotFound(var) - def _addProfile(self, currentPath): + def _addProfile(self, currentPath, repositories, known_repos): + current_abs_path = os.path.abspath(currentPath) + allow_directories = True + allow_parent_colon = True + repo_loc = None + compat_mode = False + intersecting_repos = [x for x in known_repos if current_abs_path.startswith(x[0])] + if intersecting_repos: + # protect against nested repositories. Insane configuration, but the longest + # path will be the correct one. + repo_loc, layout_data = max(intersecting_repos, key=lambda x:len(x[0])) + allow_directories = any(x in _portage1_profiles_allow_directories + for x in layout_data['profile-formats']) + compat_mode = layout_data['profile-formats'] == ('portage-1-compat',) + allow_parent_colon = any(x in _allow_parent_colon + for x in layout_data['profile-formats']) + + if compat_mode: + offenders = _PORTAGE1_DIRECTORIES.intersection(os.listdir(currentPath)) + offenders = sorted(x for x in offenders + if os.path.isdir(os.path.join(currentPath, x))) + if offenders: + warnings.warn(_("Profile '%(profile_path)s' in repository " + "'%(repo_name)s' is implicitly using 'portage-1' profile format, but " + "the repository profiles are not marked as that format. This will break " + "in the future. Please either convert the following paths " + "to files, or add\nprofile-formats = portage-1\nto the " + "repositories layout.conf. Files: '%(files)s'\n") + % dict(profile_path=currentPath, repo_name=repo_loc, + files=', '.join(offenders))) + parentsFile = os.path.join(currentPath, "parent") eapi_file = os.path.join(currentPath, "eapi") + f = None try: - eapi = io.open(_unicode_encode(eapi_file, + f = io.open(_unicode_encode(eapi_file, encoding=_encodings['fs'], errors='strict'), - mode='r', encoding=_encodings['content'], errors='replace' - ).readline().strip() + mode='r', encoding=_encodings['content'], errors='replace') + eapi = f.readline().strip() except IOError: pass else: @@ -102,21 +171,69 @@ class LocationsManager(object): "Profile contains unsupported " "EAPI '%s': '%s'") % \ (eapi, os.path.realpath(eapi_file),)) + finally: + if f is not None: + f.close() if os.path.exists(parentsFile): parents = grabfile(parentsFile) if not parents: raise ParseError( _("Empty parent file: '%s'") % parentsFile) for parentPath in parents: + abs_parent = parentPath[:1] == os.sep + if not abs_parent and allow_parent_colon: + parentPath = self._expand_parent_colon(parentsFile, + parentPath, repo_loc, repositories) + + # NOTE: This os.path.join() call is intended to ignore + # currentPath if parentPath is already absolute. parentPath = normalize_path(os.path.join( currentPath, parentPath)) + + if abs_parent or repo_loc is None or \ + not parentPath.startswith(repo_loc): + # It seems that this parent may point outside + # of the current repo, so realpath it. 
+ parentPath = os.path.realpath(parentPath) + if os.path.exists(parentPath): - self._addProfile(parentPath) + self._addProfile(parentPath, repositories, known_repos) else: raise ParseError( _("Parent '%s' not found: '%s'") % \ (parentPath, parentsFile)) + self.profiles.append(currentPath) + self.profiles_complex.append( + _profile_node(currentPath, allow_directories)) + + def _expand_parent_colon(self, parentsFile, parentPath, + repo_loc, repositories): + colon = parentPath.find(":") + if colon == -1: + return parentPath + + if colon == 0: + if repo_loc is None: + raise ParseError( + _("Parent '%s' not found: '%s'") % \ + (parentPath, parentsFile)) + else: + parentPath = normalize_path(os.path.join( + repo_loc, 'profiles', parentPath[colon+1:])) + else: + p_repo_name = parentPath[:colon] + try: + p_repo_loc = repositories.get_location_for_name(p_repo_name) + except KeyError: + raise ParseError( + _("Parent '%s' not found: '%s'") % \ + (parentPath, parentsFile)) + else: + parentPath = normalize_path(os.path.join( + p_repo_loc, 'profiles', parentPath[colon+1:])) + + return parentPath def set_root_override(self, root_overwrite=None): # Allow ROOT setting to come from make.conf if it's not overridden diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/LocationsManager.pyo b/portage_with_autodep/pym/portage/package/ebuild/_config/LocationsManager.pyo Binary files differnew file mode 100644 index 0000000..c64d313 --- /dev/null +++ b/portage_with_autodep/pym/portage/package/ebuild/_config/LocationsManager.pyo diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/MaskManager.py b/portage_with_autodep/pym/portage/package/ebuild/_config/MaskManager.py index df93e10..bce1152 100644 --- a/portage_with_autodep/pym/portage/package/ebuild/_config/MaskManager.py +++ b/portage_with_autodep/pym/portage/package/ebuild/_config/MaskManager.py @@ -5,9 +5,12 @@ __all__ = ( 'MaskManager', ) +import warnings + from portage import os from portage.dep import ExtendedAtomDict, match_from_list, _repo_separator, _slot_separator -from portage.util import append_repo, grabfile_package, stack_lists +from portage.localization import _ +from portage.util import append_repo, grabfile_package, stack_lists, writemsg from portage.versions import cpv_getkey from _emerge.Package import Package @@ -32,30 +35,76 @@ class MaskManager(object): # repo may be often referenced by others as the master. pmask_cache = {} - def grab_pmask(loc): + def grab_pmask(loc, repo_config): if loc not in pmask_cache: - pmask_cache[loc] = grabfile_package( - os.path.join(loc, "profiles", "package.mask"), - recursive=1, remember_source_file=True, verify_eapi=True) + path = os.path.join(loc, 'profiles', 'package.mask') + pmask_cache[loc] = grabfile_package(path, + recursive=repo_config.portage1_profiles, + remember_source_file=True, verify_eapi=True) + if repo_config.portage1_profiles_compat and os.path.isdir(path): + warnings.warn(_("Repository '%(repo_name)s' is implicitly using " + "'portage-1' profile format in its profiles/package.mask, but " + "the repository profiles are not marked as that format. This will break " + "in the future. 
Please either convert the following paths " + "to files, or add\nprofile-formats = portage-1\nto the " + "repositories layout.conf.\n") + % dict(repo_name=repo_config.name)) + return pmask_cache[loc] repo_pkgmasklines = [] for repo in repositories.repos_with_profiles(): lines = [] - repo_lines = grab_pmask(repo.location) + repo_lines = grab_pmask(repo.location, repo) + removals = frozenset(line[0][1:] for line in repo_lines + if line[0][:1] == "-") + matched_removals = set() for master in repo.masters: - master_lines = grab_pmask(master.location) + master_lines = grab_pmask(master.location, master) + for line in master_lines: + if line[0] in removals: + matched_removals.add(line[0]) + # Since we don't stack masters recursively, there aren't any + # atoms earlier in the stack to be matched by negative atoms in + # master_lines. Also, repo_lines may contain negative atoms + # that are intended to negate atoms from a different master + # than the one with which we are currently stacking. Therefore, + # we disable warn_for_unmatched_removal here (see bug #386569). lines.append(stack_lists([master_lines, repo_lines], incremental=1, - remember_source_file=True, warn_for_unmatched_removal=True, - strict_warn_for_unmatched_removal=strict_umatched_removal)) - if not repo.masters: + remember_source_file=True, warn_for_unmatched_removal=False)) + + # It's safe to warn for unmatched removal if masters have not + # been overridden by the user, which is guaranteed when + # user_config is false (when called by repoman). + if repo.masters: + unmatched_removals = removals.difference(matched_removals) + if unmatched_removals and not user_config: + source_file = os.path.join(repo.location, + "profiles", "package.mask") + unmatched_removals = list(unmatched_removals) + if len(unmatched_removals) > 3: + writemsg( + _("--- Unmatched removal atoms in %s: %s and %s more\n") % + (source_file, + ", ".join("-" + x for x in unmatched_removals[:3]), + len(unmatched_removals) - 3), noiselevel=-1) + else: + writemsg( + _("--- Unmatched removal atom(s) in %s: %s\n") % + (source_file, + ", ".join("-" + x for x in unmatched_removals)), + noiselevel=-1) + + else: lines.append(stack_lists([repo_lines], incremental=1, - remember_source_file=True, warn_for_unmatched_removal=True, + remember_source_file=True, warn_for_unmatched_removal=not user_config, strict_warn_for_unmatched_removal=strict_umatched_removal)) repo_pkgmasklines.extend(append_repo(stack_lists(lines), repo.name, remember_source_file=True)) repo_pkgunmasklines = [] for repo in repositories.repos_with_profiles(): + if not repo.portage1_profiles: + continue repo_lines = grabfile_package(os.path.join(repo.location, "profiles", "package.unmask"), \ recursive=1, remember_source_file=True, verify_eapi=True) lines = stack_lists([repo_lines], incremental=1, \ @@ -69,9 +118,14 @@ class MaskManager(object): profile_pkgunmasklines = [] for x in profiles: profile_pkgmasklines.append(grabfile_package( - os.path.join(x, "package.mask"), recursive=1, remember_source_file=True, verify_eapi=True)) - profile_pkgunmasklines.append(grabfile_package( - os.path.join(x, "package.unmask"), recursive=1, remember_source_file=True, verify_eapi=True)) + os.path.join(x.location, "package.mask"), + recursive=x.portage1_directories, + remember_source_file=True, verify_eapi=True)) + if x.portage1_directories: + profile_pkgunmasklines.append(grabfile_package( + os.path.join(x.location, "package.unmask"), + recursive=x.portage1_directories, + remember_source_file=True, verify_eapi=True)) 
profile_pkgmasklines = stack_lists(profile_pkgmasklines, incremental=1, \ remember_source_file=True, warn_for_unmatched_removal=True, strict_warn_for_unmatched_removal=strict_umatched_removal) diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/MaskManager.pyo b/portage_with_autodep/pym/portage/package/ebuild/_config/MaskManager.pyo Binary files differnew file mode 100644 index 0000000..f48eb47 --- /dev/null +++ b/portage_with_autodep/pym/portage/package/ebuild/_config/MaskManager.pyo diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/UseManager.py b/portage_with_autodep/pym/portage/package/ebuild/_config/UseManager.py index d7ef0f6..e1ec7f4 100644 --- a/portage_with_autodep/pym/portage/package/ebuild/_config/UseManager.py +++ b/portage_with_autodep/pym/portage/package/ebuild/_config/UseManager.py @@ -1,4 +1,4 @@ -# Copyright 2010-2011 Gentoo Foundation +# Copyright 2010-2012 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 __all__ = ( @@ -7,10 +7,10 @@ __all__ = ( from _emerge.Package import Package from portage import os -from portage.dep import ExtendedAtomDict, remove_slot, _get_useflag_re +from portage.dep import dep_getrepo, dep_getslot, ExtendedAtomDict, remove_slot, _get_useflag_re from portage.localization import _ from portage.util import grabfile, grabdict_package, read_corresponding_eapi_file, stack_lists, writemsg -from portage.versions import cpv_getkey +from portage.versions import cpv_getkey, _pkg_str from portage.package.ebuild._config.helper import ordered_by_atom_specificity @@ -65,9 +65,9 @@ class UseManager(object): self.repositories = repositories - def _parse_file_to_tuple(self, file_name): + def _parse_file_to_tuple(self, file_name, recursive=True): ret = [] - lines = grabfile(file_name, recursive=1) + lines = grabfile(file_name, recursive=recursive) eapi = read_corresponding_eapi_file(file_name) useflag_re = _get_useflag_re(eapi) for prefixed_useflag in lines: @@ -82,10 +82,10 @@ class UseManager(object): ret.append(prefixed_useflag) return tuple(ret) - def _parse_file_to_dict(self, file_name, juststrings=False): + def _parse_file_to_dict(self, file_name, juststrings=False, recursive=True): ret = {} location_dict = {} - file_dict = grabdict_package(file_name, recursive=1, verify_eapi=True) + file_dict = grabdict_package(file_name, recursive=recursive, verify_eapi=True) eapi = read_corresponding_eapi_file(file_name) useflag_re = _get_useflag_re(eapi) for k, v in file_dict.items(): @@ -132,19 +132,29 @@ class UseManager(object): return ret def _parse_profile_files_to_tuple_of_tuples(self, file_name, locations): - return tuple(self._parse_file_to_tuple(os.path.join(profile, file_name)) for profile in locations) + return tuple(self._parse_file_to_tuple( + os.path.join(profile.location, file_name), + recursive=profile.portage1_directories) + for profile in locations) def _parse_profile_files_to_tuple_of_dicts(self, file_name, locations, juststrings=False): - return tuple(self._parse_file_to_dict(os.path.join(profile, file_name), juststrings) for profile in locations) + return tuple(self._parse_file_to_dict( + os.path.join(profile.location, file_name), juststrings, + recursive=profile.portage1_directories) + for profile in locations) def getUseMask(self, pkg=None): if pkg is None: return frozenset(stack_lists( self._usemask_list, incremental=True)) + slot = None cp = getattr(pkg, "cp", None) if cp is None: - cp = cpv_getkey(remove_slot(pkg)) + slot = dep_getslot(pkg) + repo = dep_getrepo(pkg) + pkg = 
_pkg_str(remove_slot(pkg), slot=slot, repo=repo) + cp = pkg.cp usemask = [] if hasattr(pkg, "repo") and pkg.repo != Package.UNKNOWN_REPO: repos = [] diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/UseManager.pyo b/portage_with_autodep/pym/portage/package/ebuild/_config/UseManager.pyo Binary files differnew file mode 100644 index 0000000..2c9a609 --- /dev/null +++ b/portage_with_autodep/pym/portage/package/ebuild/_config/UseManager.pyo diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/VirtualsManager.pyo b/portage_with_autodep/pym/portage/package/ebuild/_config/VirtualsManager.pyo Binary files differnew file mode 100644 index 0000000..b2ebd21 --- /dev/null +++ b/portage_with_autodep/pym/portage/package/ebuild/_config/VirtualsManager.pyo diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/__init__.pyo b/portage_with_autodep/pym/portage/package/ebuild/_config/__init__.pyo Binary files differnew file mode 100644 index 0000000..b03cc29 --- /dev/null +++ b/portage_with_autodep/pym/portage/package/ebuild/_config/__init__.pyo diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/env_var_validation.pyo b/portage_with_autodep/pym/portage/package/ebuild/_config/env_var_validation.pyo Binary files differnew file mode 100644 index 0000000..aeee789 --- /dev/null +++ b/portage_with_autodep/pym/portage/package/ebuild/_config/env_var_validation.pyo diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/features_set.pyo b/portage_with_autodep/pym/portage/package/ebuild/_config/features_set.pyo Binary files differnew file mode 100644 index 0000000..9854444 --- /dev/null +++ b/portage_with_autodep/pym/portage/package/ebuild/_config/features_set.pyo diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/helper.py b/portage_with_autodep/pym/portage/package/ebuild/_config/helper.py index 4f46781..ee0c090 100644 --- a/portage_with_autodep/pym/portage/package/ebuild/_config/helper.py +++ b/portage_with_autodep/pym/portage/package/ebuild/_config/helper.py @@ -1,4 +1,4 @@ -# Copyright 2010-2011 Gentoo Foundation +# Copyright 2010-2012 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 __all__ = ( @@ -24,7 +24,7 @@ def ordered_by_atom_specificity(cpdict, pkg, repo=None): order to achieve desired results (and thus corrupting the ChangeLog like ordering of the file). 
""" - if repo and repo != Package.UNKNOWN_REPO: + if not hasattr(pkg, 'repo') and repo and repo != Package.UNKNOWN_REPO: pkg = pkg + _repo_separator + repo results = [] diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/helper.pyo b/portage_with_autodep/pym/portage/package/ebuild/_config/helper.pyo Binary files differnew file mode 100644 index 0000000..f2b9261 --- /dev/null +++ b/portage_with_autodep/pym/portage/package/ebuild/_config/helper.pyo diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/special_env_vars.py b/portage_with_autodep/pym/portage/package/ebuild/_config/special_env_vars.py index 6d42809..1a75de9 100644 --- a/portage_with_autodep/pym/portage/package/ebuild/_config/special_env_vars.py +++ b/portage_with_autodep/pym/portage/package/ebuild/_config/special_env_vars.py @@ -1,4 +1,4 @@ -# Copyright 2010-2011 Gentoo Foundation +# Copyright 2010-2012 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 __all__ = ( @@ -15,14 +15,14 @@ env_blacklist = frozenset(( "A", "AA", "CATEGORY", "DEPEND", "DESCRIPTION", "EAPI", "EBUILD_FORCE_TEST", "EBUILD_PHASE", "EBUILD_SKIP_MANIFEST", "ED", "EMERGE_FROM", "EPREFIX", "EROOT", - "HOMEPAGE", "INHERITED", "IUSE", + "GREP_OPTIONS", "HOMEPAGE", "INHERITED", "IUSE", "KEYWORDS", "LICENSE", "MERGE_TYPE", "PDEPEND", "PF", "PKGUSE", "PORTAGE_BACKGROUND", "PORTAGE_BACKGROUND_UNMERGE", "PORTAGE_BUILDIR_LOCKED", "PORTAGE_BUILT_USE", "PORTAGE_CONFIGROOT", "PORTAGE_IUSE", - "PORTAGE_NONFATAL", "PORTAGE_REPO_NAME", "PORTAGE_SANDBOX_COMPAT_LEVEL", - "PORTAGE_USE", "PROPERTIES", "PROVIDE", "RDEPEND", "RESTRICT", - "ROOT", "SLOT", "SRC_URI" + "PORTAGE_NONFATAL", "PORTAGE_REPO_NAME", + "PORTAGE_USE", "PROPERTIES", "PROVIDE", "RDEPEND", "REPOSITORY", + "RESTRICT", "ROOT", "SLOT", "SRC_URI" )) environ_whitelist = [] @@ -36,7 +36,7 @@ environ_whitelist = [] # environment in order to prevent sandbox from sourcing /etc/profile # in it's bashrc (causing major leakage). 
environ_whitelist += [ - "ACCEPT_LICENSE", "BASH_ENV", "BUILD_PREFIX", "D", + "ACCEPT_LICENSE", "BASH_ENV", "BUILD_PREFIX", "COLUMNS", "D", "DISTDIR", "DOC_SYMLINKS_DIR", "EAPI", "EBUILD", "EBUILD_FORCE_TEST", "EBUILD_PHASE", "ECLASSDIR", "ECLASS_DEPTH", "ED", @@ -50,23 +50,23 @@ environ_whitelist += [ "PORTAGE_BINPKG_TMPFILE", "PORTAGE_BIN_PATH", "PORTAGE_BUILDDIR", "PORTAGE_BUNZIP2_COMMAND", "PORTAGE_BZIP2_COMMAND", - "PORTAGE_COLORMAP", + "PORTAGE_COLORMAP", "PORTAGE_COMPRESS_EXCLUDE_SUFFIXES", "PORTAGE_CONFIGROOT", "PORTAGE_DEBUG", "PORTAGE_DEPCACHEDIR", "PORTAGE_EBUILD_EXIT_FILE", "PORTAGE_FEATURES", "PORTAGE_GID", "PORTAGE_GRPNAME", "PORTAGE_INST_GID", "PORTAGE_INST_UID", "PORTAGE_IPC_DAEMON", "PORTAGE_IUSE", - "PORTAGE_LOG_FILE", + "PORTAGE_LOG_FILE", "PORTAGE_OVERRIDE_EPREFIX", "PORTAGE_PYM_PATH", "PORTAGE_PYTHON", "PORTAGE_QUIET", "PORTAGE_REPO_NAME", "PORTAGE_RESTRICT", - "PORTAGE_SANDBOX_COMPAT_LEVEL", "PORTAGE_SIGPIPE_STATUS", + "PORTAGE_SIGPIPE_STATUS", "PORTAGE_TMPDIR", "PORTAGE_UPDATE_ENV", "PORTAGE_USERNAME", "PORTAGE_VERBOSE", "PORTAGE_WORKDIR_MODE", "PORTDIR", "PORTDIR_OVERLAY", "PREROOTPATH", "PROFILE_PATHS", "REPLACING_VERSIONS", "REPLACED_BY_VERSION", "ROOT", "ROOTPATH", "T", "TMP", "TMPDIR", "USE_EXPAND", "USE_ORDER", "WORKDIR", - "XARGS", + "XARGS", "__PORTAGE_TEST_HARDLINK_LOCKS", ] # user config variables @@ -134,8 +134,9 @@ environ_filter += [ # portage config variables and variables set directly by portage environ_filter += [ "ACCEPT_CHOSTS", "ACCEPT_KEYWORDS", "ACCEPT_PROPERTIES", "AUTOCLEAN", - "CLEAN_DELAY", "COLLISION_IGNORE", "CONFIG_PROTECT", - "CONFIG_PROTECT_MASK", "EGENCACHE_DEFAULT_OPTS", "EMERGE_DEFAULT_OPTS", + "CLEAN_DELAY", "COLLISION_IGNORE", + "CONFIG_PROTECT", "CONFIG_PROTECT_MASK", + "EGENCACHE_DEFAULT_OPTS", "EMERGE_DEFAULT_OPTS", "EMERGE_LOG_DIR", "EMERGE_WARNING_DELAY", "FETCHCOMMAND", "FETCHCOMMAND_FTP", @@ -143,7 +144,8 @@ environ_filter += [ "FETCHCOMMAND_RSYNC", "FETCHCOMMAND_SFTP", "GENTOO_MIRRORS", "NOCONFMEM", "O", "PORTAGE_BACKGROUND", "PORTAGE_BACKGROUND_UNMERGE", - "PORTAGE_BINHOST_CHUNKSIZE", "PORTAGE_BUILDIR_LOCKED", "PORTAGE_CALLER", + "PORTAGE_BINHOST", + "PORTAGE_BINHOST_CHUNKSIZE", "PORTAGE_BUILDIR_LOCKED", "PORTAGE_ELOG_CLASSES", "PORTAGE_ELOG_MAILFROM", "PORTAGE_ELOG_MAILSUBJECT", "PORTAGE_ELOG_MAILURI", "PORTAGE_ELOG_SYSTEM", @@ -156,8 +158,8 @@ environ_filter += [ "PORTAGE_RO_DISTDIRS", "PORTAGE_RSYNC_EXTRA_OPTS", "PORTAGE_RSYNC_OPTS", "PORTAGE_RSYNC_RETRIES", "PORTAGE_SYNC_STALE", - "PORTAGE_USE", "PORT_LOGDIR", - "QUICKPKG_DEFAULT_OPTS", + "PORTAGE_USE", "PORT_LOGDIR", "PORT_LOGDIR_CLEAN", + "QUICKPKG_DEFAULT_OPTS", "REPOMAN_DEFAULT_OPTS", "RESUMECOMMAND", "RESUMECOMMAND_FTP", "RESUMECOMMAND_HTTP", "RESUMECOMMAND_HTTPS", "RESUMECOMMAND_RSYNC", "RESUMECOMMAND_SFTP", diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/special_env_vars.pyo b/portage_with_autodep/pym/portage/package/ebuild/_config/special_env_vars.pyo Binary files differnew file mode 100644 index 0000000..06ea37e --- /dev/null +++ b/portage_with_autodep/pym/portage/package/ebuild/_config/special_env_vars.pyo diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/unpack_dependencies.py b/portage_with_autodep/pym/portage/package/ebuild/_config/unpack_dependencies.py new file mode 100644 index 0000000..1375189 --- /dev/null +++ b/portage_with_autodep/pym/portage/package/ebuild/_config/unpack_dependencies.py @@ -0,0 +1,38 @@ +# Copyright 2012 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + 
+from portage import os, _supported_eapis +from portage.dep import use_reduce +from portage.eapi import eapi_has_automatic_unpack_dependencies +from portage.exception import InvalidDependString +from portage.localization import _ +from portage.util import grabfile, writemsg + +def load_unpack_dependencies_configuration(repositories): + repo_dict = {} + for repo in repositories.repos_with_profiles(): + for eapi in _supported_eapis: + if eapi_has_automatic_unpack_dependencies(eapi): + file_name = os.path.join(repo.location, "profiles", "unpack_dependencies", eapi) + lines = grabfile(file_name, recursive=True) + for line in lines: + elements = line.split() + suffix = elements[0].lower() + if len(elements) == 1: + writemsg(_("--- Missing unpack dependencies for '%s' suffix in '%s'\n") % (suffix, file_name)) + depend = " ".join(elements[1:]) + try: + use_reduce(depend, eapi=eapi) + except InvalidDependString as e: + writemsg(_("--- Invalid unpack dependencies for '%s' suffix in '%s': '%s'\n" % (suffix, file_name, e))) + else: + repo_dict.setdefault(repo.name, {}).setdefault(eapi, {})[suffix] = depend + + ret = {} + for repo in repositories.repos_with_profiles(): + for repo_name in [x.name for x in repo.masters] + [repo.name]: + for eapi in repo_dict.get(repo_name, {}): + for suffix, depend in repo_dict.get(repo_name, {}).get(eapi, {}).items(): + ret.setdefault(repo.name, {}).setdefault(eapi, {})[suffix] = depend + + return ret diff --git a/portage_with_autodep/pym/portage/package/ebuild/_eapi_invalid.py b/portage_with_autodep/pym/portage/package/ebuild/_eapi_invalid.py new file mode 100644 index 0000000..d23677d --- /dev/null +++ b/portage_with_autodep/pym/portage/package/ebuild/_eapi_invalid.py @@ -0,0 +1,54 @@ +# Copyright 2012 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + +import textwrap + +import portage +from portage.dep import _repo_separator +from portage.elog import elog_process +from portage.elog.messages import eerror + +def eapi_invalid(self, cpv, repo_name, settings, + eapi_var, eapi_parsed, eapi_lineno): + + msg = [] + msg.extend(textwrap.wrap(("EAPI assignment in ebuild '%s%s%s' does not" + " conform with PMS section 7.3.1 (see bug #402167):") % + (cpv, _repo_separator, repo_name), 70)) + + if not eapi_parsed: + # None means the assignment was not found, while an + # empty string indicates an (invalid) empty assingment. + msg.append( + "\tvalid EAPI assignment must" + " occur on or before line: %s" % + eapi_lineno) + else: + msg.append(("\tbash returned EAPI '%s' which does not match " + "assignment on line: %s") % + (eapi_var, eapi_lineno)) + + if 'parse-eapi-ebuild-head' in settings.features: + msg.extend(textwrap.wrap(("NOTE: This error will soon" + " become unconditionally fatal in a future version of Portage," + " but at this time, it can by made non-fatal by setting" + " FEATURES=-parse-eapi-ebuild-head in" + " make.conf."), 70)) + else: + msg.extend(textwrap.wrap(("NOTE: This error will soon" + " become unconditionally fatal in a future version of Portage." 
+ " At the earliest opportunity, please enable" + " FEATURES=parse-eapi-ebuild-head in make.conf in order to" + " make this error fatal."), 70)) + + if portage.data.secpass >= 2: + # TODO: improve elog permission error handling (bug #416231) + for line in msg: + eerror(line, phase="other", key=cpv) + elog_process(cpv, settings, + phasefilter=("other",)) + + else: + out = portage.output.EOutput() + for line in msg: + out.eerror(line) diff --git a/portage_with_autodep/pym/portage/package/ebuild/_eapi_invalid.pyo b/portage_with_autodep/pym/portage/package/ebuild/_eapi_invalid.pyo Binary files differnew file mode 100644 index 0000000..0181c03 --- /dev/null +++ b/portage_with_autodep/pym/portage/package/ebuild/_eapi_invalid.pyo diff --git a/portage_with_autodep/pym/portage/package/ebuild/_ipc/ExitCommand.pyo b/portage_with_autodep/pym/portage/package/ebuild/_ipc/ExitCommand.pyo Binary files differnew file mode 100644 index 0000000..315cb0f --- /dev/null +++ b/portage_with_autodep/pym/portage/package/ebuild/_ipc/ExitCommand.pyo diff --git a/portage_with_autodep/pym/portage/package/ebuild/_ipc/IpcCommand.pyo b/portage_with_autodep/pym/portage/package/ebuild/_ipc/IpcCommand.pyo Binary files differnew file mode 100644 index 0000000..9f75518 --- /dev/null +++ b/portage_with_autodep/pym/portage/package/ebuild/_ipc/IpcCommand.pyo diff --git a/portage_with_autodep/pym/portage/package/ebuild/_ipc/QueryCommand.py b/portage_with_autodep/pym/portage/package/ebuild/_ipc/QueryCommand.py index fb6e61e..7bbb0e8 100644 --- a/portage_with_autodep/pym/portage/package/ebuild/_ipc/QueryCommand.py +++ b/portage_with_autodep/pym/portage/package/ebuild/_ipc/QueryCommand.py @@ -1,4 +1,4 @@ -# Copyright 2010-2011 Gentoo Foundation +# Copyright 2010-2012 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 import io @@ -7,6 +7,7 @@ import portage from portage import os from portage import _unicode_decode from portage.dep import Atom +from portage.eapi import eapi_has_repo_deps from portage.elog import messages as elog_messages from portage.exception import InvalidAtom from portage.package.ebuild._ipc.IpcCommand import IpcCommand @@ -26,19 +27,21 @@ class QueryCommand(IpcCommand): def __call__(self, argv): """ - @returns: tuple of (stdout, stderr, returncode) + @return: tuple of (stdout, stderr, returncode) """ cmd, root, atom_str = argv + eapi = self.settings.get('EAPI') + allow_repo = eapi_has_repo_deps(eapi) try: - atom = Atom(atom_str) + atom = Atom(atom_str, allow_repo=allow_repo) except InvalidAtom: return ('', 'invalid atom: %s\n' % atom_str, 2) warnings = [] try: - atom = Atom(atom_str, eapi=self.settings.get('EAPI')) + atom = Atom(atom_str, allow_repo=allow_repo, eapi=eapi) except InvalidAtom as e: warnings.append(_unicode_decode("QA Notice: %s: %s") % (cmd, e)) diff --git a/portage_with_autodep/pym/portage/package/ebuild/_ipc/QueryCommand.pyo b/portage_with_autodep/pym/portage/package/ebuild/_ipc/QueryCommand.pyo Binary files differnew file mode 100644 index 0000000..0e9ee96 --- /dev/null +++ b/portage_with_autodep/pym/portage/package/ebuild/_ipc/QueryCommand.pyo diff --git a/portage_with_autodep/pym/portage/package/ebuild/_ipc/__init__.pyo b/portage_with_autodep/pym/portage/package/ebuild/_ipc/__init__.pyo Binary files differnew file mode 100644 index 0000000..d9f8d25 --- /dev/null +++ b/portage_with_autodep/pym/portage/package/ebuild/_ipc/__init__.pyo diff --git a/portage_with_autodep/pym/portage/package/ebuild/_metadata_invalid.py 
b/portage_with_autodep/pym/portage/package/ebuild/_metadata_invalid.py new file mode 100644 index 0000000..bcf1f7f --- /dev/null +++ b/portage_with_autodep/pym/portage/package/ebuild/_metadata_invalid.py @@ -0,0 +1,41 @@ +# Copyright 2012 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + +import textwrap + +import portage +from portage.dep import _repo_separator +from portage.elog import elog_process +from portage.elog.messages import eerror + +def eapi_invalid(self, cpv, repo_name, settings, + eapi_var, eapi_parsed, eapi_lineno): + + msg = [] + msg.extend(textwrap.wrap(("EAPI assignment in ebuild '%s%s%s' does not" + " conform with PMS section 7.3.1 (see bug #402167):") % + (cpv, _repo_separator, repo_name), 70)) + + if not eapi_parsed: + # None means the assignment was not found, while an + # empty string indicates an (invalid) empty assingment. + msg.append( + "\tvalid EAPI assignment must" + " occur on or before line: %s" % + eapi_lineno) + else: + msg.append(("\tbash returned EAPI '%s' which does not match " + "assignment on line: %s") % + (eapi_var, eapi_lineno)) + + if portage.data.secpass >= 2: + # TODO: improve elog permission error handling (bug #416231) + for line in msg: + eerror(line, phase="other", key=cpv) + elog_process(cpv, settings, + phasefilter=("other",)) + + else: + out = portage.output.EOutput() + for line in msg: + out.eerror(line) diff --git a/portage_with_autodep/pym/portage/package/ebuild/_parallel_manifest/ManifestProcess.py b/portage_with_autodep/pym/portage/package/ebuild/_parallel_manifest/ManifestProcess.py new file mode 100644 index 0000000..44e2576 --- /dev/null +++ b/portage_with_autodep/pym/portage/package/ebuild/_parallel_manifest/ManifestProcess.py @@ -0,0 +1,43 @@ +# Copyright 2012 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + +import portage +from portage import os +from portage.exception import (FileNotFound, + PermissionDenied, PortagePackageException) +from portage.localization import _ +from portage.util._async.ForkProcess import ForkProcess + +class ManifestProcess(ForkProcess): + + __slots__ = ("cp", "distdir", "fetchlist_dict", "repo_config") + + MODIFIED = 16 + + def _run(self): + mf = self.repo_config.load_manifest( + os.path.join(self.repo_config.location, self.cp), + self.distdir, fetchlist_dict=self.fetchlist_dict) + + try: + mf.create(assumeDistHashesAlways=True) + except FileNotFound as e: + portage.writemsg(_("!!! File %s doesn't exist, can't update " + "Manifest\n") % e, noiselevel=-1) + return 1 + + except PortagePackageException as e: + portage.writemsg(("!!! %s\n") % (e,), noiselevel=-1) + return 1 + + try: + modified = mf.write(sign=False) + except PermissionDenied as e: + portage.writemsg("!!! 
%s: %s\n" % (_("Permission Denied"), e,), + noiselevel=-1) + return 1 + else: + if modified: + return self.MODIFIED + else: + return os.EX_OK diff --git a/portage_with_autodep/pym/portage/package/ebuild/_parallel_manifest/ManifestScheduler.py b/portage_with_autodep/pym/portage/package/ebuild/_parallel_manifest/ManifestScheduler.py new file mode 100644 index 0000000..38ac482 --- /dev/null +++ b/portage_with_autodep/pym/portage/package/ebuild/_parallel_manifest/ManifestScheduler.py @@ -0,0 +1,93 @@ +# Copyright 2012-2013 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + +import portage +from portage import os +from portage.dep import _repo_separator +from portage.exception import InvalidDependString +from portage.localization import _ +from portage.util._async.AsyncScheduler import AsyncScheduler +from .ManifestTask import ManifestTask + +class ManifestScheduler(AsyncScheduler): + + def __init__(self, portdb, cp_iter=None, + gpg_cmd=None, gpg_vars=None, force_sign_key=None, **kwargs): + + AsyncScheduler.__init__(self, **kwargs) + + self._portdb = portdb + + if cp_iter is None: + cp_iter = self._iter_every_cp() + self._cp_iter = cp_iter + self._gpg_cmd = gpg_cmd + self._gpg_vars = gpg_vars + self._force_sign_key = force_sign_key + self._task_iter = self._iter_tasks() + + def _next_task(self): + return next(self._task_iter) + + def _iter_every_cp(self): + # List categories individually, in order to start yielding quicker, + # and in order to reduce latency in case of a signal interrupt. + cp_all = self._portdb.cp_all + for category in sorted(self._portdb.categories): + for cp in cp_all(categories=(category,)): + yield cp + + def _iter_tasks(self): + portdb = self._portdb + distdir = portdb.settings["DISTDIR"] + disabled_repos = set() + + for cp in self._cp_iter: + if self._terminated.is_set(): + break + # We iterate over portdb.porttrees, since it's common to + # tweak this attribute in order to adjust repo selection. + for mytree in portdb.porttrees: + if self._terminated.is_set(): + break + repo_config = portdb.repositories.get_repo_for_location(mytree) + if not repo_config.create_manifest: + if repo_config.name not in disabled_repos: + disabled_repos.add(repo_config.name) + portage.writemsg( + _(">>> Skipping creating Manifest for %s%s%s; " + "repository is configured to not use them\n") % + (cp, _repo_separator, repo_config.name), + noiselevel=-1) + continue + cpv_list = portdb.cp_list(cp, mytree=[repo_config.location]) + if not cpv_list: + continue + fetchlist_dict = {} + try: + for cpv in cpv_list: + fetchlist_dict[cpv] = \ + list(portdb.getFetchMap(cpv, mytree=mytree)) + except InvalidDependString as e: + portage.writemsg( + _("!!! 
%s%s%s: SRC_URI: %s\n") % + (cp, _repo_separator, repo_config.name, e), + noiselevel=-1) + self._error_count += 1 + continue + + yield ManifestTask(cp=cp, distdir=distdir, + fetchlist_dict=fetchlist_dict, repo_config=repo_config, + gpg_cmd=self._gpg_cmd, gpg_vars=self._gpg_vars, + force_sign_key=self._force_sign_key) + + def _task_exit(self, task): + + if task.returncode != os.EX_OK: + if not self._terminated_tasks: + portage.writemsg( + "Error processing %s%s%s, continuing...\n" % + (task.cp, _repo_separator, task.repo_config.name), + noiselevel=-1) + + AsyncScheduler._task_exit(self, task) diff --git a/portage_with_autodep/pym/portage/package/ebuild/_parallel_manifest/ManifestTask.py b/portage_with_autodep/pym/portage/package/ebuild/_parallel_manifest/ManifestTask.py new file mode 100644 index 0000000..0ee2b91 --- /dev/null +++ b/portage_with_autodep/pym/portage/package/ebuild/_parallel_manifest/ManifestTask.py @@ -0,0 +1,186 @@ +# Copyright 2012-2013 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + +import errno +import re +import subprocess + +from portage import os +from portage import _unicode_encode, _encodings +from portage.const import MANIFEST2_IDENTIFIERS +from portage.util import (atomic_ofstream, grablines, + shlex_split, varexpand, writemsg) +from portage.util._async.PipeLogger import PipeLogger +from portage.util._async.PopenProcess import PopenProcess +from _emerge.CompositeTask import CompositeTask +from _emerge.PipeReader import PipeReader +from .ManifestProcess import ManifestProcess + +class ManifestTask(CompositeTask): + + __slots__ = ("cp", "distdir", "fetchlist_dict", "gpg_cmd", + "gpg_vars", "repo_config", "force_sign_key", "_manifest_path") + + _PGP_HEADER = b"BEGIN PGP SIGNED MESSAGE" + _manifest_line_re = re.compile(r'^(%s) ' % "|".join(MANIFEST2_IDENTIFIERS)) + _gpg_key_id_re = re.compile(r'^[0-9A-F]*$') + _gpg_key_id_lengths = (8, 16, 24, 32, 40) + + def _start(self): + self._manifest_path = os.path.join(self.repo_config.location, + self.cp, "Manifest") + manifest_proc = ManifestProcess(cp=self.cp, distdir=self.distdir, + fetchlist_dict=self.fetchlist_dict, repo_config=self.repo_config, + scheduler=self.scheduler) + self._start_task(manifest_proc, self._manifest_proc_exit) + + def _manifest_proc_exit(self, manifest_proc): + self._assert_current(manifest_proc) + if manifest_proc.returncode not in (os.EX_OK, manifest_proc.MODIFIED): + self.returncode = manifest_proc.returncode + self._current_task = None + self.wait() + return + + modified = manifest_proc.returncode == manifest_proc.MODIFIED + sign = self.gpg_cmd is not None + + if not modified and sign: + sign = self._need_signature() + if not sign and self.force_sign_key is not None \ + and os.path.exists(self._manifest_path): + self._check_sig_key() + return + + if not sign or not os.path.exists(self._manifest_path): + self.returncode = os.EX_OK + self._current_task = None + self.wait() + return + + self._start_gpg_proc() + + def _check_sig_key(self): + null_fd = os.open('/dev/null', os.O_RDONLY) + popen_proc = PopenProcess(proc=subprocess.Popen( + ["gpg", "--verify", self._manifest_path], + stdin=null_fd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT), + pipe_reader=PipeReader()) + os.close(null_fd) + popen_proc.pipe_reader.input_files = { + "producer" : popen_proc.proc.stdout} + self._start_task(popen_proc, self._check_sig_key_exit) + + @staticmethod + def _parse_gpg_key(output): + """ + Returns the first token which appears to represent a gpg key + id, or None if 
there is no such token. + """ + regex = ManifestTask._gpg_key_id_re + lengths = ManifestTask._gpg_key_id_lengths + for token in output.split(): + m = regex.match(token) + if m is not None and len(m.group(0)) in lengths: + return m.group(0) + return None + + @staticmethod + def _normalize_gpg_key(key_str): + """ + Strips leading "0x" and trailing "!", and converts to uppercase + (intended to be the same format as that in gpg --verify output). + """ + key_str = key_str.upper() + if key_str.startswith("0X"): + key_str = key_str[2:] + key_str = key_str.rstrip("!") + return key_str + + def _check_sig_key_exit(self, proc): + self._assert_current(proc) + + parsed_key = self._parse_gpg_key( + proc.pipe_reader.getvalue().decode('utf_8', 'replace')) + if parsed_key is not None and \ + self._normalize_gpg_key(parsed_key) == \ + self._normalize_gpg_key(self.force_sign_key): + self.returncode = os.EX_OK + self._current_task = None + self.wait() + return + + if self._was_cancelled(): + self.wait() + return + + self._strip_sig(self._manifest_path) + self._start_gpg_proc() + + @staticmethod + def _strip_sig(manifest_path): + """ + Strip an existing signature from a Manifest file. + """ + line_re = ManifestTask._manifest_line_re + lines = grablines(manifest_path) + f = None + try: + f = atomic_ofstream(manifest_path) + for line in lines: + if line_re.match(line) is not None: + f.write(line) + f.close() + f = None + finally: + if f is not None: + f.abort() + + def _start_gpg_proc(self): + gpg_vars = self.gpg_vars + if gpg_vars is None: + gpg_vars = {} + else: + gpg_vars = gpg_vars.copy() + gpg_vars["FILE"] = self._manifest_path + gpg_cmd = varexpand(self.gpg_cmd, mydict=gpg_vars) + gpg_cmd = shlex_split(gpg_cmd) + gpg_proc = PopenProcess(proc=subprocess.Popen(gpg_cmd, + stdout=subprocess.PIPE, stderr=subprocess.STDOUT)) + # PipeLogger echos output and efficiently monitors for process + # exit by listening for the stdout EOF event. + gpg_proc.pipe_reader = PipeLogger(background=self.background, + input_fd=gpg_proc.proc.stdout, scheduler=self.scheduler) + self._start_task(gpg_proc, self._gpg_proc_exit) + + def _gpg_proc_exit(self, gpg_proc): + if self._default_exit(gpg_proc) != os.EX_OK: + self.wait() + return + + rename_args = (self._manifest_path + ".asc", self._manifest_path) + try: + os.rename(*rename_args) + except OSError as e: + writemsg("!!! 
rename('%s', '%s'): %s\n" % rename_args + (e,), + noiselevel=-1) + try: + os.unlink(self._manifest_path + ".asc") + except OSError: + pass + self.returncode = 1 + else: + self.returncode = os.EX_OK + + self._current_task = None + self.wait() + + def _need_signature(self): + try: + with open(_unicode_encode(self._manifest_path, + encoding=_encodings['fs'], errors='strict'), 'rb') as f: + return self._PGP_HEADER not in f.readline() + except IOError as e: + if e.errno in (errno.ENOENT, errno.ESTALE): + return False + raise diff --git a/portage_with_autodep/pym/portage/tests/resolver/__init__.py b/portage_with_autodep/pym/portage/package/ebuild/_parallel_manifest/__init__.py index 21a391a..418ad86 100644 --- a/portage_with_autodep/pym/portage/tests/resolver/__init__.py +++ b/portage_with_autodep/pym/portage/package/ebuild/_parallel_manifest/__init__.py @@ -1,2 +1,2 @@ -# Copyright 2010 Gentoo Foundation +# Copyright 2012 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 diff --git a/portage_with_autodep/pym/portage/package/ebuild/_spawn_nofetch.py b/portage_with_autodep/pym/portage/package/ebuild/_spawn_nofetch.py index befdc89..94f8c79 100644 --- a/portage_with_autodep/pym/portage/package/ebuild/_spawn_nofetch.py +++ b/portage_with_autodep/pym/portage/package/ebuild/_spawn_nofetch.py @@ -1,10 +1,10 @@ # Copyright 2010-2011 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 -import shutil import tempfile from portage import os +from portage import shutil from portage.const import EBUILD_PHASES from portage.elog import elog_process from portage.package.ebuild.config import config @@ -20,7 +20,11 @@ def spawn_nofetch(portdb, ebuild_path, settings=None): to cache metadata. It will be cloned internally, in order to prevent any changes from interfering with the calling code. If settings is None then a suitable config instance will be - acquired from the given portdbapi instance. + acquired from the given portdbapi instance. Do not use the + settings parameter unless setcpv has been called on the given + instance, since otherwise it's possible to trigger issues like + bug #408817 due to fragile assumptions involving the config + state inside doebuild_environment(). A private PORTAGE_BUILDDIR will be created and cleaned up, in order to avoid any interference with any other processes. 
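
The ManifestTask hunk above gates GPG signing on a probe of the Manifest's first line (_need_signature). Read in isolation, that probe amounts to the following minimal sketch; the sample path is hypothetical, and the real method additionally encodes the path with _unicode_encode before opening it:

import errno

_PGP_HEADER = b"BEGIN PGP SIGNED MESSAGE"

def needs_signature(manifest_path):
    # A Manifest counts as already signed once its first line carries
    # the cleartext PGP header; anything else still needs a signature.
    try:
        with open(manifest_path, 'rb') as f:
            return _PGP_HEADER not in f.readline()
    except IOError as e:
        # A missing (or stale NFS) Manifest never needs signing here.
        if e.errno in (errno.ENOENT, errno.ESTALE):
            return False
        raise

# Hypothetical usage:
# needs_signature("/usr/portage/app-misc/foo/Manifest")  # -> True or False

Checking only the first line keeps the probe cheap even for large Manifests, which matters because the scheduler runs it once per package.
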
diff --git a/portage_with_autodep/pym/portage/package/ebuild/_spawn_nofetch.pyo b/portage_with_autodep/pym/portage/package/ebuild/_spawn_nofetch.pyo Binary files differnew file mode 100644 index 0000000..ac449ea --- /dev/null +++ b/portage_with_autodep/pym/portage/package/ebuild/_spawn_nofetch.pyo diff --git a/portage_with_autodep/pym/portage/package/ebuild/config.py b/portage_with_autodep/pym/portage/package/ebuild/config.py index a8c6ad6..97cbd99 100644 --- a/portage_with_autodep/pym/portage/package/ebuild/config.py +++ b/portage_with_autodep/pym/portage/package/ebuild/config.py @@ -1,4 +1,4 @@ -# Copyright 2010-2011 Gentoo Foundation +# Copyright 2010-2012 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 __all__ = [ @@ -7,7 +7,10 @@ __all__ = [ import copy from itertools import chain +import grp import logging +import platform +import pwd import re import sys import warnings @@ -21,10 +24,9 @@ from portage import bsd_chflags, \ load_mod, os, selinux, _unicode_decode from portage.const import CACHE_PATH, \ DEPCACHE_PATH, INCREMENTALS, MAKE_CONF_FILE, \ - MODULES_FILE_PATH, PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, \ + MODULES_FILE_PATH, \ PRIVATE_PATH, PROFILE_PATH, USER_CONFIG_PATH, \ USER_VIRTUALS_FILE -from portage.const import _SANDBOX_COMPAT_LEVEL from portage.dbapi import dbapi from portage.dbapi.porttree import portdbapi from portage.dbapi.vartree import vartree @@ -41,7 +43,7 @@ from portage.util import ensure_dirs, getconfig, grabdict, \ grabdict_package, grabfile, grabfile_package, LazyItemsDict, \ normalize_path, shlex_split, stack_dictlist, stack_dicts, stack_lists, \ writemsg, writemsg_level -from portage.versions import catpkgsplit, catsplit, cpv_getkey +from portage.versions import catpkgsplit, catsplit, cpv_getkey, _pkg_str from portage.package.ebuild._config import special_env_vars from portage.package.ebuild._config.env_var_validation import validate_cmd_var @@ -120,11 +122,19 @@ class config(object): virtuals ...etc you look in here. """ + _constant_keys = frozenset(['PORTAGE_BIN_PATH', 'PORTAGE_GID', + 'PORTAGE_PYM_PATH']) + _setcpv_aux_keys = ('DEFINED_PHASES', 'DEPEND', 'EAPI', 'INHERITED', 'IUSE', 'REQUIRED_USE', 'KEYWORDS', 'LICENSE', 'PDEPEND', 'PROPERTIES', 'PROVIDE', 'RDEPEND', 'SLOT', 'repository', 'RESTRICT', 'LICENSE',) + _module_aliases = { + "cache.metadata_overlay.database" : "portage.cache.flat_hash.database", + "portage.cache.metadata_overlay.database" : "portage.cache.flat_hash.database", + } + _case_insensitive_vars = special_env_vars.case_insensitive_vars _default_globals = special_env_vars.default_globals _env_blacklist = special_env_vars.env_blacklist @@ -135,7 +145,8 @@ class config(object): def __init__(self, clone=None, mycpv=None, config_profile_path=None, config_incrementals=None, config_root=None, target_root=None, - _eprefix=None, local_config=True, env=None, _unmatched_removal=False): + eprefix=None, local_config=True, env=None, + _unmatched_removal=False): """ @param clone: If provided, init will use deepcopy to copy by value the instance. @type clone: Instance of config class. @@ -151,8 +162,8 @@ class config(object): @type config_root: String @param target_root: __init__ override of $ROOT env variable. 
@type target_root: String - @param _eprefix: set the EPREFIX variable (private, used by internal tests) - @type _eprefix: String + @param eprefix: set the EPREFIX variable (default is portage.const.EPREFIX) + @type eprefix: String @param local_config: Enables loading of local config (/etc/portage); used most by repoman to ignore local config (keywording and unmasking) @type local_config: Boolean @@ -164,10 +175,6 @@ class config(object): @type _unmatched_removal: Boolean """ - # rename local _eprefix variable for convenience - eprefix = _eprefix - del _eprefix - # When initializing the global portage.settings instance, avoid # raising exceptions whenever possible since exceptions thrown # from 'import portage' or 'import portage.exceptions' statements @@ -208,6 +215,7 @@ class config(object): self.repositories = clone.repositories self._iuse_implicit_match = clone._iuse_implicit_match self._non_user_variables = clone._non_user_variables + self._env_d_blacklist = clone._env_d_blacklist self._repo_make_defaults = clone._repo_make_defaults self.usemask = clone.usemask self.useforce = clone.useforce @@ -272,9 +280,6 @@ class config(object): eprefix = locations_manager.eprefix config_root = locations_manager.config_root - self.profiles = locations_manager.profiles - self.profile_path = locations_manager.profile_path - self.user_profile_dir = locations_manager.user_profile_dir abs_user_config = locations_manager.abs_user_config make_conf = getconfig( @@ -293,6 +298,38 @@ class config(object): eroot = locations_manager.eroot self.global_config_path = locations_manager.global_config_path + # The expand_map is used for variable substitution + # in getconfig() calls, and the getconfig() calls + # update expand_map with the value of each variable + # assignment that occurs. Variable substitution occurs + # in the following order, which corresponds to the + # order of appearance in self.lookuplist: + # + # * env.d + # * make.globals + # * make.defaults + # * make.conf + # + # Notably absent is "env", since we want to avoid any + # interaction with the calling environment that might + # lead to unexpected results. + + env_d = getconfig(os.path.join(eroot, "etc", "profile.env"), + expand=False) or {} + expand_map = env_d.copy() + self._expand_map = expand_map + + # Allow make.globals to set default paths relative to ${EPREFIX}. 
+ expand_map["EPREFIX"] = eprefix + + make_globals = getconfig(os.path.join( + self.global_config_path, 'make.globals'), expand=expand_map) + if make_globals is None: + make_globals = {} + + for k, v in self._default_globals.items(): + make_globals.setdefault(k, v) + if config_incrementals is None: self.incrementals = INCREMENTALS else: @@ -302,14 +339,20 @@ class config(object): self.module_priority = ("user", "default") self.modules = {} - modules_loader = KeyValuePairFileLoader( - os.path.join(config_root, MODULES_FILE_PATH), None, None) + modules_file = os.path.join(config_root, MODULES_FILE_PATH) + modules_loader = KeyValuePairFileLoader(modules_file, None, None) modules_dict, modules_errors = modules_loader.load() self.modules["user"] = modules_dict if self.modules["user"] is None: self.modules["user"] = {} + user_auxdbmodule = \ + self.modules["user"].get("portdbapi.auxdbmodule") + if user_auxdbmodule is not None and \ + user_auxdbmodule in self._module_aliases: + warnings.warn("'%s' is deprecated: %s" % + (user_auxdbmodule, modules_file)) + self.modules["default"] = { - "portdbapi.metadbmodule": "portage.cache.metadata.database", "portdbapi.auxdbmodule": "portage.cache.flat_hash.database", } @@ -328,43 +371,9 @@ class config(object): self.configlist.append({}) self.configdict["pkginternal"] = self.configlist[-1] - self.packages_list = [grabfile_package(os.path.join(x, "packages"), verify_eapi=True) for x in self.profiles] - self.packages = tuple(stack_lists(self.packages_list, incremental=1)) - del self.packages_list - #self.packages = grab_stacked("packages", self.profiles, grabfile, incremental_lines=1) - - # revmaskdict - self.prevmaskdict={} - for x in self.packages: - # Negative atoms are filtered by the above stack_lists() call. - if not isinstance(x, Atom): - x = Atom(x.lstrip('*')) - self.prevmaskdict.setdefault(x.cp, []).append(x) - - # The expand_map is used for variable substitution - # in getconfig() calls, and the getconfig() calls - # update expand_map with the value of each variable - # assignment that occurs. Variable substitution occurs - # in the following order, which corresponds to the - # order of appearance in self.lookuplist: - # - # * env.d - # * make.globals - # * make.defaults - # * make.conf - # - # Notably absent is "env", since we want to avoid any - # interaction with the calling environment that might - # lead to unexpected results. - expand_map = {} - self._expand_map = expand_map - - env_d = getconfig(os.path.join(eroot, "etc", "profile.env"), - expand=expand_map) # env_d will be None if profile.env doesn't exist. if env_d: self.configdict["env.d"].update(env_d) - expand_map.update(env_d) # backupenv is used for calculating incremental variables. 
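The relocated expand_map block above makes the substitution order explicit: each configuration layer is parsed with the variables accumulated so far, and its own assignments become visible to the layers parsed after it (which is why EPREFIX is seeded before make.globals is read). A toy sketch of that incremental expansion, with parse_layer() standing in for portage.util.getconfig():

import re

_var_re = re.compile(r"\$\{(\w+)\}")

def parse_layer(assignments, expand_map):
    parsed = {}
    for key, raw in assignments.items():
        # Expand ${VAR} references using everything seen so far.
        value = _var_re.sub(lambda m: expand_map.get(m.group(1), ""), raw)
        parsed[key] = value
        expand_map[key] = value  # later layers see this assignment
    return parsed

expand_map = {"EPREFIX": "/prefix"}  # seeded before make.globals, as above
make_globals = parse_layer({"PORTDIR": "${EPREFIX}/usr/portage"}, expand_map)
make_conf = parse_layer({"DISTDIR": "${PORTDIR}/distfiles"}, expand_map)
assert make_conf["DISTDIR"] == "/prefix/usr/portage/distfiles"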
if env is None: @@ -390,40 +399,72 @@ class config(object): self.configdict["env"] = LazyItemsDict(self.backupenv) - for x in (self.global_config_path,): - self.mygcfg = getconfig(os.path.join(x, "make.globals"), - expand=expand_map) - if self.mygcfg: - break + self.configlist.append(make_globals) + self.configdict["globals"]=self.configlist[-1] - if self.mygcfg is None: - self.mygcfg = {} + self.make_defaults_use = [] - for k, v in self._default_globals.items(): - self.mygcfg.setdefault(k, v) + #Loading Repositories + self["PORTAGE_CONFIGROOT"] = config_root + self["ROOT"] = target_root + self["EPREFIX"] = eprefix + self["EROOT"] = eroot + known_repos = [] + portdir = "" + portdir_overlay = "" + for confs in [make_globals, make_conf, self.configdict["env"]]: + v = confs.get("PORTDIR") + if v is not None: + portdir = v + known_repos.append(v) + v = confs.get("PORTDIR_OVERLAY") + if v is not None: + portdir_overlay = v + known_repos.extend(shlex_split(v)) + known_repos = frozenset(known_repos) + self["PORTDIR"] = portdir + self["PORTDIR_OVERLAY"] = portdir_overlay + self.lookuplist = [self.configdict["env"]] + self.repositories = load_repository_config(self) - self.configlist.append(self.mygcfg) - self.configdict["globals"]=self.configlist[-1] + locations_manager.load_profiles(self.repositories, known_repos) - self.make_defaults_use = [] - self.mygcfg = {} + profiles_complex = locations_manager.profiles_complex + self.profiles = locations_manager.profiles + self.profile_path = locations_manager.profile_path + self.user_profile_dir = locations_manager.user_profile_dir + + packages_list = [grabfile_package(os.path.join(x, "packages"), + verify_eapi=True) for x in self.profiles] + self.packages = tuple(stack_lists(packages_list, incremental=1)) + + # revmaskdict + self.prevmaskdict={} + for x in self.packages: + # Negative atoms are filtered by the above stack_lists() call. 
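The "Loading Repositories" hunk above scans make.globals, make.conf and the environment in that order, letting the last definition of PORTDIR/PORTDIR_OVERLAY win while remembering every mentioned location so load_profiles() can recognize it later. A condensed sketch of that collection step (collect_repos() and its argument are illustrative):

import shlex

def collect_repos(layers):
    portdir = ""
    portdir_overlay = ""
    known = []
    for conf in layers:  # e.g. [make_globals, make_conf, environ]
        v = conf.get("PORTDIR")
        if v is not None:
            portdir = v          # last definition wins
            known.append(v)
        v = conf.get("PORTDIR_OVERLAY")
        if v is not None:
            portdir_overlay = v
            known.extend(shlex.split(v))
    return portdir, portdir_overlay, frozenset(known)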
+ if not isinstance(x, Atom): + x = Atom(x.lstrip('*')) + self.prevmaskdict.setdefault(x.cp, []).append(x) + + + mygcfg = {} if self.profiles: mygcfg_dlists = [getconfig(os.path.join(x, "make.defaults"), expand=expand_map) for x in self.profiles] self._make_defaults = mygcfg_dlists - self.mygcfg = stack_dicts(mygcfg_dlists, + mygcfg = stack_dicts(mygcfg_dlists, incrementals=self.incrementals) - if self.mygcfg is None: - self.mygcfg = {} - self.configlist.append(self.mygcfg) + if mygcfg is None: + mygcfg = {} + self.configlist.append(mygcfg) self.configdict["defaults"]=self.configlist[-1] - self.mygcfg = getconfig( + mygcfg = getconfig( os.path.join(config_root, MAKE_CONF_FILE), tolerant=tolerant, allow_sourcing=True, expand=expand_map) or {} - self.mygcfg.update(getconfig( + mygcfg.update(getconfig( os.path.join(abs_user_config, 'make.conf'), tolerant=tolerant, allow_sourcing=True, expand=expand_map) or {}) @@ -439,10 +480,18 @@ class config(object): non_user_variables = frozenset(non_user_variables) self._non_user_variables = non_user_variables + self._env_d_blacklist = frozenset(chain( + profile_only_variables, + self._env_blacklist, + )) + env_d = self.configdict["env.d"] + for k in self._env_d_blacklist: + env_d.pop(k, None) + for k in profile_only_variables: - self.mygcfg.pop(k, None) + mygcfg.pop(k, None) - self.configlist.append(self.mygcfg) + self.configlist.append(mygcfg) self.configdict["conf"]=self.configlist[-1] self.configlist.append(LazyItemsDict()) @@ -472,25 +521,23 @@ class config(object): self["ROOT"] = target_root self.backup_changes("ROOT") + # The PORTAGE_OVERRIDE_EPREFIX variable propagates the EPREFIX + # of this config instance to any portage commands or API + # consumers running in subprocesses. self["EPREFIX"] = eprefix self.backup_changes("EPREFIX") + self["PORTAGE_OVERRIDE_EPREFIX"] = eprefix + self.backup_changes("PORTAGE_OVERRIDE_EPREFIX") self["EROOT"] = eroot self.backup_changes("EROOT") - self["PORTAGE_SANDBOX_COMPAT_LEVEL"] = _SANDBOX_COMPAT_LEVEL - self.backup_changes("PORTAGE_SANDBOX_COMPAT_LEVEL") - self._ppropertiesdict = portage.dep.ExtendedAtomDict(dict) self._penvdict = portage.dep.ExtendedAtomDict(dict) - #Loading Repositories - self.repositories = load_repository_config(self) - #filling PORTDIR and PORTDIR_OVERLAY variable for compatibility main_repo = self.repositories.mainRepo() if main_repo is not None: - main_repo = main_repo.user_location - self["PORTDIR"] = main_repo + self["PORTDIR"] = main_repo.user_location self.backup_changes("PORTDIR") # repoman controls PORTDIR_OVERLAY via the environment, so no @@ -501,11 +548,11 @@ class config(object): new_ov = [] if portdir_overlay: - whitespace_re = re.compile(r"\s") + shell_quote_re = re.compile(r"[\s\\\"'$`]") for ov in portdir_overlay: ov = normalize_path(ov) if os.path.isdir(ov): - if whitespace_re.search(ov) is not None: + if shell_quote_re.search(ov) is not None: ov = portage._shell_quote(ov) new_ov.append(ov) else: @@ -528,11 +575,11 @@ class config(object): self._repo_make_defaults[repo.name] = d #Read package.keywords and package.accept_keywords. - self._keywords_manager = KeywordsManager(self.profiles, abs_user_config, \ + self._keywords_manager = KeywordsManager(profiles_complex, abs_user_config, \ local_config, global_accept_keywords=self.configdict["defaults"].get("ACCEPT_KEYWORDS", "")) #Read all USE related files from profiles and optionally from user config. 
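The shell_quote_re change above widens the old whitespace-only test: any overlay path containing a character that is special to the shell now gets quoted before being put back into the space-separated PORTDIR_OVERLAY string. A sketch with a simple quote helper standing in for portage._shell_quote():

import re

shell_quote_re = re.compile(r"[\s\\\"'$`]")

def shell_quote(s):
    # Escape backslash first, then the characters special inside "...".
    for c in ("\\", '"', "$", "`"):
        s = s.replace(c, "\\" + c)
    return '"%s"' % s

def format_overlays(paths):
    out = []
    for ov in paths:
        if shell_quote_re.search(ov) is not None:
            ov = shell_quote(ov)
        out.append(ov)
    return " ".join(out)

print(format_overlays(["/var/overlay", "/mnt/my overlay"]))
# -> /var/overlay "/mnt/my overlay"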
- self._use_manager = UseManager(self.repositories, self.profiles, abs_user_config, user_config=local_config) + self._use_manager = UseManager(self.repositories, profiles_complex, abs_user_config, user_config=local_config) #Initialize all USE related variables we track ourselves. self.usemask = self._use_manager.getUseMask() self.useforce = self._use_manager.getUseForce() @@ -549,7 +596,7 @@ class config(object): self.configdict["conf"].get("ACCEPT_LICENSE", "")) #Read package.mask and package.unmask from profiles and optionally from user config - self._mask_manager = MaskManager(self.repositories, self.profiles, + self._mask_manager = MaskManager(self.repositories, profiles_complex, abs_user_config, user_config=local_config, strict_umatched_removal=_unmatched_removal) @@ -597,16 +644,21 @@ class config(object): self.categories = [grabfile(os.path.join(x, "categories")) \ for x in locations_manager.profile_and_user_locations] category_re = dbapi._category_re - self.categories = tuple(sorted( + # categories used to be a tuple, but now we use a frozenset + # for hashed category validation in pordbapi.cp_list() + self.categories = frozenset( x for x in stack_lists(self.categories, incremental=1) - if category_re.match(x) is not None)) + if category_re.match(x) is not None) archlist = [grabfile(os.path.join(x, "arch.list")) \ for x in locations_manager.profile_and_user_locations] archlist = stack_lists(archlist, incremental=1) self.configdict["conf"]["PORTAGE_ARCHLIST"] = " ".join(archlist) - pkgprovidedlines = [grabfile(os.path.join(x, "package.provided"), recursive=1) for x in self.profiles] + pkgprovidedlines = [grabfile( + os.path.join(x.location, "package.provided"), + recursive=x.portage1_directories) + for x in profiles_complex] pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1) has_invalid_data = False for x in range(len(pkgprovidedlines)-1, -1, -1): @@ -649,9 +701,6 @@ class config(object): if "USE_ORDER" not in self: self.backupenv["USE_ORDER"] = "env:pkg:conf:defaults:pkginternal:repo:env.d" - self["PORTAGE_GID"] = str(portage_gid) - self.backup_changes("PORTAGE_GID") - self.depcachedir = DEPCACHE_PATH if eprefix: # See comments about make.globals and EPREFIX @@ -678,19 +727,60 @@ class config(object): self["CBUILD"] = self["CHOST"] self.backup_changes("CBUILD") - self["PORTAGE_BIN_PATH"] = PORTAGE_BIN_PATH - self.backup_changes("PORTAGE_BIN_PATH") - self["PORTAGE_PYM_PATH"] = PORTAGE_PYM_PATH - self.backup_changes("PORTAGE_PYM_PATH") + if "USERLAND" not in self: + # Set default USERLAND so that our test cases can assume that + # it's always set. This allows isolated-functions.sh to avoid + # calling uname -s when sourced. + system = platform.system() + if system is not None and \ + (system.endswith("BSD") or system == "DragonFly"): + self["USERLAND"] = "BSD" + else: + self["USERLAND"] = "GNU" + self.backup_changes("USERLAND") - for var in ("PORTAGE_INST_UID", "PORTAGE_INST_GID"): + default_inst_ids = { + "PORTAGE_INST_GID": "0", + "PORTAGE_INST_UID": "0", + } + + if eprefix: + # For prefix environments, default to the UID and GID of + # the top-level EROOT directory. 
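The USERLAND hunk above avoids a `uname -s` call in isolated-functions.sh by deriving a default from platform.system(): BSD-style userlands are recognized by their kernel name, everything else is assumed to be GNU. Standalone, the default looks like this:

import platform

def default_userland():
    system = platform.system()  # e.g. "Linux", "FreeBSD", "DragonFly"
    if system is not None and \
        (system.endswith("BSD") or system == "DragonFly"):
        return "BSD"
    return "GNU"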
try: - self[var] = str(int(self.get(var, "0"))) + eroot_st = os.stat(eroot) + except OSError: + pass + else: + default_inst_ids["PORTAGE_INST_GID"] = str(eroot_st.st_gid) + default_inst_ids["PORTAGE_INST_UID"] = str(eroot_st.st_uid) + + if "PORTAGE_USERNAME" not in self: + try: + pwd_struct = pwd.getpwuid(eroot_st.st_uid) + except KeyError: + pass + else: + self["PORTAGE_USERNAME"] = pwd_struct.pw_name + self.backup_changes("PORTAGE_USERNAME") + + if "PORTAGE_GRPNAME" not in self: + try: + grp_struct = grp.getgrgid(eroot_st.st_gid) + except KeyError: + pass + else: + self["PORTAGE_GRPNAME"] = grp_struct.gr_name + self.backup_changes("PORTAGE_GRPNAME") + + for var, default_val in default_inst_ids.items(): + try: + self[var] = str(int(self.get(var, default_val))) except ValueError: writemsg(_("!!! %s='%s' is not a valid integer. " - "Falling back to '0'.\n") % (var, self[var]), + "Falling back to %s.\n") % (var, self[var], default_val), noiselevel=-1) - self[var] = "0" + self[var] = default_val self.backup_changes(var) # initialize self.features @@ -699,21 +789,33 @@ class config(object): if bsd_chflags: self.features.add('chflags') - if 'parse-eapi-ebuild-head' in self.features: - portage._validate_cache_for_unsupported_eapis = False - self._iuse_implicit_match = _iuse_implicit_match_cache(self) self._validate_commands() - for k in self._case_insensitive_vars: - if k in self: - self[k] = self[k].lower() - self.backup_changes(k) + for k in self._case_insensitive_vars: + if k in self: + self[k] = self[k].lower() + self.backup_changes(k) + + if main_repo is not None and not main_repo.sync: + main_repo_sync = self.get("SYNC") + if main_repo_sync: + main_repo.sync = main_repo_sync + + # The first constructed config object initializes these modules, + # and subsequent calls to the _init() functions have no effect. + portage.output._init(config_root=self['PORTAGE_CONFIGROOT']) + portage.data._init(self) if mycpv: self.setcpv(mycpv) + @property + def mygcfg(self): + warnings.warn("portage.config.mygcfg is deprecated", stacklevel=3) + return {} + def _validate_commands(self): for k in special_env_vars.validate_commands: v = self.get(k) @@ -817,11 +919,26 @@ class config(object): writemsg(_("!!! INVALID ACCEPT_KEYWORDS: %s\n") % str(group), noiselevel=-1) - abs_profile_path = os.path.join(self["PORTAGE_CONFIGROOT"], - PROFILE_PATH) - if (not self.profile_path or \ - not os.path.exists(os.path.join(self.profile_path, "parent"))) and \ - os.path.exists(os.path.join(self["PORTDIR"], "profiles")): + profile_broken = not self.profile_path or \ + not os.path.exists(os.path.join(self.profile_path, "parent")) and \ + os.path.exists(os.path.join(self["PORTDIR"], "profiles")) + + if profile_broken: + abs_profile_path = None + for x in (PROFILE_PATH, 'etc/portage/make.profile'): + x = os.path.join(self["PORTAGE_CONFIGROOT"], x) + try: + os.lstat(x) + except OSError: + pass + else: + abs_profile_path = x + break + + if abs_profile_path is None: + abs_profile_path = os.path.join(self["PORTAGE_CONFIGROOT"], + PROFILE_PATH) + writemsg(_("\n\n!!! %s is not a symlink and will probably prevent most merges.\n") % abs_profile_path, noiselevel=-1) writemsg(_("!!! It should point into a profile within %s/profiles/\n") % self["PORTDIR"]) @@ -851,13 +968,32 @@ class config(object): writemsg(_("!!! FEATURES=fakeroot is enabled, but the " "fakeroot binary is not installed.\n"), noiselevel=-1) + if os.getuid() == 0 and not hasattr(os, "setgroups"): + warning_shown = False + + if "userpriv" in self.features: + writemsg(_("!!! 
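The default_inst_ids logic above makes prefix installs default PORTAGE_INST_UID/GID to the owner of the EROOT directory instead of root, and the integer validation now falls back to that default rather than a hard-coded "0". A compressed sketch of the same flow (install_ids() and its arguments are illustrative):

import os
import sys

def install_ids(eroot, settings, prefix=False):
    defaults = {"PORTAGE_INST_GID": "0", "PORTAGE_INST_UID": "0"}
    if prefix:
        try:
            st = os.stat(eroot)
        except OSError:
            pass  # keep the root defaults
        else:
            defaults["PORTAGE_INST_GID"] = str(st.st_gid)
            defaults["PORTAGE_INST_UID"] = str(st.st_uid)
    result = {}
    for var, default_val in defaults.items():
        try:
            result[var] = str(int(settings.get(var, default_val)))
        except ValueError:
            sys.stderr.write("!!! %s is not a valid integer, "
                "falling back to %s\n" % (var, default_val))
            result[var] = default_val
    return result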
FEATURES=userpriv is enabled, but " + "os.setgroups is not available.\n"), noiselevel=-1) + warning_shown = True + + if "userfetch" in self.features: + writemsg(_("!!! FEATURES=userfetch is enabled, but " + "os.setgroups is not available.\n"), noiselevel=-1) + warning_shown = True + + if warning_shown and platform.python_implementation() == 'PyPy': + writemsg(_("!!! See https://bugs.pypy.org/issue833 for details.\n"), + noiselevel=-1) + def load_best_module(self,property_string): best_mod = best_from_dict(property_string,self.modules,self.module_priority) mod = None try: mod = load_mod(best_mod) except ImportError: - if not best_mod.startswith("cache."): + if best_mod in self._module_aliases: + mod = load_mod(self._module_aliases[best_mod]) + elif not best_mod.startswith("cache."): raise else: best_mod = "portage." + best_mod @@ -1099,8 +1235,11 @@ class config(object): # packages since we want to save it PORTAGE_BUILT_USE for # evaluating conditional USE deps in atoms passed via IPC to # helpers like has_version and best_version. + aux_keys = set(aux_keys) + if hasattr(mydb, '_aux_cache_keys'): + aux_keys = aux_keys.intersection(mydb._aux_cache_keys) + aux_keys.add('USE') aux_keys = list(aux_keys) - aux_keys.append('USE') for k, v in zip(aux_keys, mydb.aux_get(self.mycpv, aux_keys)): pkg_configdict[k] = v built_use = frozenset(pkg_configdict.pop('USE').split()) @@ -1115,7 +1254,7 @@ class config(object): slot = pkg_configdict["SLOT"] iuse = pkg_configdict["IUSE"] if pkg is None: - cpv_slot = "%s:%s" % (self.mycpv, slot) + cpv_slot = _pkg_str(self.mycpv, slot=slot, repo=repository) else: cpv_slot = pkg pkginternaluse = [] @@ -1462,6 +1601,9 @@ class config(object): @return: A matching profile atom string or None if one is not found. """ + warnings.warn("The config._getProfileMaskAtom() method is deprecated.", + DeprecationWarning, stacklevel=2) + cp = cpv_getkey(cpv) profile_atoms = self.prevmaskdict.get(cp) if profile_atoms: @@ -1564,11 +1706,13 @@ class config(object): @return: A list of properties that have not been accepted. """ accept_properties = self._accept_properties + if not hasattr(cpv, 'slot'): + cpv = _pkg_str(cpv, slot=metadata["SLOT"], + repo=metadata.get("repository")) cp = cpv_getkey(cpv) cpdict = self._ppropertiesdict.get(cp) if cpdict: - cpv_slot = "%s:%s" % (cpv, metadata["SLOT"]) - pproperties_list = ordered_by_atom_specificity(cpdict, cpv_slot, repo=metadata.get('repository')) + pproperties_list = ordered_by_atom_specificity(cpdict, cpv) if pproperties_list: accept_properties = list(self._accept_properties) for x in pproperties_list: @@ -1699,6 +1843,8 @@ class config(object): env_d = getconfig(env_d_filename, expand=False) if env_d: # env_d will be None if profile.env doesn't exist. + for k in self._env_d_blacklist: + env_d.pop(k, None) self.configdict["env.d"].update(env_d) def regenerate(self, useonly=0, use_cache=None): @@ -2025,25 +2171,43 @@ class config(object): self._virtuals_manager._treeVirtuals = {} def __delitem__(self,mykey): - self.modifying() - for x in self.lookuplist: - if x != None: - if mykey in x: - del x[mykey] + self.pop(mykey) + + def __getitem__(self, key): + try: + return self._getitem(key) + except KeyError: + return '' # for backward compat, don't raise KeyError + + def _getitem(self, mykey): + + if mykey in self._constant_keys: + # These two point to temporary values when + # portage plans to update itself. 
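The load_best_module() change above keeps old configurations working: when a user-configured cache module points at the removed metadata_overlay backend, the replacement from the _module_aliases table defined earlier is imported instead of raising ImportError. A sketch of that fallback, with load_mod() standing in for portage.load_mod():

import importlib

_module_aliases = {
    "cache.metadata_overlay.database":
        "portage.cache.flat_hash.database",
    "portage.cache.metadata_overlay.database":
        "portage.cache.flat_hash.database",
}

def load_mod(name):
    modname, attr = name.rsplit(".", 1)
    return getattr(importlib.import_module(modname), attr)

def load_cache_module(best_mod):
    try:
        return load_mod(best_mod)
    except ImportError:
        if best_mod in _module_aliases:
            return load_mod(_module_aliases[best_mod])
        raise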
+ if mykey == "PORTAGE_BIN_PATH": + return portage._bin_path + elif mykey == "PORTAGE_PYM_PATH": + return portage._pym_path + + elif mykey == "PORTAGE_GID": + return _unicode_decode(str(portage_gid)) - def __getitem__(self,mykey): for d in self.lookuplist: - if mykey in d: + try: return d[mykey] - return '' # for backward compat, don't raise KeyError + except KeyError: + pass + + raise KeyError(mykey) def get(self, k, x=None): - for d in self.lookuplist: - if k in d: - return d[k] - return x + try: + return self._getitem(k) + except KeyError: + return x def pop(self, key, *args): + self.modifying() if len(args) > 1: raise TypeError( "pop expected at most 2 arguments, got " + \ @@ -2059,10 +2223,12 @@ class config(object): def __contains__(self, mykey): """Called to implement membership test operators (in and not in).""" - for d in self.lookuplist: - if mykey in d: - return True - return False + try: + self._getitem(mykey) + except KeyError: + return False + else: + return True def setdefault(self, k, x=None): v = self.get(k) @@ -2077,6 +2243,7 @@ class config(object): def __iter__(self): keys = set() + keys.update(self._constant_keys) for d in self.lookuplist: keys.update(d) return iter(keys) @@ -2086,7 +2253,7 @@ class config(object): def iteritems(self): for k in self: - yield (k, self[k]) + yield (k, self._getitem(k)) def items(self): return list(self.iteritems()) @@ -2168,8 +2335,14 @@ class config(object): if not eapi_exports_merge_type(eapi): mydict.pop("MERGE_TYPE", None) - # Prefix variables are supported starting with EAPI 3. - if phase == 'depend' or eapi is None or not eapi_supports_prefix(eapi): + # Prefix variables are supported beginning with EAPI 3, or when + # force-prefix is in FEATURES, since older EAPIs would otherwise be + # useless with prefix configurations. This brings compatibility with + # the prefix branch of portage, which also supports EPREFIX for all + # EAPIs (for obvious reasons). + if phase == 'depend' or \ + ('force-prefix' not in self.features and + eapi is not None and not eapi_supports_prefix(eapi)): mydict.pop("ED", None) mydict.pop("EPREFIX", None) mydict.pop("EROOT", None) diff --git a/portage_with_autodep/pym/portage/package/ebuild/config.pyo b/portage_with_autodep/pym/portage/package/ebuild/config.pyo Binary files differnew file mode 100644 index 0000000..742ee2b --- /dev/null +++ b/portage_with_autodep/pym/portage/package/ebuild/config.pyo diff --git a/portage_with_autodep/pym/portage/package/ebuild/deprecated_profile_check.pyo b/portage_with_autodep/pym/portage/package/ebuild/deprecated_profile_check.pyo Binary files differnew file mode 100644 index 0000000..2b9362b --- /dev/null +++ b/portage_with_autodep/pym/portage/package/ebuild/deprecated_profile_check.pyo diff --git a/portage_with_autodep/pym/portage/package/ebuild/digestcheck.py b/portage_with_autodep/pym/portage/package/ebuild/digestcheck.py index 1e34b14..8705639 100644 --- a/portage_with_autodep/pym/portage/package/ebuild/digestcheck.py +++ b/portage_with_autodep/pym/portage/package/ebuild/digestcheck.py @@ -8,7 +8,6 @@ import warnings from portage import os, _encodings, _unicode_decode from portage.exception import DigestException, FileNotFound from portage.localization import _ -from portage.manifest import Manifest from portage.output import EOutput from portage.util import writemsg @@ -16,7 +15,7 @@ def digestcheck(myfiles, mysettings, strict=False, justmanifest=None, mf=None): """ Verifies checksums. Assumes all files have been downloaded. 
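The __getitem__/_getitem split above turns config lookup into two layers: a handful of constant keys are computed on demand (so PORTAGE_BIN_PATH and PORTAGE_PYM_PATH can point at temporary locations while portage updates itself), everything else falls through the lookuplist stack, and the public __getitem__ preserves the historical ''-on-miss behavior. A self-contained sketch of that shape:

class LayeredConfig(object):
    _constant_keys = frozenset(["PORTAGE_BIN_PATH"])

    def __init__(self, lookuplist, bin_path):
        self.lookuplist = lookuplist  # list of dicts, highest priority first
        self._bin_path = bin_path

    def _getitem(self, key):
        if key in self._constant_keys:
            return self._bin_path  # may be a temporary self-update path
        for d in self.lookuplist:
            try:
                return d[key]
            except KeyError:
                pass
        raise KeyError(key)

    def __getitem__(self, key):
        try:
            return self._getitem(key)
        except KeyError:
            return ""  # backward compat: consumers never see KeyError

    def __contains__(self, key):
        try:
            self._getitem(key)
        except KeyError:
            return False
        return True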
@rtype: int - @returns: 1 on success and 0 on failure + @return: 1 on success and 0 on failure """ if justmanifest is not None: @@ -28,49 +27,33 @@ def digestcheck(myfiles, mysettings, strict=False, justmanifest=None, mf=None): if mysettings.get("EBUILD_SKIP_MANIFEST") == "1": return 1 - allow_missing = "allow-missing-manifests" in mysettings.features pkgdir = mysettings["O"] - manifest_path = os.path.join(pkgdir, "Manifest") - if not os.path.exists(manifest_path): - if allow_missing: - return 1 - writemsg(_("!!! Manifest file not found: '%s'\n") % manifest_path, - noiselevel=-1) - if strict: - return 0 - else: - return 1 if mf is None: - mf = Manifest(pkgdir, mysettings["DISTDIR"]) - manifest_empty = True - for d in mf.fhashdict.values(): - if d: - manifest_empty = False - break - if manifest_empty: - writemsg(_("!!! Manifest is empty: '%s'\n") % manifest_path, - noiselevel=-1) - if strict: - return 0 - else: - return 1 + mf = mysettings.repositories.get_repo_for_location( + os.path.dirname(os.path.dirname(pkgdir))) + mf = mf.load_manifest(pkgdir, mysettings["DISTDIR"]) eout = EOutput() eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1" try: - if strict and "PORTAGE_PARALLEL_FETCHONLY" not in mysettings: - eout.ebegin(_("checking ebuild checksums ;-)")) - mf.checkTypeHashes("EBUILD") - eout.eend(0) - eout.ebegin(_("checking auxfile checksums ;-)")) - mf.checkTypeHashes("AUX") - eout.eend(0) - eout.ebegin(_("checking miscfile checksums ;-)")) - mf.checkTypeHashes("MISC", ignoreMissingFiles=True) - eout.eend(0) + if not mf.thin and strict and "PORTAGE_PARALLEL_FETCHONLY" not in mysettings: + if mf.fhashdict.get("EBUILD"): + eout.ebegin(_("checking ebuild checksums ;-)")) + mf.checkTypeHashes("EBUILD") + eout.eend(0) + if mf.fhashdict.get("AUX"): + eout.ebegin(_("checking auxfile checksums ;-)")) + mf.checkTypeHashes("AUX") + eout.eend(0) + if mf.fhashdict.get("MISC"): + eout.ebegin(_("checking miscfile checksums ;-)")) + mf.checkTypeHashes("MISC", ignoreMissingFiles=True) + eout.eend(0) for f in myfiles: eout.ebegin(_("checking %s ;-)") % f) ftype = mf.findFile(f) if ftype is None: + if mf.allow_missing: + continue eout.eend(1) writemsg(_("\n!!! Missing digest for '%s'\n") % (f,), noiselevel=-1) @@ -90,7 +73,7 @@ def digestcheck(myfiles, mysettings, strict=False, justmanifest=None, mf=None): writemsg(_("!!! Got: %s\n") % e.value[2], noiselevel=-1) writemsg(_("!!! Expected: %s\n") % e.value[3], noiselevel=-1) return 0 - if allow_missing: + if mf.thin or mf.allow_missing: # In this case we ignore any missing digests that # would otherwise be detected below. 
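The reworked digestcheck() above only verifies a hash class when the Manifest actually lists entries of that type, and tolerates missing distfile digests for thin or allow-missing manifests. Roughly, the control flow reduces to the following (mf stands in for a Manifest obtained via load_manifest(); progress output and error reporting are omitted):

def verify(mf, distfiles, strict=True):
    if not mf.thin and strict:
        for ftype in ("EBUILD", "AUX", "MISC"):
            if mf.fhashdict.get(ftype):
                mf.checkTypeHashes(ftype,
                    ignoreMissingFiles=(ftype == "MISC"))
    for f in distfiles:
        ftype = mf.findFile(f)
        if ftype is None:
            if mf.allow_missing:
                continue
            return 0  # missing digest is fatal here
        mf.checkFileHashes(ftype, f)
    return 1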
return 1 diff --git a/portage_with_autodep/pym/portage/package/ebuild/digestcheck.pyo b/portage_with_autodep/pym/portage/package/ebuild/digestcheck.pyo Binary files differnew file mode 100644 index 0000000..66987a2 --- /dev/null +++ b/portage_with_autodep/pym/portage/package/ebuild/digestcheck.pyo diff --git a/portage_with_autodep/pym/portage/package/ebuild/digestgen.py b/portage_with_autodep/pym/portage/package/ebuild/digestgen.py index eb7210e..6ad3397 100644 --- a/portage_with_autodep/pym/portage/package/ebuild/digestgen.py +++ b/portage_with_autodep/pym/portage/package/ebuild/digestgen.py @@ -17,7 +17,6 @@ from portage.dep import use_reduce from portage.exception import InvalidDependString, FileNotFound, \ PermissionDenied, PortagePackageException from portage.localization import _ -from portage.manifest import Manifest from portage.output import colorize from portage.package.ebuild.fetch import fetch from portage.util import writemsg, writemsg_stdout @@ -34,7 +33,7 @@ def digestgen(myarchives=None, mysettings=None, myportdb=None): @param myportdb: a portdbapi instance @type myportdb: portdbapi @rtype: int - @returns: 1 on success and 0 on failure + @return: 1 on success and 0 on failure """ if mysettings is None or myportdb is None: raise TypeError("portage.digestgen(): 'mysettings' and 'myportdb' parameter are required.") @@ -52,9 +51,21 @@ def digestgen(myarchives=None, mysettings=None, myportdb=None): del e return 0 mytree = os.path.dirname(os.path.dirname(mysettings["O"])) - manifest1_compat = False - mf = Manifest(mysettings["O"], mysettings["DISTDIR"], - fetchlist_dict=fetchlist_dict, manifest1_compat=manifest1_compat) + try: + mf = mysettings.repositories.get_repo_for_location(mytree) + except KeyError: + # backward compatibility + mytree = os.path.realpath(mytree) + mf = mysettings.repositories.get_repo_for_location(mytree) + + mf = mf.load_manifest(mysettings["O"], mysettings["DISTDIR"], + fetchlist_dict=fetchlist_dict) + + if not mf.allow_create: + writemsg_stdout(_(">>> Skipping creating Manifest for %s; " + "repository is configured to not use them\n") % mysettings["O"]) + return 1 + # Don't require all hashes since that can trigger excessive # fetches when sufficient digests already exist. To ease transition # while Manifest 1 is being removed, only require hashes that will @@ -102,8 +113,6 @@ def digestgen(myarchives=None, mysettings=None, myportdb=None): continue if missing_files: - mytree = os.path.realpath(os.path.dirname( - os.path.dirname(mysettings["O"]))) for myfile in missing_files: uris = set() all_restrict = set() @@ -139,8 +148,7 @@ def digestgen(myarchives=None, mysettings=None, myportdb=None): if not fetch({myfile : uris}, mysettings): myebuild = os.path.join(mysettings["O"], catsplit(cpv)[1] + ".ebuild") - spawn_nofetch(myportdb, myebuild, - settings=mysettings) + spawn_nofetch(myportdb, myebuild) writemsg(_("!!! 
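digestgen() now asks the repository configuration for the Manifest instead of constructing one directly, trying the tree path as given first and falling back to its realpath for symlinked layouts. The lookup in isolation (repositories stands in for mysettings.repositories):

import os

def repo_for_tree(repositories, mytree):
    try:
        return repositories.get_repo_for_location(mytree)
    except KeyError:
        # backward compatibility with symlinked tree locations
        return repositories.get_repo_for_location(os.path.realpath(mytree))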
Fetch failed for %s, can't update " "Manifest\n") % myfile, noiselevel=-1) if myfile in dist_hashes and \ @@ -183,8 +191,6 @@ def digestgen(myarchives=None, mysettings=None, myportdb=None): os.path.join(mysettings["DISTDIR"], filename)): auto_assumed.append(filename) if auto_assumed: - mytree = os.path.realpath( - os.path.dirname(os.path.dirname(mysettings["O"]))) cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:]) pkgs = myportdb.cp_list(cp, mytree=mytree) pkgs.sort() diff --git a/portage_with_autodep/pym/portage/package/ebuild/digestgen.pyo b/portage_with_autodep/pym/portage/package/ebuild/digestgen.pyo Binary files differnew file mode 100644 index 0000000..66876ec --- /dev/null +++ b/portage_with_autodep/pym/portage/package/ebuild/digestgen.pyo diff --git a/portage_with_autodep/pym/portage/package/ebuild/doebuild.py b/portage_with_autodep/pym/portage/package/ebuild/doebuild.py index c76c1ed..610172f 100644 --- a/portage_with_autodep/pym/portage/package/ebuild/doebuild.py +++ b/portage_with_autodep/pym/portage/package/ebuild/doebuild.py @@ -1,4 +1,4 @@ -# Copyright 2010-2011 Gentoo Foundation +# Copyright 2010-2012 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 __all__ = ['doebuild', 'doebuild_environment', 'spawn', 'spawnebuild'] @@ -10,7 +10,6 @@ from itertools import chain import logging import os as _os import re -import shutil import signal import stat import sys @@ -31,8 +30,8 @@ portage.proxy.lazyimport.lazyimport(globals(), ) from portage import auxdbkeys, bsd_chflags, \ - eapi_is_supported, merge, os, selinux, \ - unmerge, _encodings, _parse_eapi_ebuild_head, _os_merge, \ + eapi_is_supported, merge, os, selinux, shutil, \ + unmerge, _encodings, _os_merge, \ _shell_quote, _unicode_decode, _unicode_encode from portage.const import EBUILD_SH_ENV_FILE, EBUILD_SH_ENV_DIR, \ EBUILD_SH_BINARY, INVALID_ENV_FILE, MISC_SH_BINARY @@ -42,16 +41,16 @@ from portage.dbapi.porttree import _parse_uri_map from portage.dep import Atom, check_required_use, \ human_readable_required_use, paren_enclose, use_reduce from portage.eapi import eapi_exports_KV, eapi_exports_merge_type, \ - eapi_exports_replace_vars, eapi_has_required_use, \ - eapi_has_src_prepare_and_src_configure, eapi_has_pkg_pretend -from portage.elog import elog_process + eapi_exports_replace_vars, eapi_exports_REPOSITORY, \ + eapi_has_required_use, eapi_has_src_prepare_and_src_configure, \ + eapi_has_pkg_pretend +from portage.elog import elog_process, _preload_elog_modules from portage.elog.messages import eerror, eqawarn from portage.exception import DigestException, FileNotFound, \ IncorrectParameter, InvalidDependString, PermissionDenied, \ UnsupportedAPIException from portage.localization import _ -from portage.manifest import Manifest -from portage.output import style_to_ansi_code +from portage.output import colormap from portage.package.ebuild.prepare_build_dirs import prepare_build_dirs from portage.util import apply_recursive_permissions, \ apply_secpass_permissions, noiselimit, normalize_path, \ @@ -116,6 +115,38 @@ def _spawn_phase(phase, settings, actionmap=None, **kwargs): ebuild_phase.wait() return ebuild_phase.returncode +def _doebuild_path(settings, eapi=None): + """ + Generate the PATH variable. + """ + + # Note: PORTAGE_BIN_PATH may differ from the global constant + # when portage is reinstalling itself. 
+ portage_bin_path = settings["PORTAGE_BIN_PATH"] + eprefix = settings["EPREFIX"] + prerootpath = [x for x in settings.get("PREROOTPATH", "").split(":") if x] + rootpath = [x for x in settings.get("ROOTPATH", "").split(":") if x] + + prefixes = [] + if eprefix: + prefixes.append(eprefix) + prefixes.append("/") + + path = [] + + if eapi not in (None, "0", "1", "2", "3"): + path.append(os.path.join(portage_bin_path, "ebuild-helpers", "4")) + + path.append(os.path.join(portage_bin_path, "ebuild-helpers")) + path.extend(prerootpath) + + for prefix in prefixes: + for x in ("usr/local/sbin", "usr/local/bin", "usr/sbin", "usr/bin", "sbin", "bin"): + path.append(os.path.join(prefix, x)) + + path.extend(rootpath) + settings["PATH"] = ":".join(path) + def doebuild_environment(myebuild, mydo, myroot=None, settings=None, debug=False, use_cache=None, db=None): """ @@ -143,20 +174,32 @@ def doebuild_environment(myebuild, mydo, myroot=None, settings=None, ebuild_path = os.path.abspath(myebuild) pkg_dir = os.path.dirname(ebuild_path) mytree = os.path.dirname(os.path.dirname(pkg_dir)) - - if "CATEGORY" in mysettings.configdict["pkg"]: - cat = mysettings.configdict["pkg"]["CATEGORY"] - else: - cat = os.path.basename(normalize_path(os.path.join(pkg_dir, ".."))) - mypv = os.path.basename(ebuild_path)[:-7] - - mycpv = cat+"/"+mypv - mysplit = _pkgsplit(mypv) + mysplit = _pkgsplit(mypv, eapi=mysettings.configdict["pkg"].get("EAPI")) if mysplit is None: raise IncorrectParameter( _("Invalid ebuild path: '%s'") % myebuild) + if mysettings.mycpv is not None and \ + mysettings.configdict["pkg"].get("PF") == mypv and \ + "CATEGORY" in mysettings.configdict["pkg"]: + # Assume that PF is enough to assume that we've got + # the correct CATEGORY, though this is not really + # a solid assumption since it's possible (though + # unlikely) that two packages in different + # categories have the same PF. Callers should call + # setcpv or create a clean clone of a locked config + # instance in order to ensure that this assumption + # does not fail like in bug #408817. + cat = mysettings.configdict["pkg"]["CATEGORY"] + mycpv = mysettings.mycpv + elif os.path.basename(pkg_dir) in (mysplit[0], mypv): + # portdbapi or vardbapi + cat = os.path.basename(os.path.dirname(pkg_dir)) + mycpv = cat + "/" + mypv + else: + raise AssertionError("unable to determine CATEGORY") + # Make a backup of PORTAGE_TMPDIR prior to calling config.reset() # so that the caller can override it. 
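The new _doebuild_path() above rebuilds PATH from scratch instead of merely prepending PORTAGE_BIN_PATH: helper directories come first (with an extra "4" override directory for EAPI 4 and later), then PREROOTPATH, the standard bin directories under each prefix, and ROOTPATH last. A standalone sketch of that ordering:

import os

def build_path(bin_path, eprefix="", eapi="4", prerootpath="", rootpath=""):
    path = []
    if eapi not in (None, "0", "1", "2", "3"):
        # EAPI-specific helper overrides shadow the generic ones.
        path.append(os.path.join(bin_path, "ebuild-helpers", "4"))
    path.append(os.path.join(bin_path, "ebuild-helpers"))
    path.extend(x for x in prerootpath.split(":") if x)
    prefixes = [eprefix] if eprefix else []
    prefixes.append("/")
    for prefix in prefixes:
        for x in ("usr/local/sbin", "usr/local/bin", "usr/sbin",
                "usr/bin", "sbin", "bin"):
            path.append(os.path.join(prefix, x))
    path.extend(x for x in rootpath.split(":") if x)
    return ":".join(path)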
tmpdir = mysettings["PORTAGE_TMPDIR"] @@ -208,11 +251,11 @@ def doebuild_environment(myebuild, mydo, myroot=None, settings=None, mysettings["FILESDIR"] = pkg_dir+"/files" mysettings["PF"] = mypv - if hasattr(mydbapi, '_repo_info'): - repo_info = mydbapi._repo_info[mytree] - mysettings['PORTDIR'] = repo_info.portdir - mysettings['PORTDIR_OVERLAY'] = repo_info.portdir_overlay - mysettings.configdict["pkg"]["PORTAGE_REPO_NAME"] = repo_info.name + if hasattr(mydbapi, 'repositories'): + repo = mydbapi.repositories.get_repo_for_location(mytree) + mysettings['PORTDIR'] = repo.eclass_db.porttrees[0] + mysettings['PORTDIR_OVERLAY'] = ' '.join(repo.eclass_db.porttrees[1:]) + mysettings.configdict["pkg"]["PORTAGE_REPO_NAME"] = repo.name mysettings["PORTDIR"] = os.path.realpath(mysettings["PORTDIR"]) mysettings["DISTDIR"] = os.path.realpath(mysettings["DISTDIR"]) @@ -235,16 +278,6 @@ def doebuild_environment(myebuild, mydo, myroot=None, settings=None, else: mysettings["PVR"]=mysplit[1]+"-"+mysplit[2] - if "PATH" in mysettings: - mysplit=mysettings["PATH"].split(":") - else: - mysplit=[] - # Note: PORTAGE_BIN_PATH may differ from the global constant - # when portage is reinstalling itself. - portage_bin_path = mysettings["PORTAGE_BIN_PATH"] - if portage_bin_path not in mysplit: - mysettings["PATH"] = portage_bin_path + ":" + mysettings["PATH"] - # All temporary directories should be subdirectories of # $PORTAGE_TMPDIR/portage, since it's common for /tmp and /var/tmp # to be mounted with the "noexec" option (see bug #346899). @@ -268,7 +301,9 @@ def doebuild_environment(myebuild, mydo, myroot=None, settings=None, mysettings["T"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "temp") # Prefix forward compatability - mysettings["ED"] = mysettings["D"] + eprefix_lstrip = mysettings["EPREFIX"].lstrip(os.sep) + mysettings["ED"] = os.path.join( + mysettings["D"], eprefix_lstrip).rstrip(os.sep) + os.sep mysettings["PORTAGE_BASHRC"] = os.path.join( mysettings["PORTAGE_CONFIGROOT"], EBUILD_SH_ENV_FILE) @@ -276,37 +311,41 @@ def doebuild_environment(myebuild, mydo, myroot=None, settings=None, mysettings["PORTAGE_CONFIGROOT"], EBUILD_SH_ENV_DIR) # Allow color.map to control colors associated with einfo, ewarn, etc... - mycolors = [] - for c in ("GOOD", "WARN", "BAD", "HILITE", "BRACKET"): - mycolors.append("%s=$'%s'" % \ - (c, style_to_ansi_code(c))) - mysettings["PORTAGE_COLORMAP"] = "\n".join(mycolors) - - # All EAPI dependent code comes last, so that essential variables - # like PORTAGE_BUILDDIR are still initialized even in cases when + mysettings["PORTAGE_COLORMAP"] = colormap() + + if "COLUMNS" not in mysettings: + # Set COLUMNS, in order to prevent unnecessary stty calls + # inside the set_colors function of isolated-functions.sh. + # We cache the result in os.environ, in order to avoid + # multiple stty calls in cases when get_term_size() falls + # back to stty due to a missing or broken curses module. + columns = os.environ.get("COLUMNS") + if columns is None: + rows, columns = portage.output.get_term_size() + if columns < 1: + # Force a sane value for COLUMNS, so that tools + # like ls don't complain (see bug #394091). + columns = 80 + columns = str(columns) + os.environ["COLUMNS"] = columns + mysettings["COLUMNS"] = columns + + # EAPI is always known here, even for the "depend" phase, because + # EbuildMetadataPhase gets it from _parse_eapi_ebuild_head(). 
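The ED hunk above replaces the old "ED = D" placeholder with a real prefix-aware value: EPREFIX is appended to D and the result is normalized to end in exactly one separator, so ED degrades to D when EPREFIX is empty. In isolation:

import os

def compute_ed(d, eprefix):
    eprefix_lstrip = eprefix.lstrip(os.sep)
    return os.path.join(d, eprefix_lstrip).rstrip(os.sep) + os.sep

assert compute_ed("/var/tmp/portage/cat/pkg-1/image/", "/prefix") == \
    "/var/tmp/portage/cat/pkg-1/image/prefix/"
assert compute_ed("/var/tmp/portage/cat/pkg-1/image/", "") == \
    "/var/tmp/portage/cat/pkg-1/image/"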
+ eapi = mysettings.configdict['pkg']['EAPI'] + _doebuild_path(mysettings, eapi=eapi) + + # All EAPI dependent code comes last, so that essential variables like + # PATH and PORTAGE_BUILDDIR are still initialized even in cases when # UnsupportedAPIException needs to be raised, which can be useful # when uninstalling a package that has corrupt EAPI metadata. - eapi = None - if mydo == 'depend' and 'EAPI' not in mysettings.configdict['pkg']: - if eapi is None and 'parse-eapi-ebuild-head' in mysettings.features: - eapi = _parse_eapi_ebuild_head( - io.open(_unicode_encode(ebuild_path, - encoding=_encodings['fs'], errors='strict'), - mode='r', encoding=_encodings['content'], errors='replace')) - - if eapi is not None: - if not eapi_is_supported(eapi): - raise UnsupportedAPIException(mycpv, eapi) - mysettings.configdict['pkg']['EAPI'] = eapi + if not eapi_is_supported(eapi): + raise UnsupportedAPIException(mycpv, eapi) - if mydo != "depend": - # Metadata vars such as EAPI and RESTRICT are - # set by the above config.setcpv() call. - eapi = mysettings["EAPI"] - if not eapi_is_supported(eapi): - # can't do anything with this. - raise UnsupportedAPIException(mycpv, eapi) + if eapi_exports_REPOSITORY(eapi) and "PORTAGE_REPO_NAME" in mysettings.configdict["pkg"]: + mysettings.configdict["pkg"]["REPOSITORY"] = mysettings.configdict["pkg"]["PORTAGE_REPO_NAME"] + if mydo != "depend": if hasattr(mydbapi, "getFetchMap") and \ ("A" not in mysettings.configdict["pkg"] or \ "AA" not in mysettings.configdict["pkg"]): @@ -331,22 +370,41 @@ def doebuild_environment(myebuild, mydo, myroot=None, settings=None, else: mysettings.configdict["pkg"]["AA"] = " ".join(uri_map) - if not eapi_exports_KV(eapi): - # Discard KV for EAPIs that don't support it. Cache KV is restored - # from the backupenv whenever config.reset() is called. - mysettings.pop('KV', None) - elif mydo != 'depend' and 'KV' not in mysettings and \ - mydo in ('compile', 'config', 'configure', 'info', - 'install', 'nofetch', 'postinst', 'postrm', 'preinst', - 'prepare', 'prerm', 'setup', 'test', 'unpack'): - mykv, err1 = ExtractKernelVersion( - os.path.join(mysettings['EROOT'], "usr/src/linux")) - if mykv: - # Regular source tree - mysettings["KV"] = mykv - else: - mysettings["KV"] = "" - mysettings.backup_changes("KV") + ccache = "ccache" in mysettings.features + distcc = "distcc" in mysettings.features + if ccache or distcc: + # Use default ABI libdir in accordance with bug #355283. + libdir = None + default_abi = mysettings.get("DEFAULT_ABI") + if default_abi: + libdir = mysettings.get("LIBDIR_" + default_abi) + if not libdir: + libdir = "lib" + + if distcc: + mysettings["PATH"] = os.path.join(os.sep, eprefix_lstrip, + "usr", libdir, "distcc", "bin") + ":" + mysettings["PATH"] + + if ccache: + mysettings["PATH"] = os.path.join(os.sep, eprefix_lstrip, + "usr", libdir, "ccache", "bin") + ":" + mysettings["PATH"] + + if not eapi_exports_KV(eapi): + # Discard KV for EAPIs that don't support it. Cached KV is restored + # from the backupenv whenever config.reset() is called. 
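The ccache/distcc block above prepends the wrapper bin directories under the default ABI's libdir (per bug #355283) so the wrappers shadow the real toolchain; since ccache is prepended after distcc, it ends up first in PATH. A sketch of that injection:

import os

def prepend_wrappers(path, eprefix_lstrip, features, settings):
    libdir = None
    default_abi = settings.get("DEFAULT_ABI")
    if default_abi:
        libdir = settings.get("LIBDIR_" + default_abi)
    if not libdir:
        libdir = "lib"  # default ABI libdir, see bug #355283
    for feature in ("distcc", "ccache"):
        if feature in features:
            path = os.path.join(os.sep, eprefix_lstrip, "usr", libdir,
                feature, "bin") + ":" + path
    return path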
+ mysettings.pop('KV', None) + elif 'KV' not in mysettings and \ + mydo in ('compile', 'config', 'configure', 'info', + 'install', 'nofetch', 'postinst', 'postrm', 'preinst', + 'prepare', 'prerm', 'setup', 'test', 'unpack'): + mykv, err1 = ExtractKernelVersion( + os.path.join(mysettings['EROOT'], "usr/src/linux")) + if mykv: + # Regular source tree + mysettings["KV"] = mykv + else: + mysettings["KV"] = "" + mysettings.backup_changes("KV") _doebuild_manifest_cache = None _doebuild_broken_ebuilds = set() @@ -356,7 +414,7 @@ _doebuild_commands_without_builddir = ( 'fetch', 'fetchall', 'help', 'manifest' ) -def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0, +def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0, fetchonly=0, cleanup=0, dbkey=None, use_cache=1, fetchall=0, tree=None, mydbapi=None, vartree=None, prev_mtimes=None, fd_pipes=None, returnpid=False): @@ -368,10 +426,10 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0, @type myebuild: String @param mydo: Phase to run @type mydo: String - @param myroot: $ROOT (usually '/', see man make.conf) - @type myroot: String - @param mysettings: Portage Configuration - @type mysettings: instance of portage.config + @param _unused: Deprecated (use settings["ROOT"] instead) + @type _unused: String + @param settings: Portage Configuration + @type settings: instance of portage.config @param debug: Turns on various debug information (eg, debug for spawn) @type debug: Boolean @param listonly: Used to wrap fetch(); passed such that fetch only lists files required. @@ -403,7 +461,7 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0, caller clean up all returned PIDs. @type returnpid: Boolean @rtype: Boolean - @returns: + @return: 1. 0 for success 2. 1 for error @@ -414,7 +472,18 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0, Other variables may not be strictly required, many have defaults that are set inside of doebuild. """ - + + if settings is None: + raise TypeError("settings parameter is required") + mysettings = settings + myroot = settings['EROOT'] + + if _unused is not None and _unused != mysettings['EROOT']: + warnings.warn("The third parameter of the " + "portage.doebuild() is now unused. Use " + "settings['ROOT'] instead.", + DeprecationWarning, stacklevel=2) + if not tree: writemsg("Warning: tree not specified to doebuild\n") tree = "porttree" @@ -432,6 +501,7 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0, "install":["test"], "rpm": ["install"], "package":["install"], + "merge" :["install"], } if mydbapi is None: @@ -480,21 +550,28 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0, return 1 global _doebuild_manifest_cache + pkgdir = os.path.dirname(myebuild) + manifest_path = os.path.join(pkgdir, "Manifest") + if tree == "porttree": + repo_config = mysettings.repositories.get_repo_for_location( + os.path.dirname(os.path.dirname(pkgdir))) + else: + repo_config = None + mf = None if "strict" in features and \ "digest" not in features and \ tree == "porttree" and \ + not repo_config.thin_manifest and \ mydo not in ("digest", "manifest", "help") and \ - not portage._doebuild_manifest_exempt_depend: + not portage._doebuild_manifest_exempt_depend and \ + not (repo_config.allow_missing_manifest and not os.path.exists(manifest_path)): # Always verify the ebuild checksums before executing it. 
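The doebuild() signature change above keeps old three-argument callers working: the positional root argument is accepted but ignored, with a DeprecationWarning if a caller still passes a value that disagrees with the settings instance. The shim pattern in isolation (doebuild_compat() is an illustrative stand-in):

import warnings

def doebuild_compat(myebuild, mydo, _unused=None, settings=None):
    if settings is None:
        raise TypeError("settings parameter is required")
    myroot = settings["EROOT"]
    if _unused is not None and _unused != myroot:
        warnings.warn("The third parameter of doebuild() is now "
            "unused; use settings['ROOT'] instead.",
            DeprecationWarning, stacklevel=2)
    return myroot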
global _doebuild_broken_ebuilds if myebuild in _doebuild_broken_ebuilds: return 1 - pkgdir = os.path.dirname(myebuild) - manifest_path = os.path.join(pkgdir, "Manifest") - # Avoid checking the same Manifest several times in a row during a # regen with an empty cache. if _doebuild_manifest_cache is None or \ @@ -505,7 +582,7 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0, out.eerror(_("Manifest not found for '%s'") % (myebuild,)) _doebuild_broken_ebuilds.add(myebuild) return 1 - mf = Manifest(pkgdir, mysettings["DISTDIR"]) + mf = repo_config.load_manifest(pkgdir, mysettings["DISTDIR"]) else: mf = _doebuild_manifest_cache @@ -513,10 +590,12 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0, try: mf.checkFileHashes("EBUILD", os.path.basename(myebuild)) except KeyError: - out = portage.output.EOutput() - out.eerror(_("Missing digest for '%s'") % (myebuild,)) - _doebuild_broken_ebuilds.add(myebuild) - return 1 + if not (mf.allow_missing and + os.path.basename(myebuild) not in mf.fhashdict["EBUILD"]): + out = portage.output.EOutput() + out.eerror(_("Missing digest for '%s'") % (myebuild,)) + _doebuild_broken_ebuilds.add(myebuild) + return 1 except FileNotFound: out = portage.output.EOutput() out.eerror(_("A file listed in the Manifest " @@ -536,7 +615,7 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0, if mf.getFullname() in _doebuild_broken_manifests: return 1 - if mf is not _doebuild_manifest_cache: + if mf is not _doebuild_manifest_cache and not mf.allow_missing: # Make sure that all of the ebuilds are # actually listed in the Manifest. @@ -553,8 +632,8 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0, _doebuild_broken_manifests.add(manifest_path) return 1 - # Only cache it if the above stray files test succeeds. - _doebuild_manifest_cache = mf + # We cache it only after all above checks succeed. + _doebuild_manifest_cache = mf logfile=None builddir_lock = None @@ -594,7 +673,6 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0, if builddir_lock is not None: builddir_lock.unlock() - restrict = set(mysettings.get('PORTAGE_RESTRICT', '').split()) # get possible slot information from the deps file if mydo == "depend": writemsg("!!! DEBUG: dbkey: %s\n" % str(dbkey), 2) @@ -654,6 +732,13 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0, if rval != os.EX_OK: return rval + else: + # FEATURES=noauto only makes sense for porttree, and we don't want + # it to trigger redundant sourcing of the ebuild for API consumers + # that are using binary packages + if "noauto" in mysettings.features: + mysettings.features.discard("noauto") + # The info phase is special because it uses mkdtemp so and # user (not necessarily in the portage group) can run it. 
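The stray-file check above runs once per Manifest, before it is cached: unless the repository allows missing digests, every *.ebuild in the package directory must have an EBUILD entry. The core test amounts to:

import os

def stray_ebuilds(pkgdir, mf):
    if mf.allow_missing:
        return []
    listed = set(mf.fhashdict["EBUILD"])
    return [f for f in os.listdir(pkgdir)
        if f.endswith(".ebuild") and f not in listed]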
if mydo not in ('info',) and \ @@ -666,6 +751,73 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0, return unmerge(mysettings["CATEGORY"], mysettings["PF"], myroot, mysettings, vartree=vartree) + phases_to_run = set() + if "noauto" in mysettings.features or \ + mydo not in actionmap_deps: + phases_to_run.add(mydo) + else: + phase_stack = [mydo] + while phase_stack: + x = phase_stack.pop() + if x in phases_to_run: + continue + phases_to_run.add(x) + phase_stack.extend(actionmap_deps.get(x, [])) + del phase_stack + + alist = set(mysettings.configdict["pkg"].get("A", "").split()) + + unpacked = False + if tree != "porttree": + pass + elif "unpack" not in phases_to_run: + unpacked = os.path.exists(os.path.join( + mysettings["PORTAGE_BUILDDIR"], ".unpacked")) + else: + try: + workdir_st = os.stat(mysettings["WORKDIR"]) + except OSError: + pass + else: + newstuff = False + if not os.path.exists(os.path.join( + mysettings["PORTAGE_BUILDDIR"], ".unpacked")): + writemsg_stdout(_( + ">>> Not marked as unpacked; recreating WORKDIR...\n")) + newstuff = True + else: + for x in alist: + writemsg_stdout(">>> Checking %s's mtime...\n" % x) + try: + x_st = os.stat(os.path.join( + mysettings["DISTDIR"], x)) + except OSError: + # file not fetched yet + x_st = None + + if x_st is None or x_st.st_mtime > workdir_st.st_mtime: + writemsg_stdout(_(">>> Timestamp of " + "%s has changed; recreating WORKDIR...\n") % x) + newstuff = True + break + + if newstuff: + if builddir_lock is None and \ + 'PORTAGE_BUILDIR_LOCKED' not in mysettings: + builddir_lock = EbuildBuildDir( + scheduler=PollScheduler().sched_iface, + settings=mysettings) + builddir_lock.lock() + try: + _spawn_phase("clean", mysettings) + finally: + if builddir_lock is not None: + builddir_lock.unlock() + builddir_lock = None + else: + writemsg_stdout(_(">>> WORKDIR is up-to-date, keeping...\n")) + unpacked = True + # Build directory creation isn't required for any of these. # In the fetch phase, the directory is needed only for RESTRICT=fetch # in order to satisfy the sane $PWD requirement (from bug #239560) @@ -739,10 +891,9 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0, # Only try and fetch the files if we are going to need them ... # otherwise, if user has FEATURES=noauto and they run `ebuild clean # unpack compile install`, we will try and fetch 4 times :/ - need_distfiles = tree == "porttree" and \ + need_distfiles = tree == "porttree" and not unpacked and \ (mydo in ("fetch", "unpack") or \ mydo not in ("digest", "manifest") and "noauto" not in features) - alist = set(mysettings.configdict["pkg"].get("A", "").split()) if need_distfiles: src_uri, = mydbapi.aux_get(mysettings.mycpv, @@ -783,10 +934,14 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0, return 0 return 1 - if mydo == "fetch": + if need_distfiles: # Files are already checked inside fetch(), # so do not check them again. checkme = [] + elif unpacked: + # The unpack phase is marked as complete, so it + # would be wasteful to check distfiles again. + checkme = [] else: checkme = alist @@ -845,7 +1000,7 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0, # this phase. This can raise PermissionDenied if # the current user doesn't have write access to $PKGDIR. 
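The "unpacked" detection above lets dependent phases skip re-fetching and re-unpacking when WORKDIR is still current: the .unpacked stamp must exist, and no fetched distfile may be newer than WORKDIR itself. As a predicate:

import os

def workdir_is_fresh(workdir, builddir, distdir, distfiles):
    try:
        workdir_st = os.stat(workdir)
    except OSError:
        return False  # no WORKDIR yet
    if not os.path.exists(os.path.join(builddir, ".unpacked")):
        return False  # never marked as unpacked
    for x in distfiles:
        try:
            x_st = os.stat(os.path.join(distdir, x))
        except OSError:
            x_st = None  # file not fetched yet
        if x_st is None or x_st.st_mtime > workdir_st.st_mtime:
            return False  # stale: recreate WORKDIR
    return True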
if hasattr(portage, 'db'): - bintree = portage.db[mysettings["ROOT"]]["bintree"] + bintree = portage.db[mysettings['EROOT']]['bintree'] mysettings["PORTAGE_BINPKG_TMPFILE"] = \ bintree.getname(mysettings.mycpv) + \ ".%s" % (os.getpid(),) @@ -866,6 +1021,13 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0, if mydo == "package" and bintree is not None: bintree.inject(mysettings.mycpv, filename=mysettings["PORTAGE_BINPKG_TMPFILE"]) + else: + if "PORTAGE_BINPKG_TMPFILE" in mysettings: + try: + os.unlink(mysettings["PORTAGE_BINPKG_TMPFILE"]) + except OSError: + pass + elif mydo=="qmerge": # check to ensure install was run. this *only* pops up when users # forget it and are using ebuild @@ -877,6 +1039,7 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0, # qmerge is a special phase that implies noclean. if "noclean" not in mysettings.features: mysettings.features.add("noclean") + _handle_self_update(mysettings, vartree.dbapi) #qmerge is specifically not supposed to do a runtime dep check retval = merge( mysettings["CATEGORY"], mysettings["PF"], mysettings["D"], @@ -893,6 +1056,7 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0, # so that it's only called once. elog_process(mysettings.mycpv, mysettings) if retval == os.EX_OK: + _handle_self_update(mysettings, vartree.dbapi) retval = merge(mysettings["CATEGORY"], mysettings["PF"], mysettings["D"], os.path.join(mysettings["PORTAGE_BUILDDIR"], "build-info"), myroot, mysettings, @@ -944,10 +1108,31 @@ def _check_temp_dir(settings): # as some people use a separate PORTAGE_TMPDIR mount # we prefer that as the checks below would otherwise be pointless # for those people. - if os.path.exists(os.path.join(settings["PORTAGE_TMPDIR"], "portage")): - checkdir = os.path.join(settings["PORTAGE_TMPDIR"], "portage") + tmpdir = os.path.realpath(settings["PORTAGE_TMPDIR"]) + if os.path.exists(os.path.join(tmpdir, "portage")): + checkdir = os.path.realpath(os.path.join(tmpdir, "portage")) + if ("sandbox" in settings.features or + "usersandox" in settings.features) and \ + not checkdir.startswith(tmpdir + os.sep): + msg = _("The 'portage' subdirectory of the directory " + "referenced by the PORTAGE_TMPDIR variable appears to be " + "a symlink. In order to avoid sandbox violations (see bug " + "#378379), you must adjust PORTAGE_TMPDIR instead of using " + "the symlink located at '%s'. A suitable PORTAGE_TMPDIR " + "setting would be '%s'.") % \ + (os.path.join(tmpdir, "portage"), checkdir) + lines = [] + lines.append("") + lines.append("") + lines.extend(wrap(msg, 72)) + lines.append("") + for line in lines: + if line: + line = "!!! 
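The _check_temp_dir() hunk above rejects a symlinked $PORTAGE_TMPDIR/portage when sandboxing is active, since writes through the symlink would appear to escape PORTAGE_TMPDIR and trigger violations (bug #378379). The essential realpath comparison:

import os

def tmpdir_portage_ok(tmpdir_setting):
    tmpdir = os.path.realpath(tmpdir_setting)
    checkdir = os.path.join(tmpdir, "portage")
    if os.path.exists(checkdir):
        checkdir = os.path.realpath(checkdir)
        if not checkdir.startswith(tmpdir + os.sep):
            return False  # symlink points outside PORTAGE_TMPDIR
    return True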
%s" % (line,) + writemsg("%s\n" % (line,), noiselevel=-1) + return 1 else: - checkdir = settings["PORTAGE_TMPDIR"] + checkdir = tmpdir if not os.access(checkdir, os.W_OK): writemsg(_("%s is not writable.\n" @@ -955,8 +1140,7 @@ def _check_temp_dir(settings): noiselevel=-1) return 1 - else: - fd = tempfile.NamedTemporaryFile(prefix="exectest-", dir=checkdir) + with tempfile.NamedTemporaryFile(prefix="exectest-", dir=checkdir) as fd: os.chmod(fd.name, 0o755) if not os.access(fd.name, os.X_OK): writemsg(_("Can not execute files in %s\n" @@ -1085,7 +1269,8 @@ def _validate_deps(mysettings, myroot, mydo, mydbapi): all_keys.add("SRC_URI") all_keys = tuple(all_keys) metadata = dict(zip(all_keys, - mydbapi.aux_get(mysettings.mycpv, all_keys))) + mydbapi.aux_get(mysettings.mycpv, all_keys, + myrepo=mysettings.get("PORTAGE_REPO_NAME")))) class FakeTree(object): def __init__(self, mydb): @@ -1173,7 +1358,7 @@ def spawn(mystring, mysettings, debug=0, free=0, droppriv=0, sesandbox=0, fakero @param keywords: Extra options encoded as a dict, to be passed to spawn @type keywords: Dictionary @rtype: Integer - @returns: + @return: 1. The return code of the spawned process. """ @@ -1201,7 +1386,8 @@ def spawn(mystring, mysettings, debug=0, free=0, droppriv=0, sesandbox=0, fakero # fake ownership/permissions will have to be converted to real # permissions in the merge phase. fakeroot = fakeroot and uid != 0 and portage.process.fakeroot_capable - if droppriv and not uid and portage_gid and portage_uid: + if droppriv and uid == 0 and portage_gid and portage_uid and \ + hasattr(os, "setgroups"): keywords.update({"uid":portage_uid,"gid":portage_gid, "groups":userpriv_groups,"umask":0o02}) if not free: @@ -1277,6 +1463,17 @@ def spawnebuild(mydo, actionmap, mysettings, debug, alwaysdep=0, if mydo == "pretend" and not eapi_has_pkg_pretend(eapi): return os.EX_OK + if not (mydo == "install" and "noauto" in mysettings.features): + check_file = os.path.join( + mysettings["PORTAGE_BUILDDIR"], ".%sed" % mydo.rstrip('e')) + if os.path.exists(check_file): + writemsg_stdout(_(">>> It appears that " + "'%(action)s' has already executed for '%(pkg)s'; skipping.\n") % + {"action":mydo, "pkg":mysettings["PF"]}) + writemsg_stdout(_(">>> Remove '%(file)s' to force %(action)s.\n") % + {"file":check_file, "action":mydo}) + return os.EX_OK + return _spawn_phase(mydo, mysettings, actionmap=actionmap, logfile=logfile, fd_pipes=fd_pipes, returnpid=returnpid) @@ -1285,13 +1482,14 @@ _post_phase_cmds = { "install" : [ "install_qa_check", - "install_symlink_html_docs"], + "install_symlink_html_docs", + "install_hooks"], "preinst" : [ "preinst_sfperms", "preinst_selinux_labels", "preinst_suid_scan", - "preinst_mask"] + ] } def _post_phase_userpriv_perms(mysettings): @@ -1320,7 +1518,9 @@ def _check_build_log(mysettings, out=None): except EnvironmentError: return + f_real = None if logfile.endswith('.gz'): + f_real = f f = gzip.GzipFile(filename='', mode='rb', fileobj=f) am_maintainer_mode = [] @@ -1425,19 +1625,32 @@ def _check_build_log(mysettings, out=None): msg.extend("\t" + line for line in make_jobserver) _eqawarn(msg) + f.close() + if f_real is not None: + f_real.close() + def _post_src_install_chost_fix(settings): """ It's possible that the ebuild has changed the CHOST variable, so revert it to the initial - setting. + setting. Also, revert IUSE in case it's corrupted + due to local environment settings like in bug #386829. 
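The skip logic added to spawnebuild() above relies on the stamp files that each completed phase leaves in PORTAGE_BUILDDIR; the stamp name is derived by stripping a trailing "e" before appending "ed". In isolation:

import os

def phase_already_done(builddir, mydo):
    # "unpack" -> ".unpacked", "compile" -> ".compiled",
    # "configure" -> ".configured"
    check_file = os.path.join(builddir, ".%sed" % mydo.rstrip("e"))
    return os.path.exists(check_file)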
""" - if settings.get('CATEGORY') == 'virtual': - return - chost = settings.get('CHOST') - if chost: - write_atomic(os.path.join(settings['PORTAGE_BUILDDIR'], - 'build-info', 'CHOST'), chost + '\n') + build_info_dir = os.path.join(settings['PORTAGE_BUILDDIR'], 'build-info') + + for k in ('IUSE',): + v = settings.get(k) + if v is not None: + write_atomic(os.path.join(build_info_dir, k), v + '\n') + + # The following variables are irrelevant for virtual packages. + if settings.get('CATEGORY') != 'virtual': + + for k in ('CHOST',): + v = settings.get(k) + if v is not None: + write_atomic(os.path.join(build_info_dir, k), v + '\n') _vdb_use_conditional_keys = ('DEPEND', 'LICENSE', 'PDEPEND', 'PROPERTIES', 'PROVIDE', 'RDEPEND', 'RESTRICT',) @@ -1481,6 +1694,7 @@ def _post_src_install_uid_fix(mysettings, out): _preinst_bsdflags(mysettings) destdir = mysettings["D"] + ed_len = len(mysettings["ED"]) unicode_errors = [] while True: @@ -1499,12 +1713,12 @@ def _post_src_install_uid_fix(mysettings, out): new_parent = _unicode_decode(parent, encoding=_encodings['merge'], errors='replace') new_parent = _unicode_encode(new_parent, - encoding=_encodings['merge'], errors='backslashreplace') + encoding='ascii', errors='backslashreplace') new_parent = _unicode_decode(new_parent, encoding=_encodings['merge'], errors='replace') os.rename(parent, new_parent) unicode_error = True - unicode_errors.append(new_parent[len(destdir):]) + unicode_errors.append(new_parent[ed_len:]) break for fname in chain(dirs, files): @@ -1517,13 +1731,13 @@ def _post_src_install_uid_fix(mysettings, out): new_fname = _unicode_decode(fname, encoding=_encodings['merge'], errors='replace') new_fname = _unicode_encode(new_fname, - encoding=_encodings['merge'], errors='backslashreplace') + encoding='ascii', errors='backslashreplace') new_fname = _unicode_decode(new_fname, encoding=_encodings['merge'], errors='replace') new_fpath = os.path.join(parent, new_fname) os.rename(fpath, new_fpath) unicode_error = True - unicode_errors.append(new_fpath[len(destdir):]) + unicode_errors.append(new_fpath[ed_len:]) fname = new_fname fpath = new_fpath else: @@ -1597,20 +1811,24 @@ def _post_src_install_uid_fix(mysettings, out): if unicode_errors: for l in _merge_unicode_error(unicode_errors): - eerror(l, phase='install', key=mysettings.mycpv, out=out) + eqawarn(l, phase='install', key=mysettings.mycpv, out=out) build_info_dir = os.path.join(mysettings['PORTAGE_BUILDDIR'], 'build-info') - io.open(_unicode_encode(os.path.join(build_info_dir, + f = io.open(_unicode_encode(os.path.join(build_info_dir, 'SIZE'), encoding=_encodings['fs'], errors='strict'), mode='w', encoding=_encodings['repo.content'], - errors='strict').write(_unicode_decode(str(size) + '\n')) + errors='strict') + f.write(_unicode_decode(str(size) + '\n')) + f.close() - io.open(_unicode_encode(os.path.join(build_info_dir, + f = io.open(_unicode_encode(os.path.join(build_info_dir, 'BUILD_TIME'), encoding=_encodings['fs'], errors='strict'), mode='w', encoding=_encodings['repo.content'], - errors='strict').write(_unicode_decode("%.0f\n" % (time.time(),))) + errors='strict') + f.write(_unicode_decode("%.0f\n" % (time.time(),))) + f.close() use = frozenset(mysettings['PORTAGE_USE'].split()) for k in _vdb_use_conditional_keys: @@ -1636,10 +1854,12 @@ def _post_src_install_uid_fix(mysettings, out): except OSError: pass continue - io.open(_unicode_encode(os.path.join(build_info_dir, + f = io.open(_unicode_encode(os.path.join(build_info_dir, k), encoding=_encodings['fs'], errors='strict'), mode='w', 
encoding=_encodings['repo.content'], - errors='strict').write(_unicode_decode(v + '\n')) + errors='strict') + f.write(_unicode_decode(v + '\n')) + f.close() _reapply_bsdflags_to_image(mysettings) @@ -1664,15 +1884,46 @@ def _post_src_install_soname_symlinks(mysettings, out): needed_filename = os.path.join(mysettings["PORTAGE_BUILDDIR"], "build-info", "NEEDED.ELF.2") + f = None try: - lines = io.open(_unicode_encode(needed_filename, + f = io.open(_unicode_encode(needed_filename, encoding=_encodings['fs'], errors='strict'), mode='r', encoding=_encodings['repo.content'], - errors='replace').readlines() + errors='replace') + lines = f.readlines() except IOError as e: if e.errno not in (errno.ENOENT, errno.ESTALE): raise return + finally: + if f is not None: + f.close() + + qa_no_symlink = "" + f = None + try: + f = io.open(_unicode_encode(os.path.join( + mysettings["PORTAGE_BUILDDIR"], + "build-info", "QA_SONAME_NO_SYMLINK"), + encoding=_encodings['fs'], errors='strict'), + mode='r', encoding=_encodings['repo.content'], + errors='replace') + qa_no_symlink = f.read() + except IOError as e: + if e.errno not in (errno.ENOENT, errno.ESTALE): + raise + finally: + if f is not None: + f.close() + + qa_no_symlink = qa_no_symlink.split() + if qa_no_symlink: + if len(qa_no_symlink) > 1: + qa_no_symlink = "|".join("(%s)" % x for x in qa_no_symlink) + qa_no_symlink = "^(%s)$" % qa_no_symlink + else: + qa_no_symlink = "^%s$" % qa_no_symlink[0] + qa_no_symlink = re.compile(qa_no_symlink) libpaths = set(portage.util.getlibpaths( mysettings["ROOT"], env=mysettings)) @@ -1730,6 +1981,8 @@ def _post_src_install_soname_symlinks(mysettings, out): continue if not is_libdir(os.path.dirname(obj)): continue + if qa_no_symlink and qa_no_symlink.match(obj.strip(os.sep)) is not None: + continue obj_file_path = os.path.join(image_dir, obj.lstrip(os.sep)) sym_file_path = os.path.join(os.path.dirname(obj_file_path), soname) @@ -1746,8 +1999,7 @@ def _post_src_install_soname_symlinks(mysettings, out): if not missing_symlinks: return - qa_msg = ["QA Notice: Missing soname symlink(s) " + \ - "will be automatically created:"] + qa_msg = ["QA Notice: Missing soname symlink(s):"] qa_msg.append("") qa_msg.extend("\t%s -> %s" % (os.path.join( os.path.dirname(obj).lstrip(os.sep), soname), @@ -1757,20 +2009,11 @@ def _post_src_install_soname_symlinks(mysettings, out): for line in qa_msg: eqawarn(line, key=mysettings.mycpv, out=out) - _preinst_bsdflags(mysettings) - for obj, soname in missing_symlinks: - obj_file_path = os.path.join(image_dir, obj.lstrip(os.sep)) - sym_file_path = os.path.join(os.path.dirname(obj_file_path), soname) - os.symlink(os.path.basename(obj_file_path), sym_file_path) - _reapply_bsdflags_to_image(mysettings) - def _merge_unicode_error(errors): lines = [] - msg = _("This package installs one or more file names containing " - "characters that do not match your current locale " - "settings. The current setting for filesystem encoding is '%s'.") \ - % _encodings['merge'] + msg = _("QA Notice: This package installs one or more file names " + "containing characters that are not encoded with the UTF-8 encoding.") lines.extend(wrap(msg, 72)) lines.append("") @@ -1778,14 +2021,55 @@ def _merge_unicode_error(errors): lines.extend("\t" + x for x in errors) lines.append("") - if _encodings['merge'].lower().replace('_', '').replace('-', '') != 'utf8': - msg = _("For best results, UTF-8 encoding is recommended. 
See " - "the Gentoo Linux Localization Guide for instructions " - "about how to configure your locale for UTF-8 encoding:") - lines.extend(wrap(msg, 72)) - lines.append("") - lines.append("\t" + \ - "http://www.gentoo.org/doc/en/guide-localization.xml") - lines.append("") - return lines + +def _prepare_self_update(settings): + """ + Call this when portage is updating itself, in order to create + temporary copies of PORTAGE_BIN_PATH and PORTAGE_PYM_PATH, since + the new versions may be incompatible. An atexit hook will + automatically clean up the temporary copies. + """ + + # sanity check: ensure that that this routine only runs once + if portage._bin_path != portage.const.PORTAGE_BIN_PATH: + return + + # Load lazily referenced portage submodules into memory, + # so imports won't fail during portage upgrade/downgrade. + _preload_elog_modules(settings) + portage.proxy.lazyimport._preload_portage_submodules() + + # Make the temp directory inside $PORTAGE_TMPDIR/portage, since + # it's common for /tmp and /var/tmp to be mounted with the + # "noexec" option (see bug #346899). + build_prefix = os.path.join(settings["PORTAGE_TMPDIR"], "portage") + portage.util.ensure_dirs(build_prefix) + base_path_tmp = tempfile.mkdtemp( + "", "._portage_reinstall_.", build_prefix) + portage.process.atexit_register(shutil.rmtree, base_path_tmp) + + orig_bin_path = portage._bin_path + portage._bin_path = os.path.join(base_path_tmp, "bin") + shutil.copytree(orig_bin_path, portage._bin_path, symlinks=True) + + orig_pym_path = portage._pym_path + portage._pym_path = os.path.join(base_path_tmp, "pym") + shutil.copytree(orig_pym_path, portage._pym_path, symlinks=True) + + for dir_path in (base_path_tmp, portage._bin_path, portage._pym_path): + os.chmod(dir_path, 0o755) + +def _handle_self_update(settings, vardb): + cpv = settings.mycpv + if settings["ROOT"] == "/" and \ + portage.dep.match_from_list( + portage.const.PORTAGE_PACKAGE_ATOM, [cpv]): + inherited = frozenset(settings.get('INHERITED', '').split()) + if not vardb.cpv_exists(cpv) or \ + '9999' in cpv or \ + 'git' in inherited or \ + 'git-2' in inherited: + _prepare_self_update(settings) + return True + return False diff --git a/portage_with_autodep/pym/portage/package/ebuild/doebuild.pyo b/portage_with_autodep/pym/portage/package/ebuild/doebuild.pyo Binary files differnew file mode 100644 index 0000000..a6ebb1d --- /dev/null +++ b/portage_with_autodep/pym/portage/package/ebuild/doebuild.pyo diff --git a/portage_with_autodep/pym/portage/package/ebuild/fetch.py b/portage_with_autodep/pym/portage/package/ebuild/fetch.py index 5cbbf87..b795b28 100644 --- a/portage_with_autodep/pym/portage/package/ebuild/fetch.py +++ b/portage_with_autodep/pym/portage/package/ebuild/fetch.py @@ -1,4 +1,4 @@ -# Copyright 2010-2011 Gentoo Foundation +# Copyright 2010-2012 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 from __future__ import print_function @@ -10,7 +10,6 @@ import io import logging import random import re -import shutil import stat import sys import tempfile @@ -24,7 +23,7 @@ portage.proxy.lazyimport.lazyimport(globals(), 'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs', ) -from portage import OrderedDict, os, selinux, _encodings, \ +from portage import OrderedDict, os, selinux, shutil, _encodings, \ _shell_quote, _unicode_encode from portage.checksum import hashfunc_map, perform_md5, verify_all from portage.const import BASH_BINARY, CUSTOM_MIRRORS_FILE, \ @@ -34,7 +33,6 @@ from portage.exception import FileNotFound, 
OperationNotPermitted, \ PortageException, TryAgain from portage.localization import _ from portage.locks import lockfile, unlockfile -from portage.manifest import Manifest from portage.output import colorize, EOutput from portage.util import apply_recursive_permissions, \ apply_secpass_permissions, ensure_dirs, grabdict, shlex_split, \ @@ -48,6 +46,9 @@ _userpriv_spawn_kwargs = ( ("umask", 0o02), ) +def _hide_url_passwd(url): + return re.sub(r'//(.+):.+@(.+)', r'//\1:*password*@\2', url) + def _spawn_fetch(settings, args, **kwargs): """ Spawn a process with appropriate settings for fetching, including @@ -68,7 +69,8 @@ def _spawn_fetch(settings, args, **kwargs): } if "userfetch" in settings.features and \ - os.getuid() == 0 and portage_gid and portage_uid: + os.getuid() == 0 and portage_gid and portage_uid and \ + hasattr(os, "setgroups"): kwargs.update(_userpriv_spawn_kwargs) spawn_func = spawn @@ -356,7 +358,8 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0, allow_missing_digests = True pkgdir = mysettings.get("O") if digests is None and not (pkgdir is None or skip_manifest): - mydigests = Manifest( + mydigests = mysettings.repositories.get_repo_for_location( + os.path.dirname(os.path.dirname(pkgdir))).load_manifest( pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST") elif digests is None or skip_manifest: # no digests because fetch was not called for a specific package @@ -612,18 +615,6 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0, elif userfetch: has_space = False - if not has_space: - writemsg(_("!!! Insufficient space to store %s in %s\n") % \ - (myfile, mysettings["DISTDIR"]), noiselevel=-1) - - if has_space_superuser: - writemsg(_("!!! Insufficient privileges to use " - "remaining space.\n"), noiselevel=-1) - if userfetch: - writemsg(_("!!! You may set FEATURES=\"-userfetch\"" - " in /etc/make.conf in order to fetch with\n" - "!!! superuser privileges.\n"), noiselevel=-1) - if distdir_writable and use_locks: lock_kwargs = {} @@ -646,7 +637,10 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0, match, mystat = _check_distfile( myfile_path, pruned_digests, eout) if match: - if distdir_writable: + # Skip permission adjustment for symlinks, since we don't + # want to modify anything outside of the primary DISTDIR, + # and symlinks typically point to PORTAGE_RO_DISTDIRS. + if distdir_writable and not os.path.islink(myfile_path): try: apply_secpass_permissions(myfile_path, gid=portage_gid, mode=0o664, mask=0o2, @@ -727,6 +721,20 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0, os.symlink(readonly_file, myfile_path) continue + # this message is shown only after we know that + # the file is not already fetched + if not has_space: + writemsg(_("!!! Insufficient space to store %s in %s\n") % \ + (myfile, mysettings["DISTDIR"]), noiselevel=-1) + + if has_space_superuser: + writemsg(_("!!! Insufficient privileges to use " + "remaining space.\n"), noiselevel=-1) + if userfetch: + writemsg(_("!!! You may set FEATURES=\"-userfetch\"" + " in /etc/make.conf in order to fetch with\n" + "!!! superuser privileges.\n"), noiselevel=-1) + if fsmirrors and not os.path.exists(myfile_path) and has_space: for mydir in fsmirrors: mirror_file = os.path.join(mydir, myfile) @@ -746,14 +754,18 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0, raise del e else: - try: - apply_secpass_permissions( - myfile_path, gid=portage_gid, mode=0o664, mask=0o2, - stat_cached=mystat) - except PortageException as e: - if not os.access(myfile_path, os.R_OK): - writemsg(_("!!! 
Failed to adjust permissions:" - " %s\n") % str(e), noiselevel=-1) + # Skip permission adjustment for symlinks, since we don't + # want to modify anything outside of the primary DISTDIR, + # and symlinks typically point to PORTAGE_RO_DISTDIRS. + if not os.path.islink(myfile_path): + try: + apply_secpass_permissions(myfile_path, + gid=portage_gid, mode=0o664, mask=0o2, + stat_cached=mystat) + except PortageException as e: + if not os.access(myfile_path, os.R_OK): + writemsg(_("!!! Failed to adjust permissions:" + " %s\n") % (e,), noiselevel=-1) # If the file is empty then it's obviously invalid. Remove # the empty file and try to download if possible. @@ -940,7 +952,7 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0, locfetch=fetchcommand command_var = fetchcommand_var writemsg_stdout(_(">>> Downloading '%s'\n") % \ - re.sub(r'//(.+):.+@(.+)/',r'//\1:*password*@\2/', loc)) + _hide_url_passwd(loc)) variables = { "DISTDIR": mysettings["DISTDIR"], "URI": loc, @@ -1019,18 +1031,19 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0, # Fetch failed... Try the next one... Kill 404 files though. if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")): html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M) - if html404.search(io.open( + with io.open( _unicode_encode(myfile_path, encoding=_encodings['fs'], errors='strict'), mode='r', encoding=_encodings['content'], errors='replace' - ).read()): - try: - os.unlink(mysettings["DISTDIR"]+"/"+myfile) - writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n")) - fetched = 0 - continue - except (IOError, OSError): - pass + ) as f: + if html404.search(f.read()): + try: + os.unlink(mysettings["DISTDIR"]+"/"+myfile) + writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n")) + fetched = 0 + continue + except (IOError, OSError): + pass fetched = 1 continue if True: @@ -1040,7 +1053,6 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0, # from another mirror... verified_ok,reason = verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile]) if not verified_ok: - print(reason) writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile, noiselevel=-1) writemsg(_("!!! 
Reason: %s\n") % reason[0], diff --git a/portage_with_autodep/pym/portage/package/ebuild/fetch.pyo b/portage_with_autodep/pym/portage/package/ebuild/fetch.pyo Binary files differnew file mode 100644 index 0000000..3bd81df --- /dev/null +++ b/portage_with_autodep/pym/portage/package/ebuild/fetch.pyo diff --git a/portage_with_autodep/pym/portage/package/ebuild/getmaskingreason.py b/portage_with_autodep/pym/portage/package/ebuild/getmaskingreason.py index f2af638..8a88c2f 100644 --- a/portage_with_autodep/pym/portage/package/ebuild/getmaskingreason.py +++ b/portage_with_autodep/pym/portage/package/ebuild/getmaskingreason.py @@ -83,7 +83,13 @@ def getmaskingreason(mycpv, metadata=None, settings=None, pmasklists = [] for profile in locations: pmask_filename = os.path.join(profile, "package.mask") - pmasklists.append((pmask_filename, grablines(pmask_filename, recursive=1))) + node = None + for l, recursive_filename in grablines(pmask_filename, + recursive=1, remember_source_file=True): + if node is None or node[0] != recursive_filename: + node = (recursive_filename, []) + pmasklists.append(node) + node[1].append(l) pmaskdict = settings._mask_manager._pmaskdict if mycp in pmaskdict: diff --git a/portage_with_autodep/pym/portage/package/ebuild/getmaskingreason.pyo b/portage_with_autodep/pym/portage/package/ebuild/getmaskingreason.pyo Binary files differnew file mode 100644 index 0000000..1614244 --- /dev/null +++ b/portage_with_autodep/pym/portage/package/ebuild/getmaskingreason.pyo diff --git a/portage_with_autodep/pym/portage/package/ebuild/getmaskingstatus.py b/portage_with_autodep/pym/portage/package/ebuild/getmaskingstatus.py index 4c65fcc..9bf605d 100644 --- a/portage_with_autodep/pym/portage/package/ebuild/getmaskingstatus.py +++ b/portage_with_autodep/pym/portage/package/ebuild/getmaskingstatus.py @@ -1,4 +1,4 @@ -# Copyright 2010-2011 Gentoo Foundation +# Copyright 2010-2012 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 __all__ = ['getmaskingstatus'] @@ -7,11 +7,9 @@ import sys import portage from portage import eapi_is_supported, _eapi_is_deprecated -from portage.dep import match_from_list, _slot_separator, _repo_separator from portage.localization import _ from portage.package.ebuild.config import config -from portage.versions import catpkgsplit, cpv_getkey -from _emerge.Package import Package +from portage.versions import catpkgsplit, _pkg_str if sys.hexversion >= 0x3000000: basestring = str @@ -53,9 +51,6 @@ def _getmaskingstatus(mycpv, settings, portdb, myrepo=None): metadata = pkg.metadata installed = pkg.installed - mysplit = catpkgsplit(mycpv) - if not mysplit: - raise ValueError(_("invalid CPV: %s") % mycpv) if metadata is None: db_keys = list(portdb._aux_cache_keys) try: @@ -70,11 +65,14 @@ def _getmaskingstatus(mycpv, settings, portdb, myrepo=None): else: metadata["USE"] = "" - rValue = [] + if not hasattr(mycpv, 'slot'): + try: + mycpv = _pkg_str(mycpv, slot=metadata['SLOT'], + repo=metadata.get('repository')) + except portage.exception.InvalidData: + raise ValueError(_("invalid CPV: %s") % mycpv) - # profile checking - if settings._getProfileMaskAtom(mycpv, metadata): - rValue.append(_MaskReason("profile", "profile")) + rValue = [] # package.mask checking if settings._getMaskAtom(mycpv, metadata): @@ -85,8 +83,6 @@ def _getmaskingstatus(mycpv, settings, portdb, myrepo=None): mygroups = settings._getKeywords(mycpv, metadata) licenses = metadata["LICENSE"] properties = metadata["PROPERTIES"] - if eapi.startswith("-"): - eapi = eapi[1:] if 
not eapi_is_supported(eapi): return [_MaskReason("EAPI", "EAPI %s" % eapi)] elif _eapi_is_deprecated(eapi) and not installed: diff --git a/portage_with_autodep/pym/portage/package/ebuild/getmaskingstatus.pyo b/portage_with_autodep/pym/portage/package/ebuild/getmaskingstatus.pyo Binary files differnew file mode 100644 index 0000000..9cf1d9d --- /dev/null +++ b/portage_with_autodep/pym/portage/package/ebuild/getmaskingstatus.pyo diff --git a/portage_with_autodep/pym/portage/package/ebuild/prepare_build_dirs.py b/portage_with_autodep/pym/portage/package/ebuild/prepare_build_dirs.py index 616dc2e..b8fbdc5 100644 --- a/portage_with_autodep/pym/portage/package/ebuild/prepare_build_dirs.py +++ b/portage_with_autodep/pym/portage/package/ebuild/prepare_build_dirs.py @@ -5,11 +5,11 @@ __all__ = ['prepare_build_dirs'] import errno import gzip -import shutil import stat import time -from portage import os, _encodings, _unicode_encode, _unicode_decode +import portage +from portage import os, shutil, _encodings, _unicode_encode, _unicode_decode from portage.data import portage_gid, portage_uid, secpass from portage.exception import DirectoryNotFound, FileNotFound, \ OperationNotPermitted, PermissionDenied, PortageException @@ -118,11 +118,13 @@ def _adjust_perms_msg(settings, msg): background = settings.get("PORTAGE_BACKGROUND") == "1" log_path = settings.get("PORTAGE_LOG_FILE") log_file = None + log_file_real = None if background and log_path is not None: try: log_file = open(_unicode_encode(log_path, encoding=_encodings['fs'], errors='strict'), mode='ab') + log_file_real = log_file except IOError: def write(msg): pass @@ -139,6 +141,8 @@ def _adjust_perms_msg(settings, msg): finally: if log_file is not None: log_file.close() + if log_file_real is not log_file: + log_file_real.close() def _prepare_features_dirs(mysettings): @@ -311,7 +315,7 @@ def _prepare_workdir(mysettings): logdir = normalize_path(mysettings["PORT_LOGDIR"]) logid_path = os.path.join(mysettings["PORTAGE_BUILDDIR"], ".logid") if not os.path.exists(logid_path): - open(_unicode_encode(logid_path), 'w') + open(_unicode_encode(logid_path), 'w').close() logid_time = _unicode_decode(time.strftime("%Y%m%d-%H%M%S", time.gmtime(os.stat(logid_path).st_mtime)), encoding=_encodings['content'], errors='replace') @@ -342,13 +346,31 @@ def _prepare_workdir(mysettings): writemsg(_unicode_decode("!!! %s: %s\n") % (_("Permission Denied"), log_subdir), noiselevel=-1) + tmpdir_log_path = os.path.join( + mysettings["T"], "build.log%s" % compress_log_ext) if not logdir_subdir_ok: # NOTE: When sesandbox is enabled, the local SELinux security policies # may not allow output to be piped out of the sesandbox domain. The # current policy will allow it to work when a pty is available, but # not through a normal pipe. See bug #162404. - mysettings["PORTAGE_LOG_FILE"] = os.path.join( - mysettings["T"], "build.log%s" % compress_log_ext) + mysettings["PORTAGE_LOG_FILE"] = tmpdir_log_path + else: + # Create a symlink from tmpdir_log_path to PORTAGE_LOG_FILE, as + # requested in bug #412865. 
+ make_new_symlink = False + try: + target = os.readlink(tmpdir_log_path) + except OSError: + make_new_symlink = True + else: + if target != mysettings["PORTAGE_LOG_FILE"]: + make_new_symlink = True + if make_new_symlink: + try: + os.unlink(tmpdir_log_path) + except OSError: + pass + os.symlink(mysettings["PORTAGE_LOG_FILE"], tmpdir_log_path) def _ensure_log_subdirs(logdir, subdir): """ @@ -358,13 +380,27 @@ def _ensure_log_subdirs(logdir, subdir): and subdir are assumed to be normalized absolute paths. """ st = os.stat(logdir) + uid = -1 gid = st.st_gid grp_mode = 0o2070 & st.st_mode + # If logdir is writable by the portage group but its uid + # is not portage_uid, then set the uid to portage_uid if + # we have privileges to do so, for compatibility with our + # default logrotate config (see bug 378451). With the + # "su portage portage" directive and logrotate-3.8.0, + # logrotate's chown call during the compression phase will + # only succeed if the log file's uid is portage_uid. + if grp_mode and gid == portage_gid and \ + portage.data.secpass >= 2: + uid = portage_uid + if st.st_uid != portage_uid: + ensure_dirs(logdir, uid=uid) + logdir_split_len = len(logdir.split(os.sep)) subdir_split = subdir.split(os.sep)[logdir_split_len:] subdir_split.reverse() current = logdir while subdir_split: current = os.path.join(current, subdir_split.pop()) - ensure_dirs(current, gid=gid, mode=grp_mode, mask=0) + ensure_dirs(current, uid=uid, gid=gid, mode=grp_mode, mask=0) diff --git a/portage_with_autodep/pym/portage/package/ebuild/prepare_build_dirs.pyo b/portage_with_autodep/pym/portage/package/ebuild/prepare_build_dirs.pyo Binary files differnew file mode 100644 index 0000000..2dcfaea --- /dev/null +++ b/portage_with_autodep/pym/portage/package/ebuild/prepare_build_dirs.pyo diff --git a/portage_with_autodep/pym/portage/process.py b/portage_with_autodep/pym/portage/process.py index 6866a2f..d7d1037 100644 --- a/portage_with_autodep/pym/portage/process.py +++ b/portage_with_autodep/pym/portage/process.py @@ -1,9 +1,11 @@ # portage.py -- core Portage functionality -# Copyright 1998-2010 Gentoo Foundation +# Copyright 1998-2012 Gentoo Foundation # Distributed under the terms of the GNU General Public License v2 import atexit +import errno +import platform import signal import sys import traceback @@ -32,6 +34,18 @@ if os.path.isdir("/proc/%i/fd" % os.getpid()): def get_open_fds(): return (int(fd) for fd in os.listdir("/proc/%i/fd" % os.getpid()) \ if fd.isdigit()) + + if platform.python_implementation() == 'PyPy': + # EAGAIN observed with PyPy 1.8. + _get_open_fds = get_open_fds + def get_open_fds(): + try: + return _get_open_fds() + except OSError as e: + if e.errno != errno.EAGAIN: + raise + return range(max_fd_limit) + else: def get_open_fds(): return range(max_fd_limit) @@ -257,7 +271,7 @@ def spawn(mycommand, env={}, opt_name=None, fd_pipes=None, returnpid=False, pid = os.fork() - if not pid: + if pid == 0: try: _exec(binary, mycommand, opt_name, fd_pipes, env, gid, groups, uid, umask, pre_exec) @@ -272,6 +286,9 @@ def spawn(mycommand, env={}, opt_name=None, fd_pipes=None, returnpid=False, sys.stderr.flush() os._exit(1) + if not isinstance(pid, int): + raise AssertionError("fork returned non-integer: %s" % (repr(pid),)) + # Add the pid to our local and the global pid lists. 
mypids.append(pid) spawned_pids.append(pid) @@ -350,13 +367,18 @@ def _exec(binary, mycommand, opt_name, fd_pipes, env, gid, groups, uid, umask, @param pre_exec: A function to be called with no arguments just prior to the exec call. @type pre_exec: callable @rtype: None - @returns: Never returns (calls os.execve) + @return: Never returns (calls os.execve) """ # If the process we're creating hasn't been given a name # assign it the name of the executable. if not opt_name: - opt_name = os.path.basename(binary) + if binary is portage._python_interpreter: + # NOTE: PyPy 1.7 will die due to "libary path not found" if argv[0] + # does not contain the full path of the binary. + opt_name = binary + else: + opt_name = os.path.basename(binary) # Set up the command's argument list. myargs = [opt_name] @@ -391,8 +413,20 @@ def _exec(binary, mycommand, opt_name, fd_pipes, env, gid, groups, uid, umask, # And switch to the new process. os.execve(binary, myargs, env) -def _setup_pipes(fd_pipes): - """Setup pipes for a forked process.""" +def _setup_pipes(fd_pipes, close_fds=True): + """Setup pipes for a forked process. + + WARNING: W |
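
Some of the hunks above are easier to follow in isolation. The _check_temp_dir() change rejects a symlinked $PORTAGE_TMPDIR/portage (bug #378379) by comparing canonical paths. A minimal standalone sketch of that test, with hypothetical paths:

    import os

    tmpdir = os.path.realpath("/var/tmp")  # stands in for settings["PORTAGE_TMPDIR"]
    checkdir = os.path.realpath(os.path.join(tmpdir, "portage"))

    # If "portage" is a symlink pointing outside of tmpdir, its canonical
    # path no longer starts with the canonical tmpdir prefix.
    if not checkdir.startswith(tmpdir + os.sep):
        print("symlinked build dir detected; set PORTAGE_TMPDIR to %s" % checkdir)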
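
The same function's executability probe now uses NamedTemporaryFile as a context manager, so the probe file is cleaned up even on early return. Roughly, under the assumption that checkdir is the directory selected above:

    import os
    import tempfile

    checkdir = tempfile.gettempdir()  # hypothetical stand-in
    if not os.access(checkdir, os.W_OK):
        print("%s is not writable" % checkdir)
    else:
        with tempfile.NamedTemporaryFile(prefix="exectest-", dir=checkdir) as fd:
            os.chmod(fd.name, 0o755)
            if not os.access(fd.name, os.X_OK):
                # Typical cause: PORTAGE_TMPDIR mounted with the "noexec" option.
                print("cannot execute files in %s" % checkdir)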
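
_post_src_install_soname_symlinks() now honors a QA_SONAME_NO_SYMLINK whitelist, folding the whitespace-separated patterns into a single anchored regular expression exactly as in the hunk. The sample patterns below are invented:

    import re

    patterns = [r"usr/lib64/libfoo\.so\..*", r"opt/bar/.*"]  # hypothetical file content
    if len(patterns) > 1:
        combined = "^(%s)$" % "|".join("(%s)" % x for x in patterns)
    else:
        combined = "^%s$" % patterns[0]
    qa_no_symlink = re.compile(combined)

    print(bool(qa_no_symlink.match("usr/lib64/libfoo.so.1")))  # True: symlink QA skipped
    print(bool(qa_no_symlink.match("usr/lib64/libbaz.so.1")))  # False: QA notice applies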
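
The new _handle_self_update()/_prepare_self_update() pair decides whether the package being merged is portage itself and, if so, copies the bin and pym trees aside before they are overwritten. A simplified standalone version of the decision logic; the atom handling is reduced here to a prefix check, whereas the real code uses portage.dep.match_from_list() with portage.const.PORTAGE_PACKAGE_ATOM, and the vardb lookup is replaced by a boolean:

    PORTAGE_ATOM = "sys-apps/portage"  # assumed value of PORTAGE_PACKAGE_ATOM

    def needs_self_update(root, cpv, vdb_has_cpv, inherited):
        """True when merging portage itself in a state where the installed
        and to-be-installed versions may be incompatible."""
        if root != "/" or not cpv.startswith(PORTAGE_ATOM + "-"):
            return False
        return (not vdb_has_cpv
                or "9999" in cpv
                or "git" in inherited
                or "git-2" in inherited)

    # A live ebuild of portage that is not yet recorded in the vardb:
    print(needs_self_update("/", "sys-apps/portage-9999", False, frozenset(["git-2"])))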
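
In fetch.py, the inline password-masking substitution was factored out into _hide_url_passwd(). The helper as it appears in the hunk, exercised with a made-up URL:

    import re

    def _hide_url_passwd(url):
        # Mask the password in "scheme://user:pass@host/..." before logging.
        return re.sub(r'//(.+):.+@(.+)', r'//\1:*password*@\2', url)

    print(_hide_url_passwd("ftp://user:s3cret@mirror.example.org/distfiles/foo.tar.gz"))
    # ftp://user:*password*@mirror.example.org/distfiles/foo.tar.gz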
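
The 404-detection block in the fetch loop was converted to a with-statement so the file handle is closed even when the search fails. The test itself is unchanged; reduced to a standalone form with a fabricated payload (the real code also checks the on-disk size and skips .html/.htm filenames):

    import re

    html404 = re.compile("<title>.*(not found|404).*</title>", re.I | re.M)

    payload = "<html><title>404 Not Found</title></html>"  # hypothetical small download
    if len(payload) < 100000 and html404.search(payload):
        print("would delete the invalid distfile and try the next mirror")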
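
_prepare_workdir() now maintains a ${T}/build.log symlink pointing at PORTAGE_LOG_FILE (bug #412865). The readlink/unlink/symlink sequence from the hunk, written as a reusable helper; the function name is hypothetical:

    import os

    def refresh_log_symlink(link_path, log_path):
        """Ensure link_path is a symlink to log_path, replacing a stale one."""
        try:
            if os.readlink(link_path) == log_path:
                return  # already points at the right log
        except OSError:
            pass  # missing, or not a symlink
        try:
            os.unlink(link_path)
        except OSError:
            pass
        os.symlink(log_path, link_path)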
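
Finally, the PyPy workaround in process.py wraps get_open_fds() so that an EAGAIN from the /proc listing degrades to scanning the whole descriptor range. Schematically, with a made-up limit in place of max_fd_limit:

    import errno
    import os

    MAX_FD = 1024  # stands in for max_fd_limit

    def _get_open_fds():
        # os.listdir() runs eagerly when the generator is created, so any
        # OSError surfaces inside the caller's try block below.
        return (int(fd) for fd in os.listdir("/proc/%i/fd" % os.getpid())
                if fd.isdigit())

    def get_open_fds():
        try:
            return _get_open_fds()
        except OSError as e:
            if e.errno != errno.EAGAIN:
                raise
            return range(MAX_FD)  # brute-force fallback, as in the patch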