aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
Diffstat (limited to 'portage_with_autodep/pym/_emerge/depgraph.py')
-rw-r--r--portage_with_autodep/pym/_emerge/depgraph.py1140
1 files changed, 667 insertions, 473 deletions
diff --git a/portage_with_autodep/pym/_emerge/depgraph.py b/portage_with_autodep/pym/_emerge/depgraph.py
index 5b48aca..572cea7 100644
--- a/portage_with_autodep/pym/_emerge/depgraph.py
+++ b/portage_with_autodep/pym/_emerge/depgraph.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -18,7 +18,10 @@ from portage import os, OrderedDict
from portage import _unicode_decode, _unicode_encode, _encodings
from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH
from portage.dbapi import dbapi
-from portage.dep import Atom, extract_affecting_use, check_required_use, human_readable_required_use, _repo_separator
+from portage.dbapi.dep_expand import dep_expand
+from portage.dep import Atom, best_match_to_list, extract_affecting_use, \
+ check_required_use, human_readable_required_use, match_from_list, \
+ _repo_separator
from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use
from portage.exception import InvalidAtom, InvalidDependString, PortageException
from portage.output import colorize, create_color_func, \
@@ -92,15 +95,13 @@ class _frozen_depgraph_config(object):
def __init__(self, settings, trees, myopts, spinner):
self.settings = settings
- self.target_root = settings["ROOT"]
+ self.target_root = settings["EROOT"]
self.myopts = myopts
self.edebug = 0
if settings.get("PORTAGE_DEBUG", "") == "1":
self.edebug = 1
self.spinner = spinner
- self._running_root = trees["/"]["root_config"]
- self._opts_no_restart = frozenset(["--buildpkgonly",
- "--fetchonly", "--fetch-all-uri", "--pretend"])
+ self._running_root = trees[trees._running_eroot]["root_config"]
self.pkgsettings = {}
self.trees = {}
self._trees_orig = trees
@@ -108,6 +109,7 @@ class _frozen_depgraph_config(object):
# All Package instances
self._pkg_cache = {}
self._highest_license_masked = {}
+ dynamic_deps = myopts.get("--dynamic-deps", "y") != "n"
for myroot in trees:
self.trees[myroot] = {}
# Create a RootConfig instance that references
@@ -121,7 +123,8 @@ class _frozen_depgraph_config(object):
self.trees[myroot]["vartree"] = \
FakeVartree(trees[myroot]["root_config"],
pkg_cache=self._pkg_cache,
- pkg_root_config=self.roots[myroot])
+ pkg_root_config=self.roots[myroot],
+ dynamic_deps=dynamic_deps)
self.pkgsettings[myroot] = portage.config(
clone=self.trees[myroot]["vartree"].settings)
@@ -174,7 +177,7 @@ class _rebuild_config(object):
rebuild_exclude = self._frozen_config.rebuild_exclude
rebuild_ignore = self._frozen_config.rebuild_ignore
if (self.rebuild and isinstance(parent, Package) and
- parent.built and (priority.buildtime or priority.runtime) and
+ parent.built and priority.buildtime and
isinstance(dep_pkg, Package) and
not rebuild_exclude.findAtomForPackage(parent) and
not rebuild_ignore.findAtomForPackage(dep_pkg)):
@@ -209,66 +212,63 @@ class _rebuild_config(object):
return True
- def _trigger_rebuild(self, parent, build_deps, runtime_deps):
+ def _trigger_rebuild(self, parent, build_deps):
root_slot = (parent.root, parent.slot_atom)
if root_slot in self.rebuild_list:
return False
trees = self._frozen_config.trees
- children = set(build_deps).intersection(runtime_deps)
reinstall = False
- for slot_atom in children:
- kids = set([build_deps[slot_atom], runtime_deps[slot_atom]])
- for dep_pkg in kids:
- dep_root_slot = (dep_pkg.root, slot_atom)
- if self._needs_rebuild(dep_pkg):
+ for slot_atom, dep_pkg in build_deps.items():
+ dep_root_slot = (dep_pkg.root, slot_atom)
+ if self._needs_rebuild(dep_pkg):
+ self.rebuild_list.add(root_slot)
+ return True
+ elif ("--usepkg" in self._frozen_config.myopts and
+ (dep_root_slot in self.reinstall_list or
+ dep_root_slot in self.rebuild_list or
+ not dep_pkg.installed)):
+
+ # A direct rebuild dependency is being installed. We
+ # should update the parent as well to the latest binary,
+ # if that binary is valid.
+ #
+ # To validate the binary, we check whether all of the
+ # rebuild dependencies are present on the same binhost.
+ #
+ # 1) If parent is present on the binhost, but one of its
+ # rebuild dependencies is not, then the parent should
+ # be rebuilt from source.
+ # 2) Otherwise, the parent binary is assumed to be valid,
+ # because all of its rebuild dependencies are
+ # consistent.
+ bintree = trees[parent.root]["bintree"]
+ uri = bintree.get_pkgindex_uri(parent.cpv)
+ dep_uri = bintree.get_pkgindex_uri(dep_pkg.cpv)
+ bindb = bintree.dbapi
+ if self.rebuild_if_new_ver and uri and uri != dep_uri:
+ cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
+ for cpv in bindb.match(dep_pkg.slot_atom):
+ if cpv_norev == catpkgsplit(cpv)[:-1]:
+ dep_uri = bintree.get_pkgindex_uri(cpv)
+ if uri == dep_uri:
+ break
+ if uri and uri != dep_uri:
+ # 1) Remote binary package is invalid because it was
+ # built without dep_pkg. Force rebuild.
self.rebuild_list.add(root_slot)
return True
- elif ("--usepkg" in self._frozen_config.myopts and
- (dep_root_slot in self.reinstall_list or
- dep_root_slot in self.rebuild_list or
- not dep_pkg.installed)):
-
- # A direct rebuild dependency is being installed. We
- # should update the parent as well to the latest binary,
- # if that binary is valid.
- #
- # To validate the binary, we check whether all of the
- # rebuild dependencies are present on the same binhost.
- #
- # 1) If parent is present on the binhost, but one of its
- # rebuild dependencies is not, then the parent should
- # be rebuilt from source.
- # 2) Otherwise, the parent binary is assumed to be valid,
- # because all of its rebuild dependencies are
- # consistent.
- bintree = trees[parent.root]["bintree"]
- uri = bintree.get_pkgindex_uri(parent.cpv)
- dep_uri = bintree.get_pkgindex_uri(dep_pkg.cpv)
- bindb = bintree.dbapi
- if self.rebuild_if_new_ver and uri and uri != dep_uri:
- cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
- for cpv in bindb.match(dep_pkg.slot_atom):
- if cpv_norev == catpkgsplit(cpv)[:-1]:
- dep_uri = bintree.get_pkgindex_uri(cpv)
- if uri == dep_uri:
- break
- if uri and uri != dep_uri:
- # 1) Remote binary package is invalid because it was
- # built without dep_pkg. Force rebuild.
- self.rebuild_list.add(root_slot)
- return True
- elif (parent.installed and
- root_slot not in self.reinstall_list):
- inst_build_time = parent.metadata.get("BUILD_TIME")
- try:
- bin_build_time, = bindb.aux_get(parent.cpv,
- ["BUILD_TIME"])
- except KeyError:
- continue
- if bin_build_time != inst_build_time:
- # 2) Remote binary package is valid, and local package
- # is not up to date. Force reinstall.
- reinstall = True
+ elif (parent.installed and
+ root_slot not in self.reinstall_list):
+ inst_build_time = parent.metadata.get("BUILD_TIME")
+ try:
+ bin_build_time, = bindb.aux_get(parent.cpv,
+ ["BUILD_TIME"])
+ except KeyError:
+ continue
+ if bin_build_time != inst_build_time:
+ # 2) Remote binary package is valid, and local package
+ # is not up to date. Force reinstall.
+ reinstall = True
if reinstall:
self.reinstall_list.add(root_slot)
return reinstall
@@ -282,31 +282,15 @@ class _rebuild_config(object):
need_restart = False
graph = self._graph
build_deps = {}
- runtime_deps = {}
- leaf_nodes = deque(graph.leaf_nodes())
-
- def ignore_non_runtime(priority):
- return not priority.runtime
- def ignore_non_buildtime(priority):
- return not priority.buildtime
+ leaf_nodes = deque(graph.leaf_nodes())
# Trigger rebuilds bottom-up (starting with the leaves) so that parents
# will always know which children are being rebuilt.
while graph:
if not leaf_nodes:
- # We're interested in intersection of buildtime and runtime,
- # so ignore edges that do not contain both.
- leaf_nodes.extend(graph.leaf_nodes(
- ignore_priority=ignore_non_runtime))
- if not leaf_nodes:
- leaf_nodes.extend(graph.leaf_nodes(
- ignore_priority=ignore_non_buildtime))
- if not leaf_nodes:
- # We'll have to drop an edge that is both
- # buildtime and runtime. This should be
- # quite rare.
- leaf_nodes.append(graph.order[-1])
+ # We'll have to drop an edge. This should be quite rare.
+ leaf_nodes.append(graph.order[-1])
node = leaf_nodes.popleft()
if node not in graph:
@@ -315,32 +299,23 @@ class _rebuild_config(object):
slot_atom = node.slot_atom
# Remove our leaf node from the graph, keeping track of deps.
- parents = graph.nodes[node][1].items()
+ parents = graph.parent_nodes(node)
graph.remove(node)
node_build_deps = build_deps.get(node, {})
- node_runtime_deps = runtime_deps.get(node, {})
- for parent, priorities in parents:
+ for parent in parents:
if parent == node:
# Ignore a direct cycle.
continue
parent_bdeps = build_deps.setdefault(parent, {})
- parent_rdeps = runtime_deps.setdefault(parent, {})
- for priority in priorities:
- if priority.buildtime:
- parent_bdeps[slot_atom] = node
- if priority.runtime:
- parent_rdeps[slot_atom] = node
- if slot_atom in parent_bdeps and slot_atom in parent_rdeps:
- parent_rdeps.update(node_runtime_deps)
+ parent_bdeps[slot_atom] = node
if not graph.child_nodes(parent):
leaf_nodes.append(parent)
# Trigger rebuilds for our leaf node. Because all of our children
- # have been processed, build_deps and runtime_deps will be
- # completely filled in, and self.rebuild_list / self.reinstall_list
- # will tell us whether any of our children need to be rebuilt or
- # reinstalled.
- if self._trigger_rebuild(node, node_build_deps, node_runtime_deps):
+ # have been processed, the build_deps will be completely filled in,
+ # and self.rebuild_list / self.reinstall_list will tell us whether
+ # any of our children need to be rebuilt or reinstalled.
+ if self._trigger_rebuild(node, node_build_deps):
need_restart = True
return need_restart
@@ -416,6 +391,11 @@ class _dynamic_depgraph_config(object):
self._ignored_deps = []
self._highest_pkg_cache = {}
+ # Binary packages that have been rejected because their USE
+ # didn't match the user's config. It maps packages to a set
+ # of flags causing the rejection.
+ self.ignored_binaries = {}
+
self._needed_unstable_keywords = backtrack_parameters.needed_unstable_keywords
self._needed_p_mask_changes = backtrack_parameters.needed_p_mask_changes
self._needed_license_changes = backtrack_parameters.needed_license_changes
@@ -536,9 +516,15 @@ class depgraph(object):
for myroot in self._frozen_config.trees:
+ dynamic_deps = self._dynamic_config.myparams.get(
+ "dynamic_deps", "y") != "n"
preload_installed_pkgs = \
"--nodeps" not in self._frozen_config.myopts
+ if self._frozen_config.myopts.get("--root-deps") is not None and \
+ myroot != self._frozen_config.target_root:
+ continue
+
fake_vartree = self._frozen_config.trees[myroot]["vartree"]
if not fake_vartree.dbapi:
# This needs to be called for the first depgraph, but not for
@@ -557,8 +543,11 @@ class depgraph(object):
for pkg in vardb:
self._spinner_update()
- # This triggers metadata updates via FakeVartree.
- vardb.aux_get(pkg.cpv, [])
+ if dynamic_deps:
+ # This causes FakeVartree to update the
+ # Package instance dependencies via
+ # PackageVirtualDbapi.aux_update()
+ vardb.aux_get(pkg.cpv, [])
fakedb.cpv_inject(pkg)
self._dynamic_config._vdb_loaded = True
@@ -567,6 +556,67 @@ class depgraph(object):
if self._frozen_config.spinner:
self._frozen_config.spinner.update()
+ def _show_ignored_binaries(self):
+ """
+ Show binaries that have been ignored because their USE didn't
+ match the user's config.
+ """
+ if not self._dynamic_config.ignored_binaries \
+ or '--quiet' in self._frozen_config.myopts \
+ or self._dynamic_config.myparams.get(
+ "binpkg_respect_use") in ("y", "n"):
+ return
+
+ for pkg in list(self._dynamic_config.ignored_binaries):
+
+ selected_pkg = self._dynamic_config.mydbapi[pkg.root
+ ].match_pkgs(pkg.slot_atom)
+
+ if not selected_pkg:
+ continue
+
+ selected_pkg = selected_pkg[-1]
+ if selected_pkg > pkg:
+ self._dynamic_config.ignored_binaries.pop(pkg)
+ continue
+
+ if selected_pkg.installed and \
+ selected_pkg.cpv == pkg.cpv and \
+ selected_pkg.metadata.get('BUILD_TIME') == \
+ pkg.metadata.get('BUILD_TIME'):
+ # We don't care about ignored binaries when an
+ # identical installed instance is selected to
+ # fill the slot.
+ self._dynamic_config.ignored_binaries.pop(pkg)
+ continue
+
+ if not self._dynamic_config.ignored_binaries:
+ return
+
+ self._show_merge_list()
+
+ writemsg("\n!!! The following binary packages have been ignored " + \
+ "due to non matching USE:\n\n", noiselevel=-1)
+
+ for pkg, flags in self._dynamic_config.ignored_binaries.items():
+ writemsg(" =%s" % pkg.cpv, noiselevel=-1)
+ if pkg.root_config.settings["ROOT"] != "/":
+ writemsg(" for %s" % (pkg.root,), noiselevel=-1)
+ writemsg("\n use flag(s): %s\n" % ", ".join(sorted(flags)),
+ noiselevel=-1)
+
+ msg = [
+ "",
+ "NOTE: The --binpkg-respect-use=n option will prevent emerge",
+ " from ignoring these binary packages if possible.",
+ " Using --binpkg-respect-use=y will silence this warning."
+ ]
+
+ for line in msg:
+ if line:
+ line = colorize("INFORM", line)
+ writemsg(line + "\n", noiselevel=-1)
+
def _show_missed_update(self):
# In order to minimize noise, show only the highest
@@ -578,6 +628,10 @@ class depgraph(object):
# Exclude installed here since we only
# want to show available updates.
continue
+ chosen_pkg = self._dynamic_config.mydbapi[pkg.root
+ ].match_pkgs(pkg.slot_atom)
+ if not chosen_pkg or chosen_pkg[-1] >= pkg:
+ continue
k = (pkg.root, pkg.slot_atom)
if k in missed_updates:
other_pkg, mask_type, parent_atoms = missed_updates[k]
@@ -613,6 +667,7 @@ class depgraph(object):
if not missed_updates:
return
+ self._show_merge_list()
backtrack_masked = []
for pkg, parent_atoms in missed_updates:
@@ -630,7 +685,7 @@ class depgraph(object):
"due to unsatisfied dependencies:\n\n", noiselevel=-1)
writemsg(str(pkg.slot_atom), noiselevel=-1)
- if pkg.root != '/':
+ if pkg.root_config.settings["ROOT"] != "/":
writemsg(" for %s" % (pkg.root,), noiselevel=-1)
writemsg("\n", noiselevel=-1)
@@ -646,7 +701,7 @@ class depgraph(object):
"!!! triggered by backtracking:\n\n", noiselevel=-1)
for pkg, parent_atoms in backtrack_masked:
writemsg(str(pkg.slot_atom), noiselevel=-1)
- if pkg.root != '/':
+ if pkg.root_config.settings["ROOT"] != "/":
writemsg(" for %s" % (pkg.root,), noiselevel=-1)
writemsg("\n", noiselevel=-1)
@@ -655,6 +710,7 @@ class depgraph(object):
if not missed_updates:
return
+ self._show_merge_list()
msg = []
msg.append("\nWARNING: One or more updates have been " + \
"skipped due to a dependency conflict:\n\n")
@@ -662,7 +718,7 @@ class depgraph(object):
indent = " "
for pkg, parent_atoms in missed_updates:
msg.append(str(pkg.slot_atom))
- if pkg.root != '/':
+ if pkg.root_config.settings["ROOT"] != "/":
msg.append(" for %s" % (pkg.root,))
msg.append("\n\n")
@@ -777,19 +833,28 @@ class depgraph(object):
else:
self._dynamic_config._slot_conflict_parent_atoms.add(parent_atom)
- def _reinstall_for_flags(self, forced_flags,
+ def _reinstall_for_flags(self, pkg, forced_flags,
orig_use, orig_iuse, cur_use, cur_iuse):
"""Return a set of flags that trigger reinstallation, or None if there
are no such flags."""
- if "--newuse" in self._frozen_config.myopts or \
- "--binpkg-respect-use" in self._frozen_config.myopts:
+
+ # binpkg_respect_use: Behave like newuse by default. If newuse is
+ # False and changed_use is True, then behave like changed_use.
+ binpkg_respect_use = (pkg.built and
+ self._dynamic_config.myparams.get("binpkg_respect_use")
+ in ("y", "auto"))
+ newuse = "--newuse" in self._frozen_config.myopts
+ changed_use = "changed-use" == self._frozen_config.myopts.get("--reinstall")
+
+ if newuse or (binpkg_respect_use and not changed_use):
flags = set(orig_iuse.symmetric_difference(
cur_iuse).difference(forced_flags))
flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
cur_iuse.intersection(cur_use)))
if flags:
return flags
- elif "changed-use" == self._frozen_config.myopts.get("--reinstall"):
+
+ elif changed_use or binpkg_respect_use:
flags = orig_iuse.intersection(orig_use).symmetric_difference(
cur_iuse.intersection(cur_use))
if flags:
@@ -827,7 +892,7 @@ class depgraph(object):
relationships from nested sets
@type add_to_digraph: Boolean
@rtype: Iterable
- @returns: All args given in the input together with additional
+ @return: All args given in the input together with additional
SetArg instances that are generated from nested sets
"""
@@ -876,8 +941,6 @@ class depgraph(object):
debug = "--debug" in self._frozen_config.myopts
buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
nodeps = "--nodeps" in self._frozen_config.myopts
- deep = self._dynamic_config.myparams.get("deep", 0)
- recurse = deep is True or dep.depth <= deep
if dep.blocker:
if not buildpkgonly and \
not nodeps and \
@@ -922,7 +985,7 @@ class depgraph(object):
# infinite backtracking loop.
if self._dynamic_config._allow_backtracking:
if dep.parent in self._dynamic_config._runtime_pkg_mask:
- if "--debug" in self._frozen_config.myopts:
+ if debug:
writemsg(
"!!! backtracking loop detected: %s %s\n" % \
(dep.parent,
@@ -937,7 +1000,7 @@ class depgraph(object):
if dep_pkg is None:
self._dynamic_config._backtrack_infos["missing dependency"] = dep
self._dynamic_config._need_restart = True
- if "--debug" in self._frozen_config.myopts:
+ if debug:
msg = []
msg.append("")
msg.append("")
@@ -1009,17 +1072,18 @@ class depgraph(object):
else:
# Display the specific atom from SetArg or
# Package types.
+ uneval = ""
+ if dep.atom is not dep.atom.unevaluated_atom:
+ uneval = " (%s)" % (dep.atom.unevaluated_atom,)
writemsg_level(
- "%s%s required by %s\n" %
- ("Parent Dep:".ljust(15), dep.atom, myparent),
+ "%s%s%s required by %s\n" %
+ ("Parent Dep:".ljust(15), dep.atom, uneval, myparent),
level=logging.DEBUG, noiselevel=-1)
# Ensure that the dependencies of the same package
# are never processed more than once.
previously_added = pkg in self._dynamic_config.digraph
- # select the correct /var database that we'll be checking against
- vardbapi = self._frozen_config.trees[pkg.root]["vartree"].dbapi
pkgsettings = self._frozen_config.pkgsettings[pkg.root]
arg_atoms = None
@@ -1036,7 +1100,7 @@ class depgraph(object):
# package selection, since we want to prompt the user
# for USE adjustment rather than have REQUIRED_USE
# affect package selection and || dep choices.
- if not pkg.built and pkg.metadata["REQUIRED_USE"] and \
+ if not pkg.built and pkg.metadata.get("REQUIRED_USE") and \
eapi_has_required_use(pkg.metadata["EAPI"]):
required_use_is_sat = check_required_use(
pkg.metadata["REQUIRED_USE"],
@@ -1055,7 +1119,8 @@ class depgraph(object):
if atom is None:
atom = Atom("=" + pkg.cpv)
self._dynamic_config._unsatisfied_deps_for_display.append(
- ((pkg.root, atom), {"myparent":dep.parent}))
+ ((pkg.root, atom),
+ {"myparent" : dep.parent, "show_req_use" : pkg}))
self._dynamic_config._skip_restart = True
return 0
@@ -1146,11 +1211,6 @@ class depgraph(object):
all_match = False
break
- if to_be_selected >= to_be_masked:
- # We only care about the parent atoms
- # when they trigger a downgrade.
- parent_atoms = set()
-
fallback_data.append((to_be_masked, parent_atoms))
if all_match:
@@ -1244,7 +1304,7 @@ class depgraph(object):
settings.unlock()
settings.setinst(pkg.cpv, pkg.metadata)
settings.lock()
- except portage.exception.InvalidDependString as e:
+ except portage.exception.InvalidDependString:
if not pkg.installed:
# should have been masked before it was selected
raise
@@ -1265,12 +1325,11 @@ class depgraph(object):
self._dynamic_config.digraph.add(pkg, parent, priority=priority)
self._add_parent_atom(pkg, parent_atom)
- """ This section determines whether we go deeper into dependencies or not.
- We want to go deeper on a few occasions:
- Installing package A, we need to make sure package A's deps are met.
- emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
- If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
- """
+ # This section determines whether we go deeper into dependencies or not.
+ # We want to go deeper on a few occasions:
+ # Installing package A, we need to make sure package A's deps are met.
+ # emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
+ # If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
if arg_atoms:
depth = 0
pkg.depth = depth
@@ -1318,13 +1377,8 @@ class depgraph(object):
def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
- mytype = pkg.type_name
myroot = pkg.root
- mykey = pkg.cpv
metadata = pkg.metadata
- myuse = self._pkg_use_enabled(pkg)
- jbigkey = pkg
- depth = pkg.depth + 1
removal_action = "remove" in self._dynamic_config.myparams
edepend={}
@@ -1361,7 +1415,7 @@ class depgraph(object):
if removal_action:
depend_root = myroot
else:
- depend_root = "/"
+ depend_root = self._frozen_config._running_root.root
root_deps = self._frozen_config.myopts.get("--root-deps")
if root_deps is not None:
if root_deps is True:
@@ -1388,7 +1442,6 @@ class depgraph(object):
)
debug = "--debug" in self._frozen_config.myopts
- strict = mytype != "installed"
for dep_root, dep_string, dep_priority in deps:
if not dep_string:
@@ -1481,7 +1534,7 @@ class depgraph(object):
selected_atoms = self._select_atoms(dep_root,
dep_string, myuse=self._pkg_use_enabled(pkg), parent=pkg,
strict=strict, priority=dep_priority)
- except portage.exception.InvalidDependString as e:
+ except portage.exception.InvalidDependString:
if pkg.installed:
self._dynamic_config._masked_installed.add(pkg)
return 1
@@ -1731,7 +1784,7 @@ class depgraph(object):
pkg_atom_map.setdefault(pkg, set()).add(atom)
cp_pkg_map.setdefault(pkg.cp, set()).add(pkg)
- for cp, pkgs in cp_pkg_map.items():
+ for pkgs in cp_pkg_map.values():
if len(pkgs) < 2:
for pkg in pkgs:
for atom in pkg_atom_map[pkg]:
@@ -1807,7 +1860,7 @@ class depgraph(object):
i += 1
else:
try:
- x = portage.dep.Atom(x)
+ x = portage.dep.Atom(x, eapi=pkg.metadata["EAPI"])
except portage.exception.InvalidAtom:
if not pkg.installed:
raise portage.exception.InvalidDependString(
@@ -1855,7 +1908,7 @@ class depgraph(object):
@param atom_without_category: an atom without a category component
@type atom_without_category: String
@rtype: list
- @returns: a list of atoms containing categories (possibly empty)
+ @return: a list of atoms containing categories (possibly empty)
"""
null_cp = portage.dep_getkey(insert_category_into_atom(
atom_without_category, "null"))
@@ -1886,7 +1939,6 @@ class depgraph(object):
def _iter_atoms_for_pkg(self, pkg):
depgraph_sets = self._dynamic_config.sets[pkg.root]
atom_arg_map = depgraph_sets.atom_arg_map
- root_config = self._frozen_config.roots[pkg.root]
for atom in depgraph_sets.atoms.iterAtomsForPackage(pkg):
if atom.cp != pkg.cp and \
self._have_new_virt(pkg.root, atom.cp):
@@ -1923,13 +1975,13 @@ class depgraph(object):
sets = root_config.sets
depgraph_sets = self._dynamic_config.sets[root_config.root]
myfavorites=[]
- myroot = self._frozen_config.target_root
- dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
- vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
- real_vardb = self._frozen_config._trees_orig[myroot]["vartree"].dbapi
- portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
- bindb = self._frozen_config.trees[myroot]["bintree"].dbapi
- pkgsettings = self._frozen_config.pkgsettings[myroot]
+ eroot = root_config.root
+ root = root_config.settings['ROOT']
+ vardb = self._frozen_config.trees[eroot]["vartree"].dbapi
+ real_vardb = self._frozen_config._trees_orig[eroot]["vartree"].dbapi
+ portdb = self._frozen_config.trees[eroot]["porttree"].dbapi
+ bindb = self._frozen_config.trees[eroot]["bintree"].dbapi
+ pkgsettings = self._frozen_config.pkgsettings[eroot]
args = []
onlydeps = "--onlydeps" in self._frozen_config.myopts
lookup_owners = []
@@ -1950,7 +2002,7 @@ class depgraph(object):
mytbz2=portage.xpak.tbz2(x)
mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
if os.path.realpath(x) != \
- os.path.realpath(self._frozen_config.trees[myroot]["bintree"].getname(mykey)):
+ os.path.realpath(bindb.bintree.getname(mykey)):
writemsg(colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n\n"), noiselevel=-1)
self._dynamic_config._skip_restart = True
return 0, myfavorites
@@ -1996,9 +2048,9 @@ class depgraph(object):
args.append(PackageArg(arg=x, package=pkg,
root_config=root_config))
elif x.startswith(os.path.sep):
- if not x.startswith(myroot):
+ if not x.startswith(eroot):
portage.writemsg(("\n\n!!! '%s' does not start with" + \
- " $ROOT.\n") % x, noiselevel=-1)
+ " $EROOT.\n") % x, noiselevel=-1)
self._dynamic_config._skip_restart = True
return 0, []
# Queue these up since it's most efficient to handle
@@ -2007,9 +2059,9 @@ class depgraph(object):
elif x.startswith("." + os.sep) or \
x.startswith(".." + os.sep):
f = os.path.abspath(x)
- if not f.startswith(myroot):
+ if not f.startswith(eroot):
portage.writemsg(("\n\n!!! '%s' (resolved from '%s') does not start with" + \
- " $ROOT.\n") % (f, x), noiselevel=-1)
+ " $EROOT.\n") % (f, x), noiselevel=-1)
self._dynamic_config._skip_restart = True
return 0, []
lookup_owners.append(f)
@@ -2126,7 +2178,7 @@ class depgraph(object):
for x in lookup_owners:
if not search_for_multiple and os.path.isdir(x):
search_for_multiple = True
- relative_paths.append(x[len(myroot)-1:])
+ relative_paths.append(x[len(root)-1:])
owners = set()
for pkg, relative_path in \
@@ -2526,24 +2578,36 @@ class depgraph(object):
# account for masking and USE settings.
_autounmask_backup = self._dynamic_config._autounmask
self._dynamic_config._autounmask = False
- mytrees["pkg_use_enabled"] = self._pkg_use_enabled
+ # backup state for restoration, in case of recursive
+ # calls to this method
+ backup_state = mytrees.copy()
try:
+ # clear state from previous call, in case this
+ # call is recursive (we have a backup, that we
+ # will use to restore it later)
+ mytrees.pop("pkg_use_enabled", None)
+ mytrees.pop("parent", None)
+ mytrees.pop("atom_graph", None)
+ mytrees.pop("priority", None)
+
+ mytrees["pkg_use_enabled"] = self._pkg_use_enabled
if parent is not None:
- trees[root]["parent"] = parent
- trees[root]["atom_graph"] = atom_graph
+ mytrees["parent"] = parent
+ mytrees["atom_graph"] = atom_graph
if priority is not None:
- trees[root]["priority"] = priority
+ mytrees["priority"] = priority
+
mycheck = portage.dep_check(depstring, None,
pkgsettings, myuse=myuse,
myroot=root, trees=trees)
finally:
+ # restore state
self._dynamic_config._autounmask = _autounmask_backup
- del mytrees["pkg_use_enabled"]
- if parent is not None:
- trees[root].pop("parent")
- trees[root].pop("atom_graph")
- if priority is not None:
- trees[root].pop("priority")
+ mytrees.pop("pkg_use_enabled", None)
+ mytrees.pop("parent", None)
+ mytrees.pop("atom_graph", None)
+ mytrees.pop("priority", None)
+ mytrees.update(backup_state)
if not mycheck[0]:
raise portage.exception.InvalidDependString(mycheck[1])
if parent is None:
@@ -2637,6 +2701,38 @@ class depgraph(object):
continue
yield atom
+ def _virt_deps_visible(self, pkg, ignore_use=False):
+ """
+ Assumes pkg is a virtual package. Traverses virtual deps recursively
+ and returns True if all deps are visible, False otherwise. This is
+ useful for checking if it will be necessary to expand virtual slots,
+ for cases like bug #382557.
+ """
+ try:
+ rdepend = self._select_atoms(
+ pkg.root, pkg.metadata.get("RDEPEND", ""),
+ myuse=self._pkg_use_enabled(pkg),
+ parent=pkg, priority=self._priority(runtime=True))
+ except InvalidDependString as e:
+ if not pkg.installed:
+ raise
+ writemsg_level("!!! Invalid RDEPEND in " + \
+ "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
+ (pkg.root, pkg.cpv, e),
+ noiselevel=-1, level=logging.ERROR)
+ return False
+
+ for atoms in rdepend.values():
+ for atom in atoms:
+ if ignore_use:
+ atom = atom.without_use
+ pkg, existing = self._select_package(
+ pkg.root, atom)
+ if pkg is None or not self._pkg_visibility_check(pkg):
+ return False
+
+ return True
+
def _get_dep_chain(self, start_node, target_atom=None,
unsatisfied_dependency=False):
"""
@@ -2652,6 +2748,7 @@ class depgraph(object):
node = start_node
child = None
all_parents = self._dynamic_config._parent_atoms
+ graph = self._dynamic_config.digraph
if target_atom is not None and isinstance(node, Package):
affecting_use = set()
@@ -2676,11 +2773,46 @@ class depgraph(object):
dep_chain.append((pkg_name, node.type_name))
+
+ # To build a dep chain for the given package we take
+ # "random" parents form the digraph, except for the
+ # first package, because we want a parent that forced
+ # the corresponding change (i.e '>=foo-2', instead 'foo').
+
+ traversed_nodes.add(start_node)
+
+ start_node_parent_atoms = {}
+ for ppkg, patom in all_parents.get(node, []):
+ # Get a list of suitable atoms. For use deps
+ # (aka unsatisfied_dependency is not None) we
+ # need that the start_node doesn't match the atom.
+ if not unsatisfied_dependency or \
+ not InternalPackageSet(initial_atoms=(patom,)).findAtomForPackage(start_node):
+ start_node_parent_atoms.setdefault(patom, []).append(ppkg)
+
+ if start_node_parent_atoms:
+ # If there are parents in all_parents then use one of them.
+ # If not, then this package got pulled in by an Arg and
+ # will be correctly handled by the code that handles later
+ # packages in the dep chain.
+ best_match = best_match_to_list(node.cpv, start_node_parent_atoms)
+
+ child = node
+ for ppkg in start_node_parent_atoms[best_match]:
+ node = ppkg
+ if ppkg in self._dynamic_config._initial_arg_list:
+ # Stop if reached the top level of the dep chain.
+ break
+
while node is not None:
traversed_nodes.add(node)
- if isinstance(node, DependencyArg):
- if self._dynamic_config.digraph.parent_nodes(node):
+ if node not in graph:
+ # The parent is not in the graph due to backtracking.
+ break
+
+ elif isinstance(node, DependencyArg):
+ if graph.parent_nodes(node):
node_type = "set"
else:
node_type = "argument"
@@ -2689,17 +2821,29 @@ class depgraph(object):
elif node is not start_node:
for ppkg, patom in all_parents[child]:
if ppkg == node:
+ if child is start_node and unsatisfied_dependency and \
+ InternalPackageSet(initial_atoms=(patom,)).findAtomForPackage(child):
+ # This atom is satisfied by child, there must be another atom.
+ continue
atom = patom.unevaluated_atom
break
dep_strings = set()
- for priority in self._dynamic_config.digraph.nodes[node][0][child]:
- if priority.buildtime:
- dep_strings.add(node.metadata["DEPEND"])
- if priority.runtime:
- dep_strings.add(node.metadata["RDEPEND"])
- if priority.runtime_post:
- dep_strings.add(node.metadata["PDEPEND"])
+ priorities = graph.nodes[node][0].get(child)
+ if priorities is None:
+ # This edge comes from _parent_atoms and was not added to
+ # the graph, and _parent_atoms does not contain priorities.
+ dep_strings.add(node.metadata["DEPEND"])
+ dep_strings.add(node.metadata["RDEPEND"])
+ dep_strings.add(node.metadata["PDEPEND"])
+ else:
+ for priority in priorities:
+ if priority.buildtime:
+ dep_strings.add(node.metadata["DEPEND"])
+ if priority.runtime:
+ dep_strings.add(node.metadata["RDEPEND"])
+ if priority.runtime_post:
+ dep_strings.add(node.metadata["PDEPEND"])
affecting_use = set()
for dep_str in dep_strings:
@@ -2726,10 +2870,6 @@ class depgraph(object):
dep_chain.append((pkg_name, node.type_name))
- if node not in self._dynamic_config.digraph:
- # The parent is not in the graph due to backtracking.
- break
-
# When traversing to parents, prefer arguments over packages
# since arguments are root nodes. Never traverse the same
# package twice, in order to prevent an infinite loop.
@@ -2791,7 +2931,7 @@ class depgraph(object):
def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None,
- check_backtrack=False, check_autounmask_breakage=False):
+ check_backtrack=False, check_autounmask_breakage=False, show_req_use=None):
"""
When check_backtrack=True, no output is produced and
the method either returns or raises _backtrack_mask if
@@ -2810,14 +2950,13 @@ class depgraph(object):
xinfo = _unicode_decode('"%s"') % (myparent,)
# Discard null/ from failed cpv_expand category expansion.
xinfo = xinfo.replace("null/", "")
- if root != "/":
+ if root != self._frozen_config._running_root.root:
xinfo = "%s for %s" % (xinfo, root)
masked_packages = []
missing_use = []
missing_use_adjustable = set()
required_use_unsatisfied = []
masked_pkg_instances = set()
- missing_licenses = []
have_eapi_mask = False
pkgsettings = self._frozen_config.pkgsettings[root]
root_config = self._frozen_config.roots[root]
@@ -2828,7 +2967,6 @@ class depgraph(object):
for db, pkg_type, built, installed, db_keys in dbs:
if installed:
continue
- match = db.match
if hasattr(db, "xmatch"):
cpv_list = db.xmatch("match-all-cpv-only", atom.without_use)
else:
@@ -2854,12 +2992,20 @@ class depgraph(object):
repo = metadata.get('repository')
pkg = self._pkg(cpv, pkg_type, root_config,
installed=installed, myrepo=repo)
- if not atom_set.findAtomForPackage(pkg,
- modified_use=self._pkg_use_enabled(pkg)):
- continue
# pkg.metadata contains calculated USE for ebuilds,
# required later for getMissingLicenses.
metadata = pkg.metadata
+ if pkg.invalid:
+ # Avoid doing any operations with packages that
+ # have invalid metadata. It would be unsafe at
+ # least because it could trigger unhandled
+ # exceptions in places like check_required_use().
+ masked_packages.append(
+ (root_config, pkgsettings, cpv, repo, metadata, mreasons))
+ continue
+ if not atom_set.findAtomForPackage(pkg,
+ modified_use=self._pkg_use_enabled(pkg)):
+ continue
if pkg in self._dynamic_config._runtime_pkg_mask:
backtrack_reasons = \
self._dynamic_config._runtime_pkg_mask[pkg]
@@ -2887,7 +3033,7 @@ class depgraph(object):
raise
if not mreasons and \
not pkg.built and \
- pkg.metadata["REQUIRED_USE"] and \
+ pkg.metadata.get("REQUIRED_USE") and \
eapi_has_required_use(pkg.metadata["EAPI"]):
if not check_required_use(
pkg.metadata["REQUIRED_USE"],
@@ -2942,7 +3088,7 @@ class depgraph(object):
continue
missing_use_adjustable.add(pkg)
- required_use = pkg.metadata["REQUIRED_USE"]
+ required_use = pkg.metadata.get("REQUIRED_USE")
required_use_warning = ""
if required_use:
old_use = self._pkg_use_enabled(pkg)
@@ -2990,7 +3136,7 @@ class depgraph(object):
if untouchable_flags.intersection(involved_flags):
continue
- required_use = myparent.metadata["REQUIRED_USE"]
+ required_use = myparent.metadata.get("REQUIRED_USE")
required_use_warning = ""
if required_use:
old_use = self._pkg_use_enabled(myparent)
@@ -3066,62 +3212,66 @@ class depgraph(object):
mask_docs = False
- if required_use_unsatisfied:
+ if show_req_use is None and required_use_unsatisfied:
# We have an unmasked package that only requires USE adjustment
# in order to satisfy REQUIRED_USE, and nothing more. We assume
# that the user wants the latest version, so only the first
# instance is displayed.
- pkg = required_use_unsatisfied[0]
+ show_req_use = required_use_unsatisfied[0]
+
+ if show_req_use is not None:
+
+ pkg = show_req_use
output_cpv = pkg.cpv + _repo_separator + pkg.repo
- writemsg_stdout("\n!!! " + \
+ writemsg("\n!!! " + \
colorize("BAD", "The ebuild selected to satisfy ") + \
colorize("INFORM", xinfo) + \
colorize("BAD", " has unmet requirements.") + "\n",
noiselevel=-1)
use_display = pkg_use_display(pkg, self._frozen_config.myopts)
- writemsg_stdout("- %s %s\n" % (output_cpv, use_display),
+ writemsg("- %s %s\n" % (output_cpv, use_display),
noiselevel=-1)
- writemsg_stdout("\n The following REQUIRED_USE flag constraints " + \
+ writemsg("\n The following REQUIRED_USE flag constraints " + \
"are unsatisfied:\n", noiselevel=-1)
reduced_noise = check_required_use(
pkg.metadata["REQUIRED_USE"],
self._pkg_use_enabled(pkg),
pkg.iuse.is_valid_flag).tounicode()
- writemsg_stdout(" %s\n" % \
+ writemsg(" %s\n" % \
human_readable_required_use(reduced_noise),
noiselevel=-1)
normalized_required_use = \
" ".join(pkg.metadata["REQUIRED_USE"].split())
if reduced_noise != normalized_required_use:
- writemsg_stdout("\n The above constraints " + \
+ writemsg("\n The above constraints " + \
"are a subset of the following complete expression:\n",
noiselevel=-1)
- writemsg_stdout(" %s\n" % \
+ writemsg(" %s\n" % \
human_readable_required_use(normalized_required_use),
noiselevel=-1)
- writemsg_stdout("\n", noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
elif show_missing_use:
- writemsg_stdout("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".\n", noiselevel=-1)
- writemsg_stdout("!!! One of the following packages is required to complete your request:\n", noiselevel=-1)
+ writemsg("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".\n", noiselevel=-1)
+ writemsg("!!! One of the following packages is required to complete your request:\n", noiselevel=-1)
for pkg, mreasons in show_missing_use:
- writemsg_stdout("- "+pkg.cpv+_repo_separator+pkg.repo+" ("+", ".join(mreasons)+")\n", noiselevel=-1)
+ writemsg("- "+pkg.cpv+_repo_separator+pkg.repo+" ("+", ".join(mreasons)+")\n", noiselevel=-1)
elif masked_packages:
- writemsg_stdout("\n!!! " + \
+ writemsg("\n!!! " + \
colorize("BAD", "All ebuilds that could satisfy ") + \
colorize("INFORM", xinfo) + \
colorize("BAD", " have been masked.") + "\n", noiselevel=-1)
- writemsg_stdout("!!! One of the following masked packages is required to complete your request:\n", noiselevel=-1)
+ writemsg("!!! One of the following masked packages is required to complete your request:\n", noiselevel=-1)
have_eapi_mask = show_masked_packages(masked_packages)
if have_eapi_mask:
- writemsg_stdout("\n", noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
msg = ("The current version of portage supports " + \
"EAPI '%s'. You must upgrade to a newer version" + \
" of portage before EAPI masked packages can" + \
" be installed.") % portage.const.EAPI
- writemsg_stdout("\n".join(textwrap.wrap(msg, 75)), noiselevel=-1)
- writemsg_stdout("\n", noiselevel=-1)
+ writemsg("\n".join(textwrap.wrap(msg, 75)), noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
mask_docs = True
else:
cp_exists = False
@@ -3131,7 +3281,7 @@ class depgraph(object):
cp_exists = True
break
- writemsg_stdout("\nemerge: there are no ebuilds to satisfy "+green(xinfo)+".\n", noiselevel=-1)
+ writemsg("\nemerge: there are no ebuilds to satisfy "+green(xinfo)+".\n", noiselevel=-1)
if isinstance(myparent, AtomArg) and \
not cp_exists and \
self._frozen_config.myopts.get(
@@ -3141,12 +3291,13 @@ class depgraph(object):
if cat == "null":
cat = None
- writemsg_stdout("\nemerge: searching for similar names..."
+ writemsg("\nemerge: searching for similar names..."
, noiselevel=-1)
all_cp = set()
all_cp.update(vardb.cp_all())
- all_cp.update(portdb.cp_all())
+ if "--usepkgonly" not in self._frozen_config.myopts:
+ all_cp.update(portdb.cp_all())
if "--usepkg" in self._frozen_config.myopts:
all_cp.update(bindb.cp_all())
# discard dir containing no ebuilds
@@ -3164,9 +3315,18 @@ class depgraph(object):
for other_cp in list(all_cp):
other_pkg = portage.catsplit(other_cp)[1]
if other_pkg == pkg:
- # discard dir containing no ebuilds
- all_cp.discard(other_cp)
- continue
+ # Check for non-identical package that
+ # differs only by upper/lower case.
+ identical = True
+ for cp_orig in orig_cp_map[other_cp]:
+ if portage.catsplit(cp_orig)[1] != \
+ portage.catsplit(atom.cp)[1]:
+ identical = False
+ break
+ if identical:
+ # discard dir containing no ebuilds
+ all_cp.discard(other_cp)
+ continue
pkg_to_cp.setdefault(other_pkg, set()).add(other_cp)
pkg_matches = difflib.get_close_matches(pkg, pkg_to_cp)
matches = []
@@ -3179,16 +3339,16 @@ class depgraph(object):
matches = matches_orig_case
if len(matches) == 1:
- writemsg_stdout("\nemerge: Maybe you meant " + matches[0] + "?\n"
+ writemsg("\nemerge: Maybe you meant " + matches[0] + "?\n"
, noiselevel=-1)
elif len(matches) > 1:
- writemsg_stdout(
+ writemsg(
"\nemerge: Maybe you meant any of these: %s?\n" % \
(", ".join(matches),), noiselevel=-1)
else:
# Generally, this would only happen if
# all dbapis are empty.
- writemsg_stdout(" nothing similar found.\n"
+ writemsg(" nothing similar found.\n"
, noiselevel=-1)
msg = []
if not isinstance(myparent, AtomArg):
@@ -3201,12 +3361,12 @@ class depgraph(object):
(node)), node_type))
if msg:
- writemsg_stdout("\n".join(msg), noiselevel=-1)
- writemsg_stdout("\n", noiselevel=-1)
+ writemsg("\n".join(msg), noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
if mask_docs:
show_mask_docs()
- writemsg_stdout("\n", noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
def _iter_match_pkgs_any(self, root_config, atom, onlydeps=False):
for db, pkg_type, built, installed, db_keys in \
@@ -3224,51 +3384,12 @@ class depgraph(object):
"""
db = root_config.trees[self.pkg_tree_map[pkg_type]].dbapi
-
- if hasattr(db, "xmatch"):
- # For portdbapi we match only against the cpv, in order
- # to bypass unnecessary cache access for things like IUSE
- # and SLOT. Later, we cache the metadata in a Package
- # instance, and use that for further matching. This
- # optimization is especially relevant since
- # pordbapi.aux_get() does not cache calls that have
- # myrepo or mytree arguments.
- cpv_list = db.xmatch("match-all-cpv-only", atom)
- else:
- cpv_list = db.match(atom)
-
- # USE=multislot can make an installed package appear as if
- # it doesn't satisfy a slot dependency. Rebuilding the ebuild
- # won't do any good as long as USE=multislot is enabled since
- # the newly built package still won't have the expected slot.
- # Therefore, assume that such SLOT dependencies are already
- # satisfied rather than forcing a rebuild.
+ atom_exp = dep_expand(atom, mydb=db, settings=root_config.settings)
+ cp_list = db.cp_list(atom_exp.cp)
+ matched_something = False
installed = pkg_type == 'installed'
- if installed and not cpv_list and atom.slot:
- for cpv in db.match(atom.cp):
- slot_available = False
- for other_db, other_type, other_built, \
- other_installed, other_keys in \
- self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
- try:
- if atom.slot == \
- other_db.aux_get(cpv, ["SLOT"])[0]:
- slot_available = True
- break
- except KeyError:
- pass
- if not slot_available:
- continue
- inst_pkg = self._pkg(cpv, "installed",
- root_config, installed=installed, myrepo = atom.repo)
- # Remove the slot from the atom and verify that
- # the package matches the resulting atom.
- if portage.match_from_list(
- atom.without_slot, [inst_pkg]):
- yield inst_pkg
- return
-
- if cpv_list:
+
+ if cp_list:
atom_set = InternalPackageSet(initial_atoms=(atom,),
allow_repo=True)
if atom.repo is None and hasattr(db, "getRepositories"):
@@ -3277,8 +3398,13 @@ class depgraph(object):
repo_list = [atom.repo]
# descending order
- cpv_list.reverse()
- for cpv in cpv_list:
+ cp_list.reverse()
+ for cpv in cp_list:
+ # Call match_from_list on one cpv at a time, in order
+ # to avoid unnecessary match_from_list comparisons on
+ # versions that are never yielded from this method.
+ if not match_from_list(atom_exp, [cpv]):
+ continue
for repo in repo_list:
try:
@@ -3295,16 +3421,63 @@ class depgraph(object):
# Make sure that cpv from the current repo satisfies the atom.
# This might not be the case if there are several repos with
# the same cpv, but different metadata keys, like SLOT.
- # Also, for portdbapi, parts of the match that require
- # metadata access are deferred until we have cached the
- # metadata in a Package instance.
+ # Also, parts of the match that require metadata access
+ # are deferred until we have cached the metadata in a
+ # Package instance.
if not atom_set.findAtomForPackage(pkg,
modified_use=self._pkg_use_enabled(pkg)):
continue
+ matched_something = True
yield pkg
+ # USE=multislot can make an installed package appear as if
+ # it doesn't satisfy a slot dependency. Rebuilding the ebuild
+ # won't do any good as long as USE=multislot is enabled since
+ # the newly built package still won't have the expected slot.
+ # Therefore, assume that such SLOT dependencies are already
+ # satisfied rather than forcing a rebuild.
+ if not matched_something and installed and atom.slot is not None:
+
+ if "remove" in self._dynamic_config.myparams:
+ # We need to search the portdbapi, which is not in our
+ # normal dbs list, in order to find the real SLOT.
+ portdb = self._frozen_config.trees[root_config.root]["porttree"].dbapi
+ db_keys = list(portdb._aux_cache_keys)
+ dbs = [(portdb, "ebuild", False, False, db_keys)]
+ else:
+ dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
+
+ cp_list = db.cp_list(atom_exp.cp)
+ if cp_list:
+ atom_set = InternalPackageSet(
+ initial_atoms=(atom.without_slot,), allow_repo=True)
+ atom_exp_without_slot = atom_exp.without_slot
+ cp_list.reverse()
+ for cpv in cp_list:
+ if not match_from_list(atom_exp_without_slot, [cpv]):
+ continue
+ slot_available = False
+ for other_db, other_type, other_built, \
+ other_installed, other_keys in dbs:
+ try:
+ if atom.slot == \
+ other_db.aux_get(cpv, ["SLOT"])[0]:
+ slot_available = True
+ break
+ except KeyError:
+ pass
+ if not slot_available:
+ continue
+ inst_pkg = self._pkg(cpv, "installed",
+ root_config, installed=installed, myrepo=atom.repo)
+ # Remove the slot from the atom and verify that
+ # the package matches the resulting atom.
+ if atom_set.findAtomForPackage(inst_pkg):
+ yield inst_pkg
+ return
+
def _select_pkg_highest_available(self, root, atom, onlydeps=False):
- cache_key = (root, atom, onlydeps)
+ cache_key = (root, atom, atom.unevaluated_atom, onlydeps)
ret = self._dynamic_config._highest_pkg_cache.get(cache_key)
if ret is not None:
pkg, existing = ret
@@ -3320,7 +3493,6 @@ class depgraph(object):
self._dynamic_config._highest_pkg_cache[cache_key] = ret
pkg, existing = ret
if pkg is not None:
- settings = pkg.root_config.settings
if self._pkg_visibility_check(pkg) and \
not (pkg.installed and pkg.masks):
self._dynamic_config._visible_pkgs[pkg.root].cpv_inject(pkg)
@@ -3347,40 +3519,81 @@ class depgraph(object):
return False
return True
+ class _AutounmaskLevel(object):
+ __slots__ = ("allow_use_changes", "allow_unstable_keywords", "allow_license_changes", \
+ "allow_missing_keywords", "allow_unmasks")
+
+ def __init__(self):
+ self.allow_use_changes = False
+ self.allow_license_changes = False
+ self.allow_unstable_keywords = False
+ self.allow_missing_keywords = False
+ self.allow_unmasks = False
+
+ def _autounmask_levels(self):
+ """
+ Iterate over the different allowed things to unmask.
+
+ 1. USE
+ 2. USE + ~arch + license
+ 3. USE + ~arch + license + missing keywords
+ 4. USE + ~arch + license + masks
+ 5. USE + ~arch + license + missing keywords + masks
+
+ Some thoughts:
+ * Do least invasive changes first.
+ * Try unmasking alone before unmasking + missing keywords
+ to avoid -9999 versions if possible
+ """
+
+ if self._dynamic_config._autounmask is not True:
+ return
+
+ autounmask_keep_masks = self._frozen_config.myopts.get("--autounmask-keep-masks", "n") != "n"
+ autounmask_level = self._AutounmaskLevel()
+
+ autounmask_level.allow_use_changes = True
+
+ for only_use_changes in (True, False):
+
+ autounmask_level.allow_unstable_keywords = (not only_use_changes)
+ autounmask_level.allow_license_changes = (not only_use_changes)
+
+ for missing_keyword, unmask in ((False,False), (True, False), (False, True), (True, True)):
+
+ if (only_use_changes or autounmask_keep_masks) and (missing_keyword or unmask):
+ break
+
+ autounmask_level.allow_missing_keywords = missing_keyword
+ autounmask_level.allow_unmasks = unmask
+
+ yield autounmask_level
+
+
def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
pkg, existing = self._wrapped_select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
default_selection = (pkg, existing)
- if self._dynamic_config._autounmask is True:
+ def reset_pkg(pkg):
if pkg is not None and \
pkg.installed and \
not self._want_installed_pkg(pkg):
pkg = None
- for only_use_changes in True, False:
+ if self._dynamic_config._autounmask is True:
+ reset_pkg(pkg)
+
+ for autounmask_level in self._autounmask_levels():
if pkg is not None:
break
- for allow_unmasks in (False, True):
- if only_use_changes and allow_unmasks:
- continue
+ pkg, existing = \
+ self._wrapped_select_pkg_highest_available_imp(
+ root, atom, onlydeps=onlydeps,
+ autounmask_level=autounmask_level)
- if pkg is not None:
- break
-
- pkg, existing = \
- self._wrapped_select_pkg_highest_available_imp(
- root, atom, onlydeps=onlydeps,
- allow_use_changes=True,
- allow_unstable_keywords=(not only_use_changes),
- allow_license_changes=(not only_use_changes),
- allow_unmasks=allow_unmasks)
-
- if pkg is not None and \
- pkg.installed and \
- not self._want_installed_pkg(pkg):
- pkg = None
+ reset_pkg(pkg)
if self._dynamic_config._need_restart:
return None, None
@@ -3392,21 +3605,20 @@ class depgraph(object):
return pkg, existing
- def _pkg_visibility_check(self, pkg, allow_unstable_keywords=False, allow_license_changes=False, allow_unmasks=False):
+ def _pkg_visibility_check(self, pkg, autounmask_level=None, trust_graph=True):
if pkg.visible:
return True
- if pkg in self._dynamic_config.digraph:
+ if trust_graph and pkg in self._dynamic_config.digraph:
# Sometimes we need to temporarily disable
# dynamic_config._autounmask, but for overall
- # consistency in dependency resolution, in any
- # case we want to respect autounmask visibity
- # for packages that have already been added to
- # the dependency graph.
+ # consistency in dependency resolution, in most
+ # cases we want to treat packages in the graph
+ # as though they are visible.
return True
- if not self._dynamic_config._autounmask:
+ if not self._dynamic_config._autounmask or autounmask_level is None:
return False
pkgsettings = self._frozen_config.pkgsettings[pkg.root]
@@ -3455,11 +3667,10 @@ class depgraph(object):
#Package has already been unmasked.
return True
- #We treat missing keywords in the same way as masks.
- if (masked_by_unstable_keywords and not allow_unstable_keywords) or \
- (masked_by_missing_keywords and not allow_unmasks) or \
- (masked_by_p_mask and not allow_unmasks) or \
- (missing_licenses and not allow_license_changes):
+ if (masked_by_unstable_keywords and not autounmask_level.allow_unstable_keywords) or \
+ (masked_by_missing_keywords and not autounmask_level.allow_missing_keywords) or \
+ (masked_by_p_mask and not autounmask_level.allow_unmasks) or \
+ (missing_licenses and not autounmask_level.allow_license_changes):
#We are not allowed to do the needed changes.
return False
@@ -3556,7 +3767,7 @@ class depgraph(object):
if new_changes != old_changes:
#Don't do the change if it violates REQUIRED_USE.
- required_use = pkg.metadata["REQUIRED_USE"]
+ required_use = pkg.metadata.get("REQUIRED_USE")
if required_use and check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
return old_use
@@ -3574,13 +3785,11 @@ class depgraph(object):
self._dynamic_config._need_restart = True
return new_use
- def _wrapped_select_pkg_highest_available_imp(self, root, atom, onlydeps=False, \
- allow_use_changes=False, allow_unstable_keywords=False, allow_license_changes=False, allow_unmasks=False):
+ def _wrapped_select_pkg_highest_available_imp(self, root, atom, onlydeps=False, autounmask_level=None):
root_config = self._frozen_config.roots[root]
pkgsettings = self._frozen_config.pkgsettings[root]
dbs = self._dynamic_config._filtered_trees[root]["dbs"]
vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
- portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
# List of acceptable packages, ordered by type preference.
matched_packages = []
matched_pkgs_ignore_use = []
@@ -3588,6 +3797,8 @@ class depgraph(object):
if not isinstance(atom, portage.dep.Atom):
atom = portage.dep.Atom(atom)
atom_cp = atom.cp
+ have_new_virt = atom_cp.startswith("virtual/") and \
+ self._have_new_virt(root, atom_cp)
atom_set = InternalPackageSet(initial_atoms=(atom,), allow_repo=True)
existing_node = None
myeb = None
@@ -3635,6 +3846,9 @@ class depgraph(object):
# USE configuration.
for pkg in self._iter_match_pkgs(root_config, pkg_type, atom.without_use,
onlydeps=onlydeps):
+ if pkg.cp != atom_cp and have_new_virt:
+ # pull in a new-style virtual instead
+ continue
if pkg in self._dynamic_config._runtime_pkg_mask:
# The package has been masked by the backtracking logic
continue
@@ -3698,10 +3912,7 @@ class depgraph(object):
# _dep_check_composite_db, in order to prevent
# incorrect choices in || deps like bug #351828.
- if not self._pkg_visibility_check(pkg, \
- allow_unstable_keywords=allow_unstable_keywords,
- allow_license_changes=allow_license_changes,
- allow_unmasks=allow_unmasks):
+ if not self._pkg_visibility_check(pkg, autounmask_level):
continue
# Enable upgrade or downgrade to a version
@@ -3741,19 +3952,13 @@ class depgraph(object):
pkg_eb_visible = False
for pkg_eb in self._iter_match_pkgs(pkg.root_config,
"ebuild", Atom("=%s" % (pkg.cpv,))):
- if self._pkg_visibility_check(pkg_eb, \
- allow_unstable_keywords=allow_unstable_keywords,
- allow_license_changes=allow_license_changes,
- allow_unmasks=allow_unmasks):
+ if self._pkg_visibility_check(pkg_eb, autounmask_level):
pkg_eb_visible = True
break
if not pkg_eb_visible:
continue
else:
- if not self._pkg_visibility_check(pkg_eb, \
- allow_unstable_keywords=allow_unstable_keywords,
- allow_license_changes=allow_license_changes,
- allow_unmasks=allow_unmasks):
+ if not self._pkg_visibility_check(pkg_eb, autounmask_level):
continue
# Calculation of USE for unbuilt ebuilds is relatively
@@ -3783,7 +3988,7 @@ class depgraph(object):
if atom.use:
matched_pkgs_ignore_use.append(pkg)
- if allow_use_changes and not pkg.built:
+ if autounmask_level and autounmask_level.allow_use_changes and not pkg.built:
target_use = {}
for flag in atom.use.enabled:
target_use[flag] = True
@@ -3852,6 +4057,7 @@ class depgraph(object):
e_pkg = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
if not e_pkg:
break
+
# Use PackageSet.findAtomForPackage()
# for PROVIDE support.
if atom_set.findAtomForPackage(e_pkg, modified_use=self._pkg_use_enabled(e_pkg)):
@@ -3872,7 +4078,8 @@ class depgraph(object):
if built and not useoldpkg and (not installed or matched_pkgs_ignore_use) and \
("--newuse" in self._frozen_config.myopts or \
"--reinstall" in self._frozen_config.myopts or \
- "--binpkg-respect-use" in self._frozen_config.myopts):
+ (not installed and self._dynamic_config.myparams.get(
+ "binpkg_respect_use") in ("y", "auto"))):
iuses = pkg.iuse.all
old_use = self._pkg_use_enabled(pkg)
if myeb:
@@ -3886,9 +4093,11 @@ class depgraph(object):
cur_iuse = iuses
if myeb and not usepkgonly and not useoldpkg:
cur_iuse = myeb.iuse.all
- if self._reinstall_for_flags(forced_flags,
- old_use, iuses,
- now_use, cur_iuse):
+ reinstall_for_flags = self._reinstall_for_flags(pkg,
+ forced_flags, old_use, iuses, now_use, cur_iuse)
+ if reinstall_for_flags:
+ if not pkg.installed:
+ self._dynamic_config.ignored_binaries.setdefault(pkg, set()).update(reinstall_for_flags)
break
# Compare current config to installed package
# and do not reinstall if possible.
@@ -3905,7 +4114,7 @@ class depgraph(object):
cur_use = self._pkg_use_enabled(pkg)
cur_iuse = pkg.iuse.all
reinstall_for_flags = \
- self._reinstall_for_flags(
+ self._reinstall_for_flags(pkg,
forced_flags, old_use, old_iuse,
cur_use, cur_iuse)
if reinstall_for_flags:
@@ -4002,21 +4211,16 @@ class depgraph(object):
if avoid_update:
for pkg in matched_packages:
- if pkg.installed and self._pkg_visibility_check(pkg, \
- allow_unstable_keywords=allow_unstable_keywords,
- allow_license_changes=allow_license_changes,
- allow_unmasks=allow_unmasks):
+ if pkg.installed and self._pkg_visibility_check(pkg, autounmask_level):
return pkg, existing_node
visible_matches = []
if matched_oldpkg:
visible_matches = [pkg.cpv for pkg in matched_oldpkg \
- if self._pkg_visibility_check(pkg, allow_unstable_keywords=allow_unstable_keywords,
- allow_license_changes=allow_license_changes, allow_unmasks=allow_unmasks)]
+ if self._pkg_visibility_check(pkg, autounmask_level)]
if not visible_matches:
visible_matches = [pkg.cpv for pkg in matched_packages \
- if self._pkg_visibility_check(pkg, allow_unstable_keywords=allow_unstable_keywords,
- allow_license_changes=allow_license_changes, allow_unmasks=allow_unmasks)]
+ if self._pkg_visibility_check(pkg, autounmask_level)]
if visible_matches:
bestmatch = portage.best(visible_matches)
else:
@@ -4046,11 +4250,12 @@ class depgraph(object):
"""
Select packages that are installed.
"""
- vardb = self._dynamic_config._graph_trees[root]["vartree"].dbapi
- matches = vardb.match_pkgs(atom)
+ matches = list(self._iter_match_pkgs(self._frozen_config.roots[root],
+ "installed", atom))
if not matches:
return None, None
if len(matches) > 1:
+ matches.reverse() # ascending order
unmasked = [pkg for pkg in matches if \
self._pkg_visibility_check(pkg)]
if unmasked:
@@ -4088,11 +4293,10 @@ class depgraph(object):
"recurse" not in self._dynamic_config.myparams:
return 1
- if "complete" not in self._dynamic_config.myparams:
- # Automatically enable complete mode if there are any
- # downgrades, since they often break dependencies
- # (like in bug #353613).
- have_downgrade = False
+ if "complete" not in self._dynamic_config.myparams and \
+ self._dynamic_config.myparams.get("complete_if_new_ver", "y") == "y":
+ # Enable complete mode if an installed package version will change.
+ version_change = False
for node in self._dynamic_config.digraph:
if not isinstance(node, Package) or \
node.operation != "merge":
@@ -4100,16 +4304,15 @@ class depgraph(object):
vardb = self._frozen_config.roots[
node.root].trees["vartree"].dbapi
inst_pkg = vardb.match_pkgs(node.slot_atom)
- if inst_pkg and inst_pkg[0] > node:
- have_downgrade = True
+ if inst_pkg and (inst_pkg[0] > node or inst_pkg[0] < node):
+ version_change = True
break
- if have_downgrade:
+ if version_change:
self._dynamic_config.myparams["complete"] = True
- else:
- # Skip complete graph mode, in order to avoid consuming
- # enough time to disturb users.
- return 1
+
+ if "complete" not in self._dynamic_config.myparams:
+ return 1
self._load_vdb()
@@ -4137,7 +4340,8 @@ class depgraph(object):
args = self._dynamic_config._initial_arg_list[:]
for root in self._frozen_config.roots:
if root != self._frozen_config.target_root and \
- "remove" in self._dynamic_config.myparams:
+ ("remove" in self._dynamic_config.myparams or
+ self._frozen_config.myopts.get("--root-deps") is not None):
# Only pull in deps for the relevant root.
continue
depgraph_sets = self._dynamic_config.sets[root]
@@ -4265,9 +4469,6 @@ class depgraph(object):
"--nodeps" in self._frozen_config.myopts:
return True
- complete = "complete" in self._dynamic_config.myparams
- deep = "deep" in self._dynamic_config.myparams
-
if True:
# Pull in blockers from all installed packages that haven't already
# been pulled into the depgraph, in order to ensure that they are
@@ -4281,11 +4482,14 @@ class depgraph(object):
# are already built.
dep_keys = ["RDEPEND", "PDEPEND"]
for myroot in self._frozen_config.trees:
+
+ if self._frozen_config.myopts.get("--root-deps") is not None and \
+ myroot != self._frozen_config.target_root:
+ continue
+
vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
- portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
pkgsettings = self._frozen_config.pkgsettings[myroot]
root_config = self._frozen_config.roots[myroot]
- dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
final_db = self._dynamic_config.mydbapi[myroot]
blocker_cache = BlockerCache(myroot, vardb)
@@ -4304,7 +4508,8 @@ class depgraph(object):
# packages masked by license, since the user likely wants
# to adjust ACCEPT_LICENSE.
if pkg in final_db:
- if not self._pkg_visibility_check(pkg) and \
+ if not self._pkg_visibility_check(pkg,
+ trust_graph=False) and \
(pkg_in_graph or 'LICENSE' in pkg.masks):
self._dynamic_config._masked_installed.add(pkg)
else:
@@ -4381,7 +4586,7 @@ class depgraph(object):
# matches (this can happen if an atom lacks a
# category).
show_invalid_depstring_notice(
- pkg, depstr, str(e))
+ pkg, depstr, _unicode_decode("%s") % (e,))
del e
raise
if not success:
@@ -4412,7 +4617,8 @@ class depgraph(object):
except portage.exception.InvalidAtom as e:
depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
show_invalid_depstring_notice(
- pkg, depstr, "Invalid Atom: %s" % (e,))
+ pkg, depstr,
+ _unicode_decode("Invalid Atom: %s") % (e,))
return False
for cpv in stale_cache:
del blocker_cache[cpv]
@@ -4852,15 +5058,6 @@ class depgraph(object):
if replacement_portage == running_portage:
replacement_portage = None
- if replacement_portage is not None and \
- (running_portage is None or \
- running_portage.cpv != replacement_portage.cpv or \
- '9999' in replacement_portage.cpv or \
- 'git' in replacement_portage.inherited or \
- 'git-2' in replacement_portage.inherited):
- # update from running_portage to replacement_portage asap
- asap_nodes.append(replacement_portage)
-
if running_portage is not None:
try:
portage_rdepend = self._select_atoms_highest_available(
@@ -5668,6 +5865,8 @@ class depgraph(object):
"""
autounmask_write = self._frozen_config.myopts.get("--autounmask-write", "n") == True
+ autounmask_unrestricted_atoms = \
+ self._frozen_config.myopts.get("--autounmask-unrestricted-atoms", "n") == True
quiet = "--quiet" in self._frozen_config.myopts
pretend = "--pretend" in self._frozen_config.myopts
ask = "--ask" in self._frozen_config.myopts
@@ -5703,6 +5902,7 @@ class depgraph(object):
#Set of roots we have autounmask changes for.
roots = set()
+ masked_by_missing_keywords = False
unstable_keyword_msg = {}
for pkg in self._dynamic_config._needed_unstable_keywords:
self._show_merge_list()
@@ -5718,12 +5918,17 @@ class depgraph(object):
if reason.unmask_hint and \
reason.unmask_hint.key == 'unstable keyword':
keyword = reason.unmask_hint.value
+ if keyword == "**":
+ masked_by_missing_keywords = True
unstable_keyword_msg[root].append(self._get_dep_chain_as_comment(pkg))
- if is_latest:
- unstable_keyword_msg[root].append(">=%s %s\n" % (pkg.cpv, keyword))
- elif is_latest_in_slot:
- unstable_keyword_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], keyword))
+ if autounmask_unrestricted_atoms:
+ if is_latest:
+ unstable_keyword_msg[root].append(">=%s %s\n" % (pkg.cpv, keyword))
+ elif is_latest_in_slot:
+ unstable_keyword_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], keyword))
+ else:
+ unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
else:
unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
@@ -5757,10 +5962,13 @@ class depgraph(object):
comment.splitlines() if line]
for line in comment:
p_mask_change_msg[root].append("%s\n" % line)
- if is_latest:
- p_mask_change_msg[root].append(">=%s\n" % pkg.cpv)
- elif is_latest_in_slot:
- p_mask_change_msg[root].append(">=%s:%s\n" % (pkg.cpv, pkg.metadata["SLOT"]))
+ if autounmask_unrestricted_atoms:
+ if is_latest:
+ p_mask_change_msg[root].append(">=%s\n" % pkg.cpv)
+ elif is_latest_in_slot:
+ p_mask_change_msg[root].append(">=%s:%s\n" % (pkg.cpv, pkg.metadata["SLOT"]))
+ else:
+ p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
else:
p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
@@ -5893,33 +6101,41 @@ class depgraph(object):
write_to_file = not problems
+ def format_msg(lines):
+ lines = lines[:]
+ for i, line in enumerate(lines):
+ if line.startswith("#"):
+ continue
+ lines[i] = colorize("INFORM", line.rstrip()) + "\n"
+ return "".join(lines)
+
for root in roots:
settings = self._frozen_config.roots[root].settings
abs_user_config = os.path.join(
settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
if len(roots) > 1:
- writemsg_stdout("\nFor %s:\n" % abs_user_config, noiselevel=-1)
+ writemsg("\nFor %s:\n" % abs_user_config, noiselevel=-1)
if root in unstable_keyword_msg:
- writemsg_stdout("\nThe following " + colorize("BAD", "keyword changes") + \
+ writemsg("\nThe following " + colorize("BAD", "keyword changes") + \
" are necessary to proceed:\n", noiselevel=-1)
- writemsg_stdout("".join(unstable_keyword_msg[root]), noiselevel=-1)
+ writemsg(format_msg(unstable_keyword_msg[root]), noiselevel=-1)
if root in p_mask_change_msg:
- writemsg_stdout("\nThe following " + colorize("BAD", "mask changes") + \
+ writemsg("\nThe following " + colorize("BAD", "mask changes") + \
" are necessary to proceed:\n", noiselevel=-1)
- writemsg_stdout("".join(p_mask_change_msg[root]), noiselevel=-1)
+ writemsg(format_msg(p_mask_change_msg[root]), noiselevel=-1)
if root in use_changes_msg:
- writemsg_stdout("\nThe following " + colorize("BAD", "USE changes") + \
+ writemsg("\nThe following " + colorize("BAD", "USE changes") + \
" are necessary to proceed:\n", noiselevel=-1)
- writemsg_stdout("".join(use_changes_msg[root]), noiselevel=-1)
+ writemsg(format_msg(use_changes_msg[root]), noiselevel=-1)
if root in license_msg:
- writemsg_stdout("\nThe following " + colorize("BAD", "license changes") + \
+ writemsg("\nThe following " + colorize("BAD", "license changes") + \
" are necessary to proceed:\n", noiselevel=-1)
- writemsg_stdout("".join(license_msg[root]), noiselevel=-1)
+ writemsg(format_msg(license_msg[root]), noiselevel=-1)
protect_obj = {}
if write_to_file:
@@ -5948,7 +6164,7 @@ class depgraph(object):
if protect_obj[root].isprotected(file_to_write_to):
# We want to force new_protect_filename to ensure
# that the user will see all our changes via
- # etc-update, even if file_to_write_to doesn't
+ # dispatch-conf, even if file_to_write_to doesn't
# exist yet, so we specify force=True.
file_to_write_to = new_protect_filename(file_to_write_to,
force=True)
@@ -5957,20 +6173,16 @@ class depgraph(object):
except PortageException:
problems.append("!!! Failed to write '%s'\n" % file_to_write_to)
- if not quiet and \
- (unstable_keyword_msg or \
- p_mask_change_msg or \
- use_changes_msg or \
- license_msg):
+ if not quiet and (p_mask_change_msg or masked_by_missing_keywords):
msg = [
"",
- "NOTE: This --autounmask behavior can be disabled by setting",
- " EMERGE_DEFAULT_OPTS=\"--autounmask=n\" in make.conf."
+ "NOTE: The --autounmask-keep-masks option will prevent emerge",
+ " from creating package.unmask or ** keyword changes."
]
for line in msg:
if line:
line = colorize("INFORM", line)
- writemsg_stdout(line + "\n", noiselevel=-1)
+ writemsg(line + "\n", noiselevel=-1)
if ask and write_to_file and file_to_write_to:
prompt = "\nWould you like to add these " + \
@@ -6002,14 +6214,14 @@ class depgraph(object):
file_to_write_to.get((abs_user_config, "package.license")))
if problems:
- writemsg_stdout("\nThe following problems occurred while writing autounmask changes:\n", \
+ writemsg("\nThe following problems occurred while writing autounmask changes:\n", \
noiselevel=-1)
- writemsg_stdout("".join(problems), noiselevel=-1)
+ writemsg("".join(problems), noiselevel=-1)
elif write_to_file and roots:
- writemsg_stdout("\nAutounmask changes successfully written. Remember to run etc-update.\n", \
+ writemsg("\nAutounmask changes successfully written. Remember to run dispatch-conf.\n", \
noiselevel=-1)
elif not pretend and not autounmask_write and roots:
- writemsg_stdout("\nUse --autounmask-write to write changes to config files (honoring CONFIG_PROTECT).\n", \
+ writemsg("\nUse --autounmask-write to write changes to config files (honoring CONFIG_PROTECT).\n", \
noiselevel=-1)
@@ -6020,49 +6232,25 @@ class depgraph(object):
the merge list where it is most likely to be seen, but if display()
is not going to be called then this method should be called explicitly
to ensure that the user is notified of problems with the graph.
-
- All output goes to stderr, except for unsatisfied dependencies which
- go to stdout for parsing by programs such as autounmask.
"""
- # Note that show_masked_packages() sends its output to
- # stdout, and some programs such as autounmask parse the
- # output in cases when emerge bails out. However, when
- # show_masked_packages() is called for installed packages
- # here, the message is a warning that is more appropriate
- # to send to stderr, so temporarily redirect stdout to
- # stderr. TODO: Fix output code so there's a cleaner way
- # to redirect everything to stderr.
- sys.stdout.flush()
- sys.stderr.flush()
- stdout = sys.stdout
- try:
- sys.stdout = sys.stderr
- self._display_problems()
- finally:
- sys.stdout = stdout
- sys.stdout.flush()
- sys.stderr.flush()
-
- # This goes to stdout for parsing by programs like autounmask.
- for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
- self._show_unsatisfied_dep(*pargs, **kwargs)
-
- def _display_problems(self):
if self._dynamic_config._circular_deps_for_display is not None:
self._show_circular_deps(
self._dynamic_config._circular_deps_for_display)
- # The user is only notified of a slot conflict if
- # there are no unresolvable blocker conflicts.
- if self._dynamic_config._unsatisfied_blockers_for_display is not None:
+ # The slot conflict display has better noise reduction than
+ # the unsatisfied blockers display, so skip unsatisfied blockers
+ # display if there are slot conflicts (see bug #385391).
+ if self._dynamic_config._slot_collision_info:
+ self._show_slot_collision_notice()
+ elif self._dynamic_config._unsatisfied_blockers_for_display is not None:
self._show_unsatisfied_blockers(
self._dynamic_config._unsatisfied_blockers_for_display)
- elif self._dynamic_config._slot_collision_info:
- self._show_slot_collision_notice()
else:
self._show_missed_update()
+ self._show_ignored_binaries()
+
self._display_autounmask()
# TODO: Add generic support for "set problem" handlers so that
@@ -6164,6 +6352,9 @@ class depgraph(object):
show_mask_docs()
writemsg("\n", noiselevel=-1)
+ for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
+ self._show_unsatisfied_dep(*pargs, **kwargs)
+
def saveNomergeFavorites(self):
"""Find atoms in favorites that are not in the mergelist and add them
to the world file if necessary."""
@@ -6184,7 +6375,6 @@ class depgraph(object):
args_set = self._dynamic_config.sets[
self._frozen_config.target_root].sets['__non_set_args__']
- portdb = self._frozen_config.trees[self._frozen_config.target_root]["porttree"].dbapi
added_favorites = set()
for x in self._dynamic_config._set_nodes:
if x.operation != "nomerge":
@@ -6222,7 +6412,8 @@ class depgraph(object):
all_added.extend(added_favorites)
all_added.sort()
for a in all_added:
- writemsg(">>> Recording %s in \"world\" favorites file...\n" % \
+ writemsg_stdout(
+ ">>> Recording %s in \"world\" favorites file...\n" % \
colorize("INFORM", str(a)), noiselevel=-1)
if all_added:
world_set.update(all_added)
@@ -6247,15 +6438,12 @@ class depgraph(object):
mergelist = []
favorites = resume_data.get("favorites")
- args_set = self._dynamic_config.sets[
- self._frozen_config.target_root].sets['__non_set_args__']
if isinstance(favorites, list):
args = self._load_favorites(favorites)
else:
args = []
fakedb = self._dynamic_config.mydbapi
- trees = self._frozen_config.trees
serialized_tasks = []
masked_tasks = []
for x in mergelist:
@@ -6552,38 +6740,43 @@ class _dep_check_composite_db(dbapi):
return ret
def match(self, atom):
- ret = self._match_cache.get(atom)
+ cache_key = (atom, atom.unevaluated_atom)
+ ret = self._match_cache.get(cache_key)
if ret is not None:
return ret[:]
+
+ ret = []
pkg, existing = self._depgraph._select_package(self._root, atom)
- if not pkg:
- ret = []
- else:
- # Return the highest available from select_package() as well as
- # any matching slots in the graph db.
+
+ if pkg is not None and self._visible(pkg):
+ self._cpv_pkg_map[pkg.cpv] = pkg
+ ret.append(pkg.cpv)
+
+ if pkg is not None and \
+ atom.slot is None and \
+ pkg.cp.startswith("virtual/") and \
+ (("remove" not in self._depgraph._dynamic_config.myparams and
+ "--update" not in self._depgraph._frozen_config.myopts) or
+ not ret or
+ not self._depgraph._virt_deps_visible(pkg, ignore_use=True)):
+ # For new-style virtual lookahead that occurs inside dep_check()
+ # for bug #141118, examine all slots. This is needed so that newer
+ # slots will not unnecessarily be pulled in when a satisfying lower
+ # slot is already installed. For example, if virtual/jdk-1.5 is
+ # satisfied via gcj-jdk then there's no need to pull in a newer
+ # slot to satisfy a virtual/jdk dependency, unless --update is
+ # enabled.
slots = set()
- slots.add(pkg.metadata["SLOT"])
- if pkg.cp.startswith("virtual/"):
- # For new-style virtual lookahead that occurs inside
- # dep_check(), examine all slots. This is needed
- # so that newer slots will not unnecessarily be pulled in
- # when a satisfying lower slot is already installed. For
- # example, if virtual/jdk-1.4 is satisfied via kaffe then
- # there's no need to pull in a newer slot to satisfy a
- # virtual/jdk dependency.
- for db, pkg_type, built, installed, db_keys in \
- self._depgraph._dynamic_config._filtered_trees[self._root]["dbs"]:
- for cpv in db.match(atom):
- if portage.cpv_getkey(cpv) != pkg.cp:
- continue
- slots.add(db.aux_get(cpv, ["SLOT"])[0])
- ret = []
- if self._visible(pkg):
- self._cpv_pkg_map[pkg.cpv] = pkg
- ret.append(pkg.cpv)
- slots.remove(pkg.metadata["SLOT"])
+ slots.add(pkg.slot)
+ for virt_pkg in self._depgraph._iter_match_pkgs_any(
+ self._depgraph._frozen_config.roots[self._root], atom):
+ if virt_pkg.cp != pkg.cp:
+ continue
+ slots.add(virt_pkg.slot)
+
+ slots.remove(pkg.slot)
while slots:
- slot_atom = Atom("%s:%s" % (atom.cp, slots.pop()))
+ slot_atom = atom.with_slot(slots.pop())
pkg, existing = self._depgraph._select_package(
self._root, slot_atom)
if not pkg:
@@ -6592,9 +6785,11 @@ class _dep_check_composite_db(dbapi):
continue
self._cpv_pkg_map[pkg.cpv] = pkg
ret.append(pkg.cpv)
- if ret:
+
+ if len(ret) > 1:
self._cpv_sort_ascending(ret)
- self._match_cache[atom] = ret
+
+ self._match_cache[cache_key] = ret
return ret[:]
def _visible(self, pkg):
@@ -6650,7 +6845,7 @@ class _dep_check_composite_db(dbapi):
# Note: highest_visible is not necessarily the real highest
# visible, especially when --update is not enabled, so use
# < operator instead of !=.
- if pkg < highest_visible:
+ if highest_visible is not None and pkg < highest_visible:
return False
elif in_graph != pkg:
# Mask choices for packages that would trigger a slot
@@ -6832,7 +7027,7 @@ def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
TODO: Return reasons for dropped_tasks, for display/logging.
@rtype: tuple
- @returns: (success, depgraph, dropped_tasks)
+ @return: (success, depgraph, dropped_tasks)
"""
skip_masked = True
skip_unsatisfied = True
@@ -6869,12 +7064,12 @@ def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
if not isinstance(parent_node, Package) \
or parent_node.operation not in ("merge", "nomerge"):
continue
- unsatisfied = \
- graph.child_nodes(parent_node,
- ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
- if pkg in unsatisfied:
- unsatisfied_parents[parent_node] = parent_node
- unsatisfied_stack.append(parent_node)
+ # We need to traverse all priorities here, in order to
+ # ensure that a package with an unsatisfied dependency
+ # won't get pulled in, even indirectly via a soft
+ # dependency.
+ unsatisfied_parents[parent_node] = parent_node
+ unsatisfied_stack.append(parent_node)
unsatisfied_tuples = frozenset(tuple(parent_node)
for parent_node in unsatisfied_parents
@@ -6907,7 +7102,6 @@ def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
def get_mask_info(root_config, cpv, pkgsettings,
db, pkg_type, built, installed, db_keys, myrepo = None, _pkg_use_enabled=None):
- eapi_masked = False
try:
metadata = dict(zip(db_keys,
db.aux_get(cpv, db_keys, myrepo=myrepo)))
@@ -6918,8 +7112,6 @@ def get_mask_info(root_config, cpv, pkgsettings,
mreasons = ["corruption"]
else:
eapi = metadata['EAPI']
- if eapi[:1] == '-':
- eapi = eapi[1:]
if not portage.eapi_is_supported(eapi):
mreasons = ['EAPI %s' % eapi]
else:
@@ -6976,10 +7168,11 @@ def show_masked_packages(masked_packages):
# above via mreasons.
pass
- writemsg_stdout("- "+output_cpv+" (masked by: "+", ".join(mreasons)+")\n", noiselevel=-1)
+ writemsg("- "+output_cpv+" (masked by: "+", ".join(mreasons)+")\n",
+ noiselevel=-1)
if comment and comment not in shown_comments:
- writemsg_stdout(filename + ":\n" + comment + "\n",
+ writemsg(filename + ":\n" + comment + "\n",
noiselevel=-1)
shown_comments.add(comment)
portdb = root_config.trees["porttree"].dbapi
@@ -6989,13 +7182,14 @@ def show_masked_packages(masked_packages):
continue
msg = ("A copy of the '%s' license" + \
" is located at '%s'.\n\n") % (l, l_path)
- writemsg_stdout(msg, noiselevel=-1)
+ writemsg(msg, noiselevel=-1)
shown_licenses.add(l)
return have_eapi_mask
def show_mask_docs():
- writemsg_stdout("For more information, see the MASKED PACKAGES section in the emerge\n", noiselevel=-1)
- writemsg_stdout("man page or refer to the Gentoo Handbook.\n", noiselevel=-1)
+ writemsg("For more information, see the MASKED PACKAGES "
+ "section in the emerge\n", noiselevel=-1)
+ writemsg("man page or refer to the Gentoo Handbook.\n", noiselevel=-1)
def show_blocker_docs_link():
writemsg("\nFor more information about " + bad("Blocked Packages") + ", please refer to the following\n", noiselevel=-1)
@@ -7017,7 +7211,7 @@ def _get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
pkg.metadata["CHOST"]))
if pkg.invalid:
- for msg_type, msgs in pkg.invalid.items():
+ for msgs in pkg.invalid.values():
for msg in msgs:
mreasons.append(
_MaskReason("invalid", "invalid: %s" % (msg,)))