Diffstat (limited to 'portage_with_autodep/pym/portage/dbapi')
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/_MergeProcess.py      |  214
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/_MergeProcess.pyo     |  bin 6813 -> 7418 bytes
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/__init__.py           |  185
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/__init__.pyo          |  bin 11096 -> 15582 bytes
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/_expand_new_virt.py   |   12
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/_expand_new_virt.pyo  |  bin 1943 -> 2184 bytes
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/bintree.py            |  357
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/bintree.pyo           |  bin 39953 -> 42052 bytes
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/cpv_expand.py         |    4
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/cpv_expand.pyo        |  bin 2373 -> 2467 bytes
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/dep_expand.py         |    6
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/dep_expand.pyo        |  bin 1500 -> 1606 bytes
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/porttree.py           |  152
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/porttree.pyo          |  bin 33775 -> 35582 bytes
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/vartree.py            |  754
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/vartree.pyo           |  bin 120778 -> 129927 bytes
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/virtual.py            |   56
-rw-r--r--  portage_with_autodep/pym/portage/dbapi/virtual.pyo           |  bin 5813 -> 6046 bytes
18 files changed, 1183 insertions, 557 deletions
diff --git a/portage_with_autodep/pym/portage/dbapi/_MergeProcess.py b/portage_with_autodep/pym/portage/dbapi/_MergeProcess.py
index b5f6a0b..956dbb9 100644
--- a/portage_with_autodep/pym/portage/dbapi/_MergeProcess.py
+++ b/portage_with_autodep/pym/portage/dbapi/_MergeProcess.py
@@ -1,7 +1,8 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import io
+import platform
import signal
import sys
import traceback
@@ -10,10 +11,11 @@ import errno
import fcntl
import portage
from portage import os, _unicode_decode
+from portage.util._ctypes import find_library
import portage.elog.messages
-from _emerge.SpawnProcess import SpawnProcess
+from portage.util._async.ForkProcess import ForkProcess
-class MergeProcess(SpawnProcess):
+class MergeProcess(ForkProcess):
"""
Merge packages in a subprocess, so the Scheduler can run in the main
thread while files are moved or copied asynchronously.
@@ -40,11 +42,20 @@ class MergeProcess(SpawnProcess):
settings.reset()
settings.setcpv(cpv, mydb=self.mydbapi)
+ # This caches the libc library lookup in the current
+ # process, so that it's only done once rather than
+ # for each child process.
+ if platform.system() == "Linux" and \
+ "merge-sync" in settings.features:
+ find_library("c")
+
# Inherit stdin by default, so that the pdb SIGUSR1
# handler is usable for the subprocess.
if self.fd_pipes is None:
self.fd_pipes = {}
- self.fd_pipes.setdefault(0, sys.stdin.fileno())
+ else:
+ self.fd_pipes = self.fd_pipes.copy()
+ self.fd_pipes.setdefault(0, portage._get_stdin().fileno())
super(MergeProcess, self)._start()
@@ -90,7 +101,7 @@ class MergeProcess(SpawnProcess):
reporter(msg, phase=phase, key=key, out=out)
if event & self.scheduler.IO_HUP:
- self.scheduler.unregister(self._elog_reg_id)
+ self.scheduler.source_remove(self._elog_reg_id)
self._elog_reg_id = None
os.close(self._elog_reader_fd)
self._elog_reader_fd = None
@@ -101,12 +112,24 @@ class MergeProcess(SpawnProcess):
def _spawn(self, args, fd_pipes, **kwargs):
"""
Fork a subprocess, apply local settings, and call
- dblink.merge().
+ dblink.merge(). TODO: Share code with ForkProcess.
"""
elog_reader_fd, elog_writer_fd = os.pipe()
+
fcntl.fcntl(elog_reader_fd, fcntl.F_SETFL,
fcntl.fcntl(elog_reader_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(elog_reader_fd, fcntl.F_SETFD,
+ fcntl.fcntl(elog_reader_fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
blockers = None
if self.blockers is not None:
# Query blockers in the main process, since closing
@@ -116,10 +139,9 @@ class MergeProcess(SpawnProcess):
blockers = self.blockers()
mylink = portage.dblink(self.mycat, self.mypkg, settings=self.settings,
treetype=self.treetype, vartree=self.vartree,
- blockers=blockers, scheduler=self.scheduler,
- pipe=elog_writer_fd)
+ blockers=blockers, pipe=elog_writer_fd)
fd_pipes[elog_writer_fd] = elog_writer_fd
- self._elog_reg_id = self.scheduler.register(elog_reader_fd,
+ self._elog_reg_id = self.scheduler.io_add_watch(elog_reader_fd,
self._registered_events, self._elog_output_handler)
# If a concurrent emerge process tries to install a package
@@ -133,88 +155,100 @@ class MergeProcess(SpawnProcess):
if not self.unmerge:
counter = self.vartree.dbapi.counter_tick()
- pid = os.fork()
- if pid != 0:
- if not isinstance(pid, int):
- raise AssertionError(
- "fork returned non-integer: %s" % (repr(pid),))
-
- os.close(elog_writer_fd)
- self._elog_reader_fd = elog_reader_fd
- self._buf = ""
- self._elog_keys = set()
-
- # invalidate relevant vardbapi caches
- if self.vartree.dbapi._categories is not None:
- self.vartree.dbapi._categories = None
- self.vartree.dbapi._pkgs_changed = True
- self.vartree.dbapi._clear_pkg_cache(mylink)
-
- portage.process.spawned_pids.append(pid)
- return [pid]
-
- os.close(elog_reader_fd)
- portage.locks._close_fds()
- # Disable close_fds since we don't exec (see _setup_pipes docstring).
- portage.process._setup_pipes(fd_pipes, close_fds=False)
-
- # Use default signal handlers since the ones inherited
- # from the parent process are irrelevant here.
- signal.signal(signal.SIGINT, signal.SIG_DFL)
- signal.signal(signal.SIGTERM, signal.SIG_DFL)
-
- portage.output.havecolor = self.settings.get('NOCOLOR') \
- not in ('yes', 'true')
-
- # In this subprocess we want mylink._display_merge() to use
- # stdout/stderr directly since they are pipes. This behavior
- # is triggered when mylink._scheduler is None.
- mylink._scheduler = None
-
- # Avoid wasteful updates of the vdb cache.
- self.vartree.dbapi._flush_cache_enabled = False
-
- # In this subprocess we don't want PORTAGE_BACKGROUND to
- # suppress stdout/stderr output since they are pipes. We
- # also don't want to open PORTAGE_LOG_FILE, since it will
- # already be opened by the parent process, so we set the
- # "subprocess" value for use in conditional logging code
- # involving PORTAGE_LOG_FILE.
- if not self.unmerge:
- # unmerge phases have separate logs
- if self.settings.get("PORTAGE_BACKGROUND") == "1":
- self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "1"
- else:
- self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "0"
- self.settings.backup_changes("PORTAGE_BACKGROUND_UNMERGE")
- self.settings["PORTAGE_BACKGROUND"] = "subprocess"
- self.settings.backup_changes("PORTAGE_BACKGROUND")
-
- rval = 1
+ parent_pid = os.getpid()
+ pid = None
try:
- if self.unmerge:
- if not mylink.exists():
- rval = os.EX_OK
- elif mylink.unmerge(
- ldpath_mtimes=self.prev_mtimes) == os.EX_OK:
- mylink.lockdb()
- try:
- mylink.delete()
- finally:
- mylink.unlockdb()
- rval = os.EX_OK
- else:
- rval = mylink.merge(self.pkgloc, self.infloc,
- myebuild=self.myebuild, mydbapi=self.mydbapi,
- prev_mtimes=self.prev_mtimes, counter=counter)
- except SystemExit:
- raise
- except:
- traceback.print_exc()
+ pid = os.fork()
+
+ if pid != 0:
+ if not isinstance(pid, int):
+ raise AssertionError(
+ "fork returned non-integer: %s" % (repr(pid),))
+
+ os.close(elog_writer_fd)
+ self._elog_reader_fd = elog_reader_fd
+ self._buf = ""
+ self._elog_keys = set()
+ # Discard messages which will be collected by the subprocess,
+ # in order to avoid duplicates (bug #446136).
+ portage.elog.messages.collect_messages(key=mylink.mycpv)
+
+ # invalidate relevant vardbapi caches
+ if self.vartree.dbapi._categories is not None:
+ self.vartree.dbapi._categories = None
+ self.vartree.dbapi._pkgs_changed = True
+ self.vartree.dbapi._clear_pkg_cache(mylink)
+
+ return [pid]
+
+ os.close(elog_reader_fd)
+
+ # Use default signal handlers in order to avoid problems
+ # killing subprocesses as reported in bug #353239.
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+ portage.locks._close_fds()
+ # We don't exec, so use close_fds=False
+ # (see _setup_pipes docstring).
+ portage.process._setup_pipes(fd_pipes, close_fds=False)
+
+ portage.output.havecolor = self.settings.get('NOCOLOR') \
+ not in ('yes', 'true')
+
+ # Avoid wasteful updates of the vdb cache.
+ self.vartree.dbapi._flush_cache_enabled = False
+
+ # In this subprocess we don't want PORTAGE_BACKGROUND to
+ # suppress stdout/stderr output since they are pipes. We
+ # also don't want to open PORTAGE_LOG_FILE, since it will
+ # already be opened by the parent process, so we set the
+ # "subprocess" value for use in conditional logging code
+ # involving PORTAGE_LOG_FILE.
+ if not self.unmerge:
+ # unmerge phases have separate logs
+ if self.settings.get("PORTAGE_BACKGROUND") == "1":
+ self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "1"
+ else:
+ self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "0"
+ self.settings.backup_changes("PORTAGE_BACKGROUND_UNMERGE")
+ self.settings["PORTAGE_BACKGROUND"] = "subprocess"
+ self.settings.backup_changes("PORTAGE_BACKGROUND")
+
+ rval = 1
+ try:
+ if self.unmerge:
+ if not mylink.exists():
+ rval = os.EX_OK
+ elif mylink.unmerge(
+ ldpath_mtimes=self.prev_mtimes) == os.EX_OK:
+ mylink.lockdb()
+ try:
+ mylink.delete()
+ finally:
+ mylink.unlockdb()
+ rval = os.EX_OK
+ else:
+ rval = mylink.merge(self.pkgloc, self.infloc,
+ myebuild=self.myebuild, mydbapi=self.mydbapi,
+ prev_mtimes=self.prev_mtimes, counter=counter)
+ except SystemExit:
+ raise
+ except:
+ traceback.print_exc()
+ # os._exit() skips stderr flush!
+ sys.stderr.flush()
+ finally:
+ os._exit(rval)
+
finally:
- # Call os._exit() from finally block, in order to suppress any
- # finally blocks from earlier in the call stack. See bug #345289.
- os._exit(rval)
+ if pid == 0 or (pid is None and os.getpid() != parent_pid):
+ # Call os._exit() from a finally block in order
+ # to suppress any finally blocks from earlier
+ # in the call stack (see bug #345289). This
+ # finally block has to be setup before the fork
+ # in order to avoid a race condition.
+ os._exit(1)
def _unregister(self):
"""
@@ -231,7 +265,7 @@ class MergeProcess(SpawnProcess):
self._unlock_vdb()
if self._elog_reg_id is not None:
- self.scheduler.unregister(self._elog_reg_id)
+ self.scheduler.source_remove(self._elog_reg_id)
self._elog_reg_id = None
if self._elog_reader_fd is not None:
os.close(self._elog_reader_fd)
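
The rewritten _spawn() above installs its finally block before calling os.fork(), so a child that escapes the try body (or a fork() that raises after the child already exists) can never fall through into the parent's cleanup path (bug #345289). A minimal standalone sketch of the same guard; run_in_child and job are illustrative names, not part of the commit:

import os

def run_in_child(job):
    parent_pid = os.getpid()
    pid = None
    try:
        pid = os.fork()
        if pid != 0:
            return pid              # parent: hand the child pid back
        os._exit(job())             # child: exit without running parent cleanup
    finally:
        # pid == 0: we are the child but fell out of the try body (e.g.
        # job() raised); pid is None with a changed getpid(): fork()
        # raised after the child process came into existence.
        if pid == 0 or (pid is None and os.getpid() != parent_pid):
            os._exit(1)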
diff --git a/portage_with_autodep/pym/portage/dbapi/_MergeProcess.pyo b/portage_with_autodep/pym/portage/dbapi/_MergeProcess.pyo
index 5839ad8..abee4be 100644
--- a/portage_with_autodep/pym/portage/dbapi/_MergeProcess.pyo
+++ b/portage_with_autodep/pym/portage/dbapi/_MergeProcess.pyo
Binary files differ
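
The pipe setup in the _spawn() hunk above makes the elog reader fd non-blocking for the event loop and, on interpreters older than Python 3.4 (where PEP 446 made descriptors non-inheritable by default), marks it close-on-exec. A sketch of that setup, assuming a plain os.pipe(); elog_pipe is an illustrative name:

import fcntl
import os
import sys

def elog_pipe():
    r, w = os.pipe()
    # the reader feeds an event loop, so it must not block
    fcntl.fcntl(r, fcntl.F_SETFL,
        fcntl.fcntl(r, fcntl.F_GETFL) | os.O_NONBLOCK)
    # FD_CLOEXEC is enabled by default in Python >= 3.4
    if sys.hexversion < 0x3040000 and hasattr(fcntl, "FD_CLOEXEC"):
        fcntl.fcntl(r, fcntl.F_SETFD,
            fcntl.fcntl(r, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
    return r, w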
diff --git a/portage_with_autodep/pym/portage/dbapi/__init__.py b/portage_with_autodep/pym/portage/dbapi/__init__.py
index a1c5c56..a20a1e8 100644
--- a/portage_with_autodep/pym/portage/dbapi/__init__.py
+++ b/portage_with_autodep/pym/portage/dbapi/__init__.py
@@ -1,6 +1,8 @@
-# Copyright 1998-2012 Gentoo Foundation
+# Copyright 1998-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = ["dbapi"]
import re
@@ -8,7 +10,7 @@ import re
import portage
portage.proxy.lazyimport.lazyimport(globals(),
'portage.dbapi.dep_expand:dep_expand@_dep_expand',
- 'portage.dep:match_from_list',
+ 'portage.dep:Atom,match_from_list,_match_slot',
'portage.output:colorize',
'portage.util:cmp_sort_key,writemsg',
'portage.versions:catsplit,catpkgsplit,vercmp,_pkg_str',
@@ -16,14 +18,19 @@ portage.proxy.lazyimport.lazyimport(globals(),
from portage import os
from portage import auxdbkeys
+from portage.eapi import _get_eapi_attrs
+from portage.exception import InvalidData
from portage.localization import _
+from _emerge.Package import Package
class dbapi(object):
- _category_re = re.compile(r'^\w[-.+\w]*$')
+ _category_re = re.compile(r'^\w[-.+\w]*$', re.UNICODE)
_categories = None
_use_mutable = False
_known_keys = frozenset(x for x in auxdbkeys
if not x.startswith("UNUSED_0"))
+ _pkg_str_aux_keys = ("EAPI", "KEYWORDS", "SLOT", "repository")
+
def __init__(self):
pass
@@ -125,29 +132,52 @@ class dbapi(object):
def _iter_match(self, atom, cpv_iter):
cpv_iter = iter(match_from_list(atom, cpv_iter))
+ if atom.repo:
+ cpv_iter = self._iter_match_repo(atom, cpv_iter)
if atom.slot:
cpv_iter = self._iter_match_slot(atom, cpv_iter)
if atom.unevaluated_atom.use:
cpv_iter = self._iter_match_use(atom, cpv_iter)
- if atom.repo:
- cpv_iter = self._iter_match_repo(atom, cpv_iter)
return cpv_iter
+ def _pkg_str(self, cpv, repo):
+ """
+ This is used to construct _pkg_str instances on-demand during
+ matching. If cpv is a _pkg_str instance with slot attribute,
+ then simply return it. Otherwise, fetch metadata and construct
+ a _pkg_str instance. This may raise KeyError or InvalidData.
+ """
+ try:
+ cpv.slot
+ except AttributeError:
+ pass
+ else:
+ return cpv
+
+ metadata = dict(zip(self._pkg_str_aux_keys,
+ self.aux_get(cpv, self._pkg_str_aux_keys, myrepo=repo)))
+
+ return _pkg_str(cpv, metadata=metadata, settings=self.settings)
+
def _iter_match_repo(self, atom, cpv_iter):
for cpv in cpv_iter:
try:
- if self.aux_get(cpv, ["repository"], myrepo=atom.repo)[0] == atom.repo:
- yield cpv
- except KeyError:
- continue
+ pkg_str = self._pkg_str(cpv, atom.repo)
+ except (KeyError, InvalidData):
+ pass
+ else:
+ if pkg_str.repo == atom.repo:
+ yield pkg_str
def _iter_match_slot(self, atom, cpv_iter):
for cpv in cpv_iter:
try:
- if self.aux_get(cpv, ["SLOT"], myrepo=atom.repo)[0] == atom.slot:
- yield cpv
- except KeyError:
- continue
+ pkg_str = self._pkg_str(cpv, atom.repo)
+ except (KeyError, InvalidData):
+ pass
+ else:
+ if _match_slot(atom, pkg_str):
+ yield pkg_str
def _iter_match_use(self, atom, cpv_iter):
"""
@@ -155,7 +185,7 @@ class dbapi(object):
2) Check enabled/disabled flag states.
"""
- aux_keys = ["IUSE", "SLOT", "USE", "repository"]
+ aux_keys = ["EAPI", "IUSE", "KEYWORDS", "SLOT", "USE", "repository"]
for cpv in cpv_iter:
try:
metadata = dict(zip(aux_keys,
@@ -163,17 +193,31 @@ class dbapi(object):
except KeyError:
continue
+ try:
+ cpv.slot
+ except AttributeError:
+ try:
+ cpv = _pkg_str(cpv, metadata=metadata,
+ settings=self.settings)
+ except InvalidData:
+ continue
+
if not self._match_use(atom, cpv, metadata):
continue
yield cpv
- def _match_use(self, atom, cpv, metadata):
- iuse_implicit_match = self.settings._iuse_implicit_match
- iuse = frozenset(x.lstrip('+-') for x in metadata["IUSE"].split())
+ def _match_use(self, atom, pkg, metadata):
+ eapi_attrs = _get_eapi_attrs(metadata["EAPI"])
+ if eapi_attrs.iuse_effective:
+ iuse_implicit_match = self.settings._iuse_effective_match
+ else:
+ iuse_implicit_match = self.settings._iuse_implicit_match
+ usealiases = self.settings._use_manager.getUseAliases(pkg)
+ iuse = Package._iuse(None, metadata["IUSE"].split(), iuse_implicit_match, usealiases, metadata["EAPI"])
for x in atom.unevaluated_atom.use.required:
- if x not in iuse and not iuse_implicit_match(x):
+ if iuse.get_real_flag(x) is None:
return False
if atom.use is None:
@@ -183,45 +227,54 @@ class dbapi(object):
# Use IUSE to validate USE settings for built packages,
# in case the package manager that built this package
# failed to do that for some reason (or in case of
- # data corruption).
- use = frozenset(x for x in metadata["USE"].split()
- if x in iuse or iuse_implicit_match(x))
- missing_enabled = atom.use.missing_enabled.difference(iuse)
- missing_disabled = atom.use.missing_disabled.difference(iuse)
-
- if atom.use.enabled:
- if atom.use.enabled.intersection(missing_disabled):
+ # data corruption). The enabled flags must be consistent
+ # with implicit IUSE, in order to avoid potential
+ # inconsistencies in USE dep matching (see bug #453400).
+ use = frozenset(x for x in metadata["USE"].split() if iuse.get_real_flag(x) is not None)
+ missing_enabled = frozenset(x for x in atom.use.missing_enabled if iuse.get_real_flag(x) is None)
+ missing_disabled = frozenset(x for x in atom.use.missing_disabled if iuse.get_real_flag(x) is None)
+ enabled = frozenset((iuse.get_real_flag(x) or x) for x in atom.use.enabled)
+ disabled = frozenset((iuse.get_real_flag(x) or x) for x in atom.use.disabled)
+
+ if enabled:
+ if any(x in enabled for x in missing_disabled):
return False
- need_enabled = atom.use.enabled.difference(use)
+ need_enabled = enabled.difference(use)
if need_enabled:
- need_enabled = need_enabled.difference(missing_enabled)
- if need_enabled:
+ if any(x not in missing_enabled for x in need_enabled):
return False
- if atom.use.disabled:
- if atom.use.disabled.intersection(missing_enabled):
+ if disabled:
+ if any(x in disabled for x in missing_enabled):
return False
- need_disabled = atom.use.disabled.intersection(use)
+ need_disabled = disabled.intersection(use)
if need_disabled:
- need_disabled = need_disabled.difference(missing_disabled)
- if need_disabled:
+ if any(x not in missing_disabled for x in need_disabled):
return False
elif not self.settings.local_config:
# Check masked and forced flags for repoman.
- if hasattr(cpv, 'slot'):
- pkg = cpv
- else:
- pkg = _pkg_str(cpv, slot=metadata["SLOT"],
- repo=metadata.get("repository"))
- usemask = self.settings._getUseMask(pkg)
- if usemask.intersection(atom.use.enabled):
+ usemask = self.settings._getUseMask(pkg,
+ stable=self.settings._parent_stable)
+ if any(x in usemask for x in atom.use.enabled):
return False
- useforce = self.settings._getUseForce(pkg).difference(usemask)
- if useforce.intersection(atom.use.disabled):
+ useforce = self.settings._getUseForce(pkg,
+ stable=self.settings._parent_stable)
+ if any(x in useforce and x not in usemask
+ for x in atom.use.disabled):
return False
+ # Check unsatisfied use-default deps
+ if atom.use.enabled:
+ missing_disabled = frozenset(x for x in atom.use.missing_disabled if iuse.get_real_flag(x) is None)
+ if any(x in atom.use.enabled for x in missing_disabled):
+ return False
+ if atom.use.disabled:
+ missing_enabled = frozenset(x for x in atom.use.missing_enabled if iuse.get_real_flag(x) is None)
+ if any(x in atom.use.disabled for x in missing_enabled):
+ return False
+
return True
def invalidentry(self, mypath):
@@ -249,23 +302,30 @@ class dbapi(object):
maxval = len(cpv_all)
aux_get = self.aux_get
aux_update = self.aux_update
- meta_keys = ["DEPEND", "RDEPEND", "PDEPEND", "PROVIDE", 'repository']
+ update_keys = Package._dep_keys + ("PROVIDE",)
+ meta_keys = update_keys + self._pkg_str_aux_keys
repo_dict = None
if isinstance(updates, dict):
repo_dict = updates
- from portage.update import update_dbentries
if onUpdate:
onUpdate(maxval, 0)
if onProgress:
onProgress(maxval, 0)
for i, cpv in enumerate(cpv_all):
- metadata = dict(zip(meta_keys, aux_get(cpv, meta_keys)))
- repo = metadata.pop('repository')
+ try:
+ metadata = dict(zip(meta_keys, aux_get(cpv, meta_keys)))
+ except KeyError:
+ continue
+ try:
+ pkg = _pkg_str(cpv, metadata=metadata, settings=self.settings)
+ except InvalidData:
+ continue
+ metadata = dict((k, metadata[k]) for k in update_keys)
if repo_dict is None:
updates_list = updates
else:
try:
- updates_list = repo_dict[repo]
+ updates_list = repo_dict[pkg.repo]
except KeyError:
try:
updates_list = repo_dict['DEFAULT']
@@ -275,7 +335,8 @@ class dbapi(object):
if not updates_list:
continue
- metadata_updates = update_dbentries(updates_list, metadata)
+ metadata_updates = \
+ portage.update_dbentries(updates_list, metadata, parent=pkg)
if metadata_updates:
aux_update(cpv, metadata_updates)
if onUpdate:
@@ -286,27 +347,39 @@ class dbapi(object):
def move_slot_ent(self, mylist, repo_match=None):
"""This function takes a sequence:
Args:
- mylist: a sequence of (package, originalslot, newslot)
+ mylist: a sequence of (atom, originalslot, newslot)
repo_match: callable that takes single repo_name argument
and returns True if the update should be applied
Returns:
The number of slotmoves this function did
"""
- pkg = mylist[1]
+ atom = mylist[1]
origslot = mylist[2]
newslot = mylist[3]
- origmatches = self.match(pkg)
+
+ try:
+ atom.with_slot
+ except AttributeError:
+ atom = Atom(atom).with_slot(origslot)
+ else:
+ atom = atom.with_slot(origslot)
+
+ origmatches = self.match(atom)
moves = 0
if not origmatches:
return moves
for mycpv in origmatches:
- slot = self.aux_get(mycpv, ["SLOT"])[0]
- if slot != origslot:
+ try:
+ mycpv = self._pkg_str(mycpv, atom.repo)
+ except (KeyError, InvalidData):
continue
- if repo_match is not None \
- and not repo_match(self.aux_get(mycpv, ['repository'])[0]):
+ if repo_match is not None and not repo_match(mycpv.repo):
continue
moves += 1
+ if "/" not in newslot and \
+ mycpv.sub_slot and \
+ mycpv.sub_slot not in (mycpv.slot, newslot):
+ newslot = "%s/%s" % (newslot, mycpv.sub_slot)
mydata = {"SLOT": newslot+"\n"}
self.aux_update(mycpv, mydata)
return moves
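
The new dbapi._pkg_str() helper above upgrades a plain cpv string to a metadata-bearing _pkg_str only when a repo/slot/USE filter first needs it, and passes already-upgraded values through untouched. A sketch of that lazy promotion; promote is an illustrative name, while aux_get and _pkg_str follow the signatures used in the diff:

from portage.versions import _pkg_str

_PKG_STR_AUX_KEYS = ("EAPI", "KEYWORDS", "SLOT", "repository")

def promote(db, cpv, repo=None):
    try:
        cpv.slot                    # already a _pkg_str carrying metadata
    except AttributeError:
        pass
    else:
        return cpv
    # may raise KeyError (unknown cpv) or InvalidData (bad metadata)
    metadata = dict(zip(_PKG_STR_AUX_KEYS,
        db.aux_get(cpv, _PKG_STR_AUX_KEYS, myrepo=repo)))
    return _pkg_str(cpv, metadata=metadata, settings=db.settings)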
diff --git a/portage_with_autodep/pym/portage/dbapi/__init__.pyo b/portage_with_autodep/pym/portage/dbapi/__init__.pyo
index e7b494d..d4d47b2 100644
--- a/portage_with_autodep/pym/portage/dbapi/__init__.pyo
+++ b/portage_with_autodep/pym/portage/dbapi/__init__.pyo
Binary files differ
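
The reworked _match_use() above restates the old set arithmetic in terms of iuse.get_real_flag() so that USE aliases and implicit IUSE are honored. Stripped of the alias handling, the underlying set logic looks like this; match_use and its parameters are illustrative simplifications, not the commit's API:

def match_use(use, enabled, disabled, missing_enabled, missing_disabled):
    # use: flags enabled in the package; enabled/disabled: flags the atom
    # requires on/off; missing_*: atom defaults for flags the package lacks
    if enabled & missing_disabled:
        return False                # atom needs a flag that defaults to off
    if (enabled - use) - missing_enabled:
        return False                # a required-on flag is actually off
    if disabled & missing_enabled:
        return False                # atom forbids a flag that defaults to on
    if (disabled & use) - missing_disabled:
        return False                # a required-off flag is actually on
    return True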
diff --git a/portage_with_autodep/pym/portage/dbapi/_expand_new_virt.py b/portage_with_autodep/pym/portage/dbapi/_expand_new_virt.py
index d379b4c..9aa603d 100644
--- a/portage_with_autodep/pym/portage/dbapi/_expand_new_virt.py
+++ b/portage_with_autodep/pym/portage/dbapi/_expand_new_virt.py
@@ -1,8 +1,11 @@
-# Copyright 2011 Gentoo Foundation
+# Copyright 2011-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
import portage
from portage.dep import Atom, _get_useflag_re
+from portage.eapi import _get_eapi_attrs
def expand_new_virt(vardb, atom):
"""
@@ -44,6 +47,7 @@ def expand_new_virt(vardb, atom):
yield atom
continue
+ eapi_attrs = _get_eapi_attrs(eapi)
# Validate IUSE and USE, for early detection of vardb corruption.
useflag_re = _get_useflag_re(eapi)
valid_iuse = []
@@ -54,7 +58,11 @@ def expand_new_virt(vardb, atom):
valid_iuse.append(x)
valid_iuse = frozenset(valid_iuse)
- iuse_implicit_match = vardb.settings._iuse_implicit_match
+ if eapi_attrs.iuse_effective:
+ iuse_implicit_match = vardb.settings._iuse_effective_match
+ else:
+ iuse_implicit_match = vardb.settings._iuse_implicit_match
+
valid_use = []
for x in use.split():
if x in valid_iuse or iuse_implicit_match(x):
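
The loop above validates each IUSE token (minus its +/- default prefix) against the EAPI's use-flag regex and then keeps only USE flags that are either in the validated IUSE or accepted by the profile's implicit-IUSE matcher (EAPIs with IUSE_EFFECTIVE use the effective matcher instead). A sketch under those assumptions; the regex below only approximates portage's default pattern, which varies by EAPI:

import re

# close to portage's default use-flag regex; the exact pattern is EAPI-dependent
_useflag_re = re.compile(r'^[A-Za-z0-9][A-Za-z0-9+_@-]*$')

def effective_use(iuse, use, iuse_implicit_match):
    valid_iuse = frozenset(x.lstrip('+-') for x in iuse.split()
        if _useflag_re.match(x.lstrip('+-')))
    return [x for x in use.split()
        if x in valid_iuse or iuse_implicit_match(x)]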
diff --git a/portage_with_autodep/pym/portage/dbapi/_expand_new_virt.pyo b/portage_with_autodep/pym/portage/dbapi/_expand_new_virt.pyo
index 6c23a7e..7884393 100644
--- a/portage_with_autodep/pym/portage/dbapi/_expand_new_virt.pyo
+++ b/portage_with_autodep/pym/portage/dbapi/_expand_new_virt.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/portage/dbapi/bintree.py b/portage_with_autodep/pym/portage/dbapi/bintree.py
index a8027ee..b1f67ae 100644
--- a/portage_with_autodep/pym/portage/dbapi/bintree.py
+++ b/portage_with_autodep/pym/portage/dbapi/bintree.py
@@ -1,16 +1,18 @@
-# Copyright 1998-2012 Gentoo Foundation
+# Copyright 1998-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = ["bindbapi", "binarytree"]
import portage
portage.proxy.lazyimport.lazyimport(globals(),
- 'portage.checksum:hashfunc_map,perform_multiple_checksums,verify_all',
+ 'portage.checksum:hashfunc_map,perform_multiple_checksums,' + \
+ 'verify_all,_apply_hash_filter,_hash_filter',
'portage.dbapi.dep_expand:dep_expand',
- 'portage.dep:dep_getkey,isjustname,match_from_list',
+ 'portage.dep:dep_getkey,isjustname,isvalidatom,match_from_list',
'portage.output:EOutput,colorize',
'portage.locks:lockfile,unlockfile',
- 'portage.package.ebuild.doebuild:_vdb_use_conditional_atoms',
'portage.package.ebuild.fetch:_check_distfile,_hide_url_passwd',
'portage.update:update_dbentries',
'portage.util:atomic_ofstream,ensure_dirs,normalize_path,' + \
@@ -41,7 +43,9 @@ import subprocess
import sys
import tempfile
import textwrap
+import traceback
import warnings
+from gzip import GzipFile
from itertools import chain
try:
from urllib.parse import urlparse
@@ -49,8 +53,16 @@ except ImportError:
from urlparse import urlparse
if sys.hexversion >= 0x3000000:
+ _unicode = str
basestring = str
long = int
+else:
+ _unicode = unicode
+
+class UseCachedCopyOfRemoteIndex(Exception):
+ # If the local copy is recent enough
+ # then fetching the remote index can be skipped.
+ pass
class bindbapi(fakedbapi):
_known_keys = frozenset(list(fakedbapi._known_keys) + \
@@ -63,9 +75,10 @@ class bindbapi(fakedbapi):
self.cpdict={}
# Selectively cache metadata in order to optimize dep matching.
self._aux_cache_keys = set(
- ["BUILD_TIME", "CHOST", "DEPEND", "EAPI", "IUSE", "KEYWORDS",
+ ["BUILD_TIME", "CHOST", "DEPEND", "EAPI",
+ "HDEPEND", "IUSE", "KEYWORDS",
"LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE",
- "RDEPEND", "repository", "RESTRICT", "SLOT", "USE", "DEFINED_PHASES",
+ "RDEPEND", "repository", "RESTRICT", "SLOT", "USE", "DEFINED_PHASES"
])
self._aux_cache_slot_dict = slot_dict_class(self._aux_cache_keys)
self._aux_cache = {}
@@ -128,15 +141,15 @@ class bindbapi(fakedbapi):
if myval:
mydata[x] = " ".join(myval.split())
- if not mydata.setdefault('EAPI', _unicode_decode('0')):
- mydata['EAPI'] = _unicode_decode('0')
+ if not mydata.setdefault('EAPI', '0'):
+ mydata['EAPI'] = '0'
if cache_me:
aux_cache = self._aux_cache_slot_dict()
for x in self._aux_cache_keys:
- aux_cache[x] = mydata.get(x, _unicode_decode(''))
+ aux_cache[x] = mydata.get(x, '')
self._aux_cache[mycpv] = aux_cache
- return [mydata.get(x, _unicode_decode('')) for x in wants]
+ return [mydata.get(x, '') for x in wants]
def aux_update(self, cpv, values):
if not self.bintree.populated:
@@ -248,7 +261,7 @@ def _pkgindex_cpv_map_latest_build(pkgindex):
class binarytree(object):
"this tree scans for a list of all packages available in PKGDIR"
- def __init__(self, _unused=None, pkgdir=None,
+ def __init__(self, _unused=DeprecationWarning, pkgdir=None,
virtual=DeprecationWarning, settings=None):
if pkgdir is None:
@@ -257,11 +270,11 @@ class binarytree(object):
if settings is None:
raise TypeError("settings parameter is required")
- if _unused is not None and _unused != settings['ROOT']:
- warnings.warn("The root parameter of the "
+ if _unused is not DeprecationWarning:
+ warnings.warn("The first parameter of the "
"portage.dbapi.bintree.binarytree"
- " constructor is now unused. Use "
- "settings['ROOT'] instead.",
+ " constructor is now unused. Instead "
+ "settings['ROOT'] is used.",
DeprecationWarning, stacklevel=2)
if virtual is not DeprecationWarning:
@@ -293,22 +306,26 @@ class binarytree(object):
self._pkgindex_keys.update(["CPV", "MTIME", "SIZE"])
self._pkgindex_aux_keys = \
["BUILD_TIME", "CHOST", "DEPEND", "DESCRIPTION", "EAPI",
- "IUSE", "KEYWORDS", "LICENSE", "PDEPEND", "PROPERTIES",
- "PROVIDE", "RDEPEND", "repository", "SLOT", "USE", "DEFINED_PHASES",
+ "HDEPEND", "IUSE", "KEYWORDS", "LICENSE", "PDEPEND", "PROPERTIES",
+ "PROVIDE", "RESTRICT", "RDEPEND", "repository", "SLOT", "USE", "DEFINED_PHASES",
"BASE_URI"]
self._pkgindex_aux_keys = list(self._pkgindex_aux_keys)
self._pkgindex_use_evaluated_keys = \
- ("LICENSE", "RDEPEND", "DEPEND",
- "PDEPEND", "PROPERTIES", "PROVIDE")
+ ("DEPEND", "HDEPEND", "LICENSE", "RDEPEND",
+ "PDEPEND", "PROPERTIES", "PROVIDE", "RESTRICT")
self._pkgindex_header_keys = set([
"ACCEPT_KEYWORDS", "ACCEPT_LICENSE",
- "ACCEPT_PROPERTIES", "CBUILD",
+ "ACCEPT_PROPERTIES", "ACCEPT_RESTRICT", "CBUILD",
"CONFIG_PROTECT", "CONFIG_PROTECT_MASK", "FEATURES",
- "GENTOO_MIRRORS", "INSTALL_MASK", "SYNC", "USE"])
+ "GENTOO_MIRRORS", "INSTALL_MASK", "IUSE_IMPLICIT", "USE",
+ "USE_EXPAND", "USE_EXPAND_HIDDEN", "USE_EXPAND_IMPLICIT",
+ "USE_EXPAND_UNPREFIXED"])
self._pkgindex_default_pkg_data = {
"BUILD_TIME" : "",
+ "DEFINED_PHASES" : "",
"DEPEND" : "",
"EAPI" : "0",
+ "HDEPEND" : "",
"IUSE" : "",
"KEYWORDS": "",
"LICENSE" : "",
@@ -320,7 +337,6 @@ class binarytree(object):
"RESTRICT": "",
"SLOT" : "0",
"USE" : "",
- "DEFINED_PHASES" : "",
}
self._pkgindex_inherited_keys = ["CHOST", "repository"]
@@ -378,15 +394,24 @@ class binarytree(object):
if not origmatches:
return moves
for mycpv in origmatches:
+ try:
+ mycpv = self.dbapi._pkg_str(mycpv, None)
+ except (KeyError, InvalidData):
+ continue
mycpv_cp = portage.cpv_getkey(mycpv)
if mycpv_cp != origcp:
# Ignore PROVIDE virtual match.
continue
if repo_match is not None \
- and not repo_match(self.dbapi.aux_get(mycpv,
- ['repository'])[0]):
+ and not repo_match(mycpv.repo):
+ continue
+
+ # Use isvalidatom() to check if this move is valid for the
+ # EAPI (characters allowed in package names may vary).
+ if not isvalidatom(newcp, eapi=mycpv.eapi):
continue
- mynewcpv = mycpv.replace(mycpv_cp, str(newcp), 1)
+
+ mynewcpv = mycpv.replace(mycpv_cp, _unicode(newcp), 1)
myoldpkg = catsplit(mycpv)[1]
mynewpkg = catsplit(mynewcpv)[1]
@@ -405,7 +430,7 @@ class binarytree(object):
moves += 1
mytbz2 = portage.xpak.tbz2(tbz2path)
mydata = mytbz2.get_data()
- updated_items = update_dbentries([mylist], mydata)
+ updated_items = update_dbentries([mylist], mydata, parent=mycpv)
mydata.update(updated_items)
mydata[b'PF'] = \
_unicode_encode(mynewpkg + "\n",
@@ -541,6 +566,20 @@ class binarytree(object):
if not os.path.isdir(path):
raise
+ def _file_permissions(self, path):
+ try:
+ pkgdir_st = os.stat(self.pkgdir)
+ except OSError:
+ pass
+ else:
+ pkgdir_gid = pkgdir_st.st_gid
+ pkgdir_grp_mode = 0o0060 & pkgdir_st.st_mode
+ try:
+ portage.util.apply_permissions(path, gid=pkgdir_gid,
+ mode=pkgdir_grp_mode, mask=0)
+ except PortageException:
+ pass
+
def _move_to_all(self, cpv):
"""If the file exists, move it. Whether or not it exists, update state
for future getname() calls."""
@@ -796,9 +835,7 @@ class binarytree(object):
del pkgindex.packages[:]
pkgindex.packages.extend(iter(metadata.values()))
self._update_pkgindex_header(pkgindex.header)
- f = atomic_ofstream(self._pkgindex_file)
- pkgindex.write(f)
- f.close()
+ self._pkgindex_write(pkgindex)
if getbinpkgs and not self.settings["PORTAGE_BINHOST"]:
writemsg(_("!!! PORTAGE_BINHOST unset, but use is requested.\n"),
@@ -841,6 +878,7 @@ class binarytree(object):
if e.errno != errno.ENOENT:
raise
local_timestamp = pkgindex.header.get("TIMESTAMP", None)
+ remote_timestamp = None
rmt_idx = self._new_pkgindex()
proc = None
tmp_filename = None
@@ -849,41 +887,76 @@ class binarytree(object):
# protocols and requires the base url to have a trailing
# slash, so join manually...
url = base_url.rstrip("/") + "/Packages"
- try:
- f = _urlopen(url)
- except IOError:
- path = parsed_url.path.rstrip("/") + "/Packages"
+ f = None
+
+ # Don't use urlopen for https, since it doesn't support
+ # certificate/hostname verification (bug #469888).
+ if parsed_url.scheme not in ('https',):
+ try:
+ f = _urlopen(url, if_modified_since=local_timestamp)
+ if hasattr(f, 'headers') and f.headers.get('timestamp', ''):
+ remote_timestamp = f.headers.get('timestamp')
+ except IOError as err:
+ if hasattr(err, 'code') and err.code == 304: # not modified (since local_timestamp)
+ raise UseCachedCopyOfRemoteIndex()
+
+ if parsed_url.scheme in ('ftp', 'http', 'https'):
+ # This protocol is supposedly supported by urlopen,
+ # so apparently there's a problem with the url
+ # or a bug in urlopen.
+ if self.settings.get("PORTAGE_DEBUG", "0") != "0":
+ traceback.print_exc()
- if parsed_url.scheme == 'sftp':
- # The sftp command complains about 'Illegal seek' if
- # we try to make it write to /dev/stdout, so use a
- # temp file instead.
- fd, tmp_filename = tempfile.mkstemp()
- os.close(fd)
- if port is not None:
- port_args = ['-P', "%s" % (port,)]
- proc = subprocess.Popen(['sftp'] + port_args + \
- [user_passwd + host + ":" + path, tmp_filename])
- if proc.wait() != os.EX_OK:
raise
- f = open(tmp_filename, 'rb')
- elif parsed_url.scheme == 'ssh':
+
+ if f is None:
+
+ path = parsed_url.path.rstrip("/") + "/Packages"
+
+ if parsed_url.scheme == 'ssh':
+ # Use a pipe so that we can terminate the download
+ # early if we detect that the TIMESTAMP header
+ # matches that of the cached Packages file.
+ ssh_args = ['ssh']
if port is not None:
- port_args = ['-p', "%s" % (port,)]
- proc = subprocess.Popen(['ssh'] + port_args + \
- [user_passwd + host, '--', 'cat', path],
+ ssh_args.append("-p%s" % (port,))
+ # NOTE: shlex evaluates embedded quotes
+ ssh_args.extend(portage.util.shlex_split(
+ self.settings.get("PORTAGE_SSH_OPTS", "")))
+ ssh_args.append(user_passwd + host)
+ ssh_args.append('--')
+ ssh_args.append('cat')
+ ssh_args.append(path)
+
+ proc = subprocess.Popen(ssh_args,
stdout=subprocess.PIPE)
f = proc.stdout
else:
setting = 'FETCHCOMMAND_' + parsed_url.scheme.upper()
fcmd = self.settings.get(setting)
if not fcmd:
- raise
+ fcmd = self.settings.get('FETCHCOMMAND')
+ if not fcmd:
+ raise EnvironmentError("FETCHCOMMAND is unset")
+
fd, tmp_filename = tempfile.mkstemp()
tmp_dirname, tmp_basename = os.path.split(tmp_filename)
os.close(fd)
- success = portage.getbinpkg.file_get(url,
- tmp_dirname, fcmd=fcmd, filename=tmp_basename)
+
+ fcmd_vars = {
+ "DISTDIR": tmp_dirname,
+ "FILE": tmp_basename,
+ "URI": url
+ }
+
+ for k in ("PORTAGE_SSH_OPTS",):
+ try:
+ fcmd_vars[k] = self.settings[k]
+ except KeyError:
+ pass
+
+ success = portage.getbinpkg.file_get(
+ fcmd=fcmd, fcmd_vars=fcmd_vars)
if not success:
raise EnvironmentError("%s failed" % (setting,))
f = open(tmp_filename, 'rb')
@@ -892,7 +965,8 @@ class binarytree(object):
_encodings['repo.content'], errors='replace')
try:
rmt_idx.readHeader(f_dec)
- remote_timestamp = rmt_idx.header.get("TIMESTAMP", None)
+ if not remote_timestamp: # in case it had not been read from HTTP header
+ remote_timestamp = rmt_idx.header.get("TIMESTAMP", None)
if not remote_timestamp:
# no timestamp in the header, something's wrong
pkgindex = None
@@ -920,6 +994,12 @@ class binarytree(object):
writemsg("\n\n!!! %s\n" % \
_("Timed out while closing connection to binhost"),
noiselevel=-1)
+ except UseCachedCopyOfRemoteIndex:
+ writemsg_stdout("\n")
+ writemsg_stdout(
+ colorize("GOOD", _("Local copy of remote index is up-to-date and will be used.")) + \
+ "\n")
+ rmt_idx = pkgindex
except EnvironmentError as e:
writemsg(_("\n\n!!! Error fetching binhost package" \
" info from '%s'\n") % _hide_url_passwd(base_url))
@@ -988,75 +1068,7 @@ class binarytree(object):
# Local package instances override remote instances.
for cpv in metadata:
self._remotepkgs.pop(cpv, None)
- continue
- try:
- chunk_size = long(self.settings["PORTAGE_BINHOST_CHUNKSIZE"])
- if chunk_size < 8:
- chunk_size = 8
- except (ValueError, KeyError):
- chunk_size = 3000
- writemsg_stdout("\n")
- writemsg_stdout(
- colorize("GOOD", _("Fetching bininfo from ")) + \
- _hide_url_passwd(base_url) + "\n")
- remotepkgs = portage.getbinpkg.dir_get_metadata(
- base_url, chunk_size=chunk_size)
-
- for mypkg, remote_metadata in remotepkgs.items():
- mycat = remote_metadata.get("CATEGORY")
- if mycat is None:
- #old-style or corrupt package
- writemsg(_("!!! Invalid remote binary package: %s\n") % mypkg,
- noiselevel=-1)
- continue
- mycat = mycat.strip()
- try:
- fullpkg = _pkg_str(mycat+"/"+mypkg[:-5])
- except InvalidData:
- writemsg(_("!!! Invalid remote binary package: %s\n") % mypkg,
- noiselevel=-1)
- continue
-
- if fullpkg in metadata:
- # When using this old protocol, comparison with the remote
- # package isn't supported, so the local package is always
- # preferred even if getbinpkgsonly is enabled.
- continue
-
- if not self.dbapi._category_re.match(mycat):
- writemsg(_("!!! Remote binary package has an " \
- "unrecognized category: '%s'\n") % fullpkg,
- noiselevel=-1)
- writemsg(_("!!! '%s' has a category that is not" \
- " listed in %setc/portage/categories\n") % \
- (fullpkg, self.settings["PORTAGE_CONFIGROOT"]),
- noiselevel=-1)
- continue
- mykey = portage.cpv_getkey(fullpkg)
- try:
- # invalid tbz2's can hurt things.
- self.dbapi.cpv_inject(fullpkg)
- for k, v in remote_metadata.items():
- remote_metadata[k] = v.strip()
- remote_metadata["BASE_URI"] = base_url
-
- # Eliminate metadata values with names that digestCheck
- # uses, since they are not valid when using the old
- # protocol. Typically this is needed for SIZE metadata
- # which corresponds to the size of the unpacked files
- # rather than the binpkg file size, triggering digest
- # verification failures as reported in bug #303211.
- remote_metadata.pop('SIZE', None)
- for k in portage.checksum.hashfunc_map:
- remote_metadata.pop(k, None)
-
- self._remotepkgs[fullpkg] = remote_metadata
- except SystemExit as e:
- raise
- except:
- writemsg(_("!!! Failed to inject remote binary package: %s\n") % fullpkg,
- noiselevel=-1)
- continue
+
self.populated=1
def inject(self, cpv, filename=None):
@@ -1110,6 +1122,10 @@ class binarytree(object):
if not samefile:
self._ensure_dir(os.path.dirname(new_filename))
_movefile(filename, new_filename, mysettings=self.settings)
+ full_path = new_filename
+
+ self._file_permissions(full_path)
+
if self._all_directory and \
self.getname(cpv).split(os.path.sep)[-2] == "All":
self._create_symlink(cpv)
@@ -1157,13 +1173,35 @@ class binarytree(object):
pkgindex.packages.append(d)
self._update_pkgindex_header(pkgindex.header)
- f = atomic_ofstream(os.path.join(self.pkgdir, "Packages"))
- pkgindex.write(f)
- f.close()
+ self._pkgindex_write(pkgindex)
+
finally:
if pkgindex_lock:
unlockfile(pkgindex_lock)
+ def _pkgindex_write(self, pkgindex):
+ contents = codecs.getwriter(_encodings['repo.content'])(io.BytesIO())
+ pkgindex.write(contents)
+ contents = contents.getvalue()
+ atime = mtime = long(pkgindex.header["TIMESTAMP"])
+ output_files = [(atomic_ofstream(self._pkgindex_file, mode="wb"),
+ self._pkgindex_file, None)]
+
+ if "compress-index" in self.settings.features:
+ gz_fname = self._pkgindex_file + ".gz"
+ fileobj = atomic_ofstream(gz_fname, mode="wb")
+ output_files.append((GzipFile(filename='', mode="wb",
+ fileobj=fileobj, mtime=mtime), gz_fname, fileobj))
+
+ for f, fname, f_close in output_files:
+ f.write(contents)
+ f.close()
+ if f_close is not None:
+ f_close.close()
+ self._file_permissions(fname)
+ # some seconds might have elapsed since TIMESTAMP
+ os.utime(fname, (atime, mtime))
+
def _pkgindex_entry(self, cpv):
"""
Performs checksums and evaluates USE flag conditionals.
@@ -1223,6 +1261,16 @@ class binarytree(object):
else:
header.pop(k, None)
+ # These values may be useful for using a binhost without
+ # having a local copy of the profile (bug #470006).
+ for k in self.settings.get("USE_EXPAND_IMPLICIT", "").split():
+ k = "USE_EXPAND_VALUES_" + k
+ v = self.settings.get(k)
+ if v:
+ header[k] = v
+ else:
+ header.pop(k, None)
+
def _pkgindex_version_supported(self, pkgindex):
version = pkgindex.header.get("VERSION")
if version:
@@ -1235,11 +1283,6 @@ class binarytree(object):
def _eval_use_flags(self, cpv, metadata):
use = frozenset(metadata["USE"].split())
- raw_use = use
- iuse = set(f.lstrip("-+") for f in metadata["IUSE"].split())
- use = [f for f in use if f in iuse]
- use.sort()
- metadata["USE"] = " ".join(use)
for k in self._pkgindex_use_evaluated_keys:
if k.endswith('DEPEND'):
token_class = Atom
@@ -1248,7 +1291,7 @@ class binarytree(object):
try:
deps = metadata[k]
- deps = use_reduce(deps, uselist=raw_use, token_class=token_class)
+ deps = use_reduce(deps, uselist=use, token_class=token_class)
deps = paren_enclose(deps)
except portage.exception.InvalidDependString as e:
writemsg("%s: %s\n" % (k, str(e)),
@@ -1313,6 +1356,8 @@ class binarytree(object):
"""Returns the URI to the Packages file for a given package."""
return self._pkgindex_uri.get(pkgname)
+
+
def gettbz2(self, pkgname):
"""Fetches the package from a remote site, if necessary. Attempts to
resume if the file appears to be partially downloaded."""
@@ -1320,7 +1365,7 @@ class binarytree(object):
tbz2name = os.path.basename(tbz2_path)
resume = False
if os.path.exists(tbz2_path):
- if (tbz2name not in self.invalids):
+ if tbz2name[:-5] not in self.invalids:
return
else:
resume = True
@@ -1370,19 +1415,14 @@ class binarytree(object):
f.close()
return pkgindex
- def digestCheck(self, pkg):
- """
- Verify digests for the given package and raise DigestException
- if verification fails.
- @rtype: bool
- @return: True if digests could be located, False otherwise.
- """
- cpv = pkg
- if not isinstance(cpv, basestring):
+ def _get_digests(self, pkg):
+
+ try:
cpv = pkg.cpv
- pkg = None
+ except AttributeError:
+ cpv = pkg
- pkg_path = self.getname(cpv)
+ digests = {}
metadata = None
if self._remotepkgs is None or cpv not in self._remotepkgs:
for d in self._load_pkgindex().packages:
@@ -1392,9 +1432,8 @@ class binarytree(object):
else:
metadata = self._remotepkgs[cpv]
if metadata is None:
- return False
+ return digests
- digests = {}
for k in hashfunc_map:
v = metadata.get(k)
if not v:
@@ -1408,9 +1447,31 @@ class binarytree(object):
writemsg(_("!!! Malformed SIZE attribute in remote " \
"metadata for '%s'\n") % cpv)
+ return digests
+
+ def digestCheck(self, pkg):
+ """
+ Verify digests for the given package and raise DigestException
+ if verification fails.
+ @rtype: bool
+ @return: True if digests could be located, False otherwise.
+ """
+
+ digests = self._get_digests(pkg)
+
if not digests:
return False
+ try:
+ cpv = pkg.cpv
+ except AttributeError:
+ cpv = pkg
+
+ pkg_path = self.getname(cpv)
+ hash_filter = _hash_filter(
+ self.settings.get("PORTAGE_CHECKSUM_FILTER", ""))
+ if not hash_filter.transparent:
+ digests = _apply_hash_filter(digests, hash_filter)
eout = EOutput()
eout.quiet = self.settings.get("PORTAGE_QUIET") == "1"
ok, st = _check_distfile(pkg_path, digests, eout, show_errors=0)
@@ -1426,9 +1487,7 @@ class binarytree(object):
"Get a slot for a catpkg; assume it exists."
myslot = ""
try:
- myslot = self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
- except SystemExit as e:
- raise
- except Exception as e:
+ myslot = self.dbapi._pkg_str(mycatpkg, None).slot
+ except KeyError:
pass
return myslot
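
The binhost refresh above sends the cached TIMESTAMP as an If-Modified-Since condition and treats HTTP 304 as "keep the local Packages file", short-circuiting via UseCachedCopyOfRemoteIndex. The commit's _urlopen wrapper accepts if_modified_since directly; the sketch below approximates the same flow with plain urllib:

try:
    from urllib.request import Request, urlopen
    from urllib.error import HTTPError
except ImportError:                      # Python 2
    from urllib2 import Request, urlopen, HTTPError

class UseCachedCopyOfRemoteIndex(Exception):
    pass

def fetch_index(url, local_timestamp=None):
    req = Request(url)
    if local_timestamp:
        req.add_header('If-Modified-Since', local_timestamp)
    try:
        return urlopen(req)
    except HTTPError as err:
        if err.code == 304:              # not modified since local_timestamp
            raise UseCachedCopyOfRemoteIndex()
        raise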
diff --git a/portage_with_autodep/pym/portage/dbapi/bintree.pyo b/portage_with_autodep/pym/portage/dbapi/bintree.pyo
index f99f377..90d0c6a 100644
--- a/portage_with_autodep/pym/portage/dbapi/bintree.pyo
+++ b/portage_with_autodep/pym/portage/dbapi/bintree.pyo
Binary files differ
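
_pkgindex_write() above renders the index once, writes it to Packages and, with FEATURES=compress-index, to Packages.gz, then forces both files' mtime back to the index TIMESTAMP header so comparisons against remote copies stay meaningful. A simplified sketch without the atomic_ofstream and permission handling; write_index is an illustrative name:

import gzip
import os

def write_index(path, contents, timestamp):
    # contents: the serialized index as bytes; timestamp: epoch seconds
    with open(path, "wb") as f:
        f.write(contents)
    with open(path + ".gz", "wb") as raw:
        with gzip.GzipFile(filename='', mode="wb", fileobj=raw,
                mtime=timestamp) as gz:
            gz.write(contents)
    for fname in (path, path + ".gz"):
        # some seconds may have elapsed since TIMESTAMP was recorded
        os.utime(fname, (timestamp, timestamp))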
diff --git a/portage_with_autodep/pym/portage/dbapi/cpv_expand.py b/portage_with_autodep/pym/portage/dbapi/cpv_expand.py
index 947194c..70ee782 100644
--- a/portage_with_autodep/pym/portage/dbapi/cpv_expand.py
+++ b/portage_with_autodep/pym/portage/dbapi/cpv_expand.py
@@ -1,6 +1,8 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = ["cpv_expand"]
import portage
diff --git a/portage_with_autodep/pym/portage/dbapi/cpv_expand.pyo b/portage_with_autodep/pym/portage/dbapi/cpv_expand.pyo
index cf1a428..7c38720 100644
--- a/portage_with_autodep/pym/portage/dbapi/cpv_expand.pyo
+++ b/portage_with_autodep/pym/portage/dbapi/cpv_expand.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/portage/dbapi/dep_expand.py b/portage_with_autodep/pym/portage/dbapi/dep_expand.py
index ac8ccf4..3de5d8f 100644
--- a/portage_with_autodep/pym/portage/dbapi/dep_expand.py
+++ b/portage_with_autodep/pym/portage/dbapi/dep_expand.py
@@ -1,6 +1,8 @@
-# Copyright 2010 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = ["dep_expand"]
import re
@@ -23,7 +25,7 @@ def dep_expand(mydep, mydb=None, use_cache=1, settings=None):
if mydep[0] == "*":
mydep = mydep[1:]
orig_dep = mydep
- has_cat = '/' in orig_dep
+ has_cat = '/' in orig_dep.split(':')[0]
if not has_cat:
alphanum = re.search(r'\w', orig_dep)
if alphanum:
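
The dep_expand() fix above stops a '/' inside a slot or sub-slot from being mistaken for a category separator by only inspecting the part before the first ':'. For example (has_category is an illustrative name):

def has_category(dep):
    return '/' in dep.split(':')[0]

# old check: '/' in "perl:0/5.20"          -> True  (wrongly sees a category)
# new check: has_category("perl:0/5.20")   -> False (slash is in the sub-slot)
# new check: has_category("dev-lang/perl") -> True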
diff --git a/portage_with_autodep/pym/portage/dbapi/dep_expand.pyo b/portage_with_autodep/pym/portage/dbapi/dep_expand.pyo
index b323f5b..bcaf8e3 100644
--- a/portage_with_autodep/pym/portage/dbapi/dep_expand.pyo
+++ b/portage_with_autodep/pym/portage/dbapi/dep_expand.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/portage/dbapi/porttree.py b/portage_with_autodep/pym/portage/dbapi/porttree.py
index c5ee770..fc3fc03 100644
--- a/portage_with_autodep/pym/portage/dbapi/porttree.py
+++ b/portage_with_autodep/pym/portage/dbapi/porttree.py
@@ -1,6 +1,8 @@
-# Copyright 1998-2012 Gentoo Foundation
+# Copyright 1998-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = [
"close_portdbapi_caches", "FetchlistDict", "portagetree", "portdbapi"
]
@@ -10,7 +12,7 @@ portage.proxy.lazyimport.lazyimport(globals(),
'portage.checksum',
'portage.data:portage_gid,secpass',
'portage.dbapi.dep_expand:dep_expand',
- 'portage.dep:Atom,dep_getkey,match_from_list,use_reduce',
+ 'portage.dep:Atom,dep_getkey,match_from_list,use_reduce,_match_slot',
'portage.package.ebuild.doebuild:doebuild',
'portage.util:ensure_dirs,shlex_split,writemsg,writemsg_level',
'portage.util.listdir:listdir',
@@ -22,7 +24,8 @@ from portage.cache.cache_errors import CacheError
from portage.cache.mappings import Mapping
from portage.dbapi import dbapi
from portage.exception import PortageException, \
- FileNotFound, InvalidAtom, InvalidDependString, InvalidPackageName
+ FileNotFound, InvalidAtom, InvalidData, \
+ InvalidDependString, InvalidPackageName
from portage.localization import _
from portage import eclass_cache, \
@@ -32,21 +35,74 @@ from portage import os
from portage import _encodings
from portage import _unicode_encode
from portage import OrderedDict
+from portage.util._eventloop.EventLoop import EventLoop
+from portage.util._eventloop.global_event_loop import global_event_loop
from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
-from _emerge.PollScheduler import PollScheduler
import os as _os
import sys
import traceback
import warnings
+try:
+ from urllib.parse import urlparse
+except ImportError:
+ from urlparse import urlparse
+
if sys.hexversion >= 0x3000000:
basestring = str
long = int
+def close_portdbapi_caches():
+ # The python interpreter does _not_ guarantee that destructors are
+ # called for objects that remain when the interpreter exits, so we
+ # use an atexit hook to call destructors for any global portdbapi
+ # instances that may have been constructed.
+ try:
+ portage._legacy_globals_constructed
+ except AttributeError:
+ pass
+ else:
+ if "db" in portage._legacy_globals_constructed:
+ try:
+ db = portage.db
+ except AttributeError:
+ pass
+ else:
+ if isinstance(db, dict):
+ for x in db.values():
+ try:
+ if "porttree" in x.lazy_items:
+ continue
+ except (AttributeError, TypeError):
+ continue
+ try:
+ x = x.pop("porttree").dbapi
+ except (AttributeError, KeyError):
+ continue
+ if not isinstance(x, portdbapi):
+ continue
+ x.close_caches()
+
+portage.process.atexit_register(close_portdbapi_caches)
+
+# It used to be necessary for API consumers to remove portdbapi instances
+# from portdbapi_instances, in order to avoid having accumulated instances
+# consume memory. Now, portdbapi_instances is just an empty dummy list, so
+# for backward compatibility, ignore ValueError for removal on non-existent
+# items.
+class _dummy_list(list):
+ def remove(self, item):
+ # TODO: Trigger a DeprecationWarning here, after stable portage
+ # has dummy portdbapi_instances.
+ try:
+ list.remove(self, item)
+ except ValueError:
+ pass
+
class portdbapi(dbapi):
"""this tree will scan a portage directory located at root (passed to init)"""
- portdbapi_instances = []
+ portdbapi_instances = _dummy_list()
_use_mutable = True
@property
@@ -64,14 +120,13 @@ class portdbapi(dbapi):
return None
return main_repo.eclass_db
- def __init__(self, _unused_param=None, mysettings=None):
+ def __init__(self, _unused_param=DeprecationWarning, mysettings=None):
"""
@param _unused_param: deprecated, use mysettings['PORTDIR'] instead
@type _unused_param: None
@param mysettings: an immutable config instance
@type mysettings: portage.config
"""
- portdbapi.portdbapi_instances.append(self)
from portage import config
if mysettings:
@@ -80,7 +135,7 @@ class portdbapi(dbapi):
from portage import settings
self.settings = config(clone=settings)
- if _unused_param is not None:
+ if _unused_param is not DeprecationWarning:
warnings.warn("The first parameter of the " + \
"portage.dbapi.porttree.portdbapi" + \
" constructor is unused since portage-2.1.8. " + \
@@ -95,7 +150,6 @@ class portdbapi(dbapi):
# this purpose because doebuild makes many changes to the config
# instance that is passed in.
self.doebuild_settings = config(clone=self.settings)
- self._scheduler = PollScheduler().sched_iface
self.depcachedir = os.path.realpath(self.settings.depcachedir)
if os.environ.get("SANDBOX_ON") == "1":
@@ -152,10 +206,10 @@ class portdbapi(dbapi):
# portage group.
depcachedir_unshared = True
else:
- cache_kwargs.update({
+ cache_kwargs.update(portage._native_kwargs({
'gid' : portage_gid,
'perms' : 0o664
- })
+ }))
# If secpass < 1, we don't want to write to the cache
# since then we won't be able to apply group permissions
@@ -186,13 +240,25 @@ class portdbapi(dbapi):
self._pregen_auxdb[x] = cache
# Selectively cache metadata in order to optimize dep matching.
self._aux_cache_keys = set(
- ["DEPEND", "EAPI", "INHERITED", "IUSE", "KEYWORDS", "LICENSE",
+ ["DEPEND", "EAPI", "HDEPEND",
+ "INHERITED", "IUSE", "KEYWORDS", "LICENSE",
"PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND", "repository",
"RESTRICT", "SLOT", "DEFINED_PHASES", "REQUIRED_USE"])
self._aux_cache = {}
self._broken_ebuilds = set()
+ @property
+ def _event_loop(self):
+ if portage._internal_caller:
+ # For internal portage usage, the global_event_loop is safe.
+ return global_event_loop()
+ else:
+ # For external API consumers, use a local EventLoop, since
+ # we don't want to assume that it's safe to override the
+ # global SIGCHLD handler.
+ return EventLoop(main=False)
+
def _create_pregen_cache(self, tree):
conf = self.repositories.get_repo_for_location(tree)
cache = conf.get_pregenerated_cache(
@@ -202,6 +268,13 @@ class portdbapi(dbapi):
cache.ec = self.repositories.get_repo_for_location(tree).eclass_db
except AttributeError:
pass
+
+ if not cache.complete_eclass_entries:
+ warnings.warn(
+ ("Repository '%s' used deprecated 'pms' cache format. "
+ "Please migrate to 'md5-dict' format.") % (conf.name,),
+ DeprecationWarning)
+
return cache
def _init_cache_dirs(self):
@@ -446,7 +519,7 @@ class portdbapi(dbapi):
proc = EbuildMetadataPhase(cpv=mycpv,
ebuild_hash=ebuild_hash, portdb=self,
- repo_path=mylocation, scheduler=self._scheduler,
+ repo_path=mylocation, scheduler=self._event_loop,
settings=self.doebuild_settings)
proc.start()
@@ -626,13 +699,14 @@ class portdbapi(dbapi):
else:
return 0
- def cp_all(self, categories=None, trees=None):
+ def cp_all(self, categories=None, trees=None, reverse=False):
"""
This returns a list of all keys in our tree or trees
@param categories: optional list of categories to search or
defaults to self.settings.categories
@param trees: optional list of trees to search the categories in or
defaults to self.porttrees
+ @param reverse: reverse sort order (default is False)
@rtype list of [cat/pkg,...]
"""
d = {}
@@ -651,7 +725,7 @@ class portdbapi(dbapi):
continue
d[atom.cp] = None
l = list(d)
- l.sort()
+ l.sort(reverse=reverse)
return l
def cp_list(self, mycp, use_cache=1, mytree=None):
@@ -825,18 +899,24 @@ class portdbapi(dbapi):
# ebuild not in this repo, or masked by corruption
continue
- if visibility_filter and not self._visible(cpv, metadata):
+ try:
+ pkg_str = _pkg_str(cpv, metadata=metadata,
+ settings=self.settings)
+ except InvalidData:
+ continue
+
+ if visibility_filter and not self._visible(pkg_str, metadata):
continue
if mydep.slot is not None and \
- mydep.slot != metadata["SLOT"]:
+ not _match_slot(mydep, pkg_str):
continue
if mydep.unevaluated_atom.use is not None and \
- not self._match_use(mydep, cpv, metadata):
+ not self._match_use(mydep, pkg_str, metadata):
continue
- myval.append(cpv)
+ myval.append(pkg_str)
# only yield a given cpv once
break
@@ -959,19 +1039,16 @@ class portdbapi(dbapi):
return False
if settings._getMissingProperties(cpv, metadata):
return False
+ if settings._getMissingRestrict(cpv, metadata):
+ return False
except InvalidDependString:
return False
return True
-def close_portdbapi_caches():
- for i in portdbapi.portdbapi_instances:
- i.close_caches()
-
-portage.process.atexit_register(portage.portageexit)
-
class portagetree(object):
- def __init__(self, root=None, virtual=DeprecationWarning, settings=None):
+ def __init__(self, root=DeprecationWarning, virtual=DeprecationWarning,
+ settings=None):
"""
Constructor for a PortageTree
@@ -987,7 +1064,7 @@ class portagetree(object):
settings = portage.settings
self.settings = settings
- if root is not None and root != settings['ROOT']:
+ if root is not DeprecationWarning:
warnings.warn("The root parameter of the " + \
"portage.dbapi.porttree.portagetree" + \
" constructor is now unused. Use " + \
@@ -1055,10 +1132,8 @@ class portagetree(object):
"Get a slot for a catpkg; assume it exists."
myslot = ""
try:
- myslot = self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
- except SystemExit:
- raise
- except Exception:
+ myslot = self.dbapi._pkg_str(mycatpkg, None).slot
+ except KeyError:
pass
return myslot
@@ -1130,9 +1205,18 @@ def _parse_uri_map(cpv, metadata, use=None):
uri_set = uri_map.get(distfile)
if uri_set is None:
- uri_set = set()
+ # Use OrderedDict to preserve order from SRC_URI
+ # while ensuring uniqueness.
+ uri_set = OrderedDict()
uri_map[distfile] = uri_set
- uri_set.add(uri)
- uri = None
+
+ # SRC_URI may contain a file name with no scheme, and in
+ # this case it does not belong in uri_set.
+ if urlparse(uri).scheme:
+ uri_set[uri] = True
+
+ # Convert OrderedDicts to tuples.
+ for k, v in uri_map.items():
+ uri_map[k] = tuple(v)
return uri_map
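
_parse_uri_map() above now uses OrderedDict keys as an ordered set: mirror order from SRC_URI is preserved, duplicates are dropped, and bare file names without a URL scheme are excluded. A sketch of that bookkeeping; add_uri and finalize are illustrative helpers, not the commit's API:

from collections import OrderedDict
try:
    from urllib.parse import urlparse
except ImportError:                      # Python 2
    from urlparse import urlparse

def add_uri(uri_map, distfile, uri):
    uri_set = uri_map.setdefault(distfile, OrderedDict())
    if urlparse(uri).scheme:             # skip scheme-less file names
        uri_set[uri] = True

def finalize(uri_map):
    # convert the ordered sets to tuples, as the diff does
    return dict((k, tuple(v)) for k, v in uri_map.items())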
diff --git a/portage_with_autodep/pym/portage/dbapi/porttree.pyo b/portage_with_autodep/pym/portage/dbapi/porttree.pyo
index fb57919..43ce4a8 100644
--- a/portage_with_autodep/pym/portage/dbapi/porttree.pyo
+++ b/portage_with_autodep/pym/portage/dbapi/porttree.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/portage/dbapi/vartree.py b/portage_with_autodep/pym/portage/dbapi/vartree.py
index 517c873..ed62323 100644
--- a/portage_with_autodep/pym/portage/dbapi/vartree.py
+++ b/portage_with_autodep/pym/portage/dbapi/vartree.py
@@ -1,6 +1,8 @@
-# Copyright 1998-2012 Gentoo Foundation
+# Copyright 1998-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = [
"vardbapi", "vartree", "dblink"] + \
["write_contents", "tar_contents"]
@@ -11,8 +13,10 @@ portage.proxy.lazyimport.lazyimport(globals(),
'portage.data:portage_gid,portage_uid,secpass',
'portage.dbapi.dep_expand:dep_expand',
'portage.dbapi._MergeProcess:MergeProcess',
- 'portage.dep:dep_getkey,isjustname,match_from_list,' + \
- 'use_reduce,_slot_re',
+ 'portage.dbapi._SyncfsProcess:SyncfsProcess',
+ 'portage.dep:dep_getkey,isjustname,isvalidatom,match_from_list,' + \
+ 'use_reduce,_slot_separator,_repo_separator',
+ 'portage.eapi:_get_eapi_attrs',
'portage.elog:collect_ebuild_messages,collect_messages,' + \
'elog_process,_merge_logentries',
'portage.locks:lockdir,unlockdir,lockfile,unlockfile',
@@ -20,7 +24,7 @@ portage.proxy.lazyimport.lazyimport(globals(),
'portage.package.ebuild.doebuild:doebuild_environment,' + \
'_merge_unicode_error', '_spawn_phase',
'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
- 'portage.update:fixdbentries',
+ 'portage.package.ebuild._ipc.QueryCommand:QueryCommand',
'portage.util:apply_secpass_permissions,ConfigProtect,ensure_dirs,' + \
'writemsg,writemsg_level,write_atomic,atomic_ofstream,writedict,' + \
'grabdict,normalize_path,new_protect_filename',
@@ -30,15 +34,17 @@ portage.proxy.lazyimport.lazyimport(globals(),
'portage.util.movefile:movefile',
'portage.util._dyn_libs.PreservedLibsRegistry:PreservedLibsRegistry',
'portage.util._dyn_libs.LinkageMapELF:LinkageMapELF@LinkageMap',
+ 'portage.util._async.SchedulerInterface:SchedulerInterface',
+ 'portage.util._eventloop.EventLoop:EventLoop',
+ 'portage.util._eventloop.global_event_loop:global_event_loop',
'portage.versions:best,catpkgsplit,catsplit,cpv_getkey,vercmp,' + \
- '_pkgsplit@pkgsplit,_pkg_str',
+ '_get_slot_re,_pkgsplit@pkgsplit,_pkg_str,_unknown_repo',
'subprocess',
'tarfile',
)
from portage.const import CACHE_PATH, CONFIG_MEMORY_FILE, \
PORTAGE_PACKAGE_ATOM, PRIVATE_PATH, VDB_PATH
-from portage.const import _ENABLE_DYN_LINK_MAP, _ENABLE_PRESERVE_LIBS
from portage.dbapi import dbapi
from portage.exception import CommandNotFound, \
InvalidData, InvalidLocation, InvalidPackageName, \
@@ -59,8 +65,8 @@ from portage import _unicode_encode
from _emerge.EbuildBuildDir import EbuildBuildDir
from _emerge.EbuildPhase import EbuildPhase
from _emerge.emergelog import emergelog
-from _emerge.PollScheduler import PollScheduler
from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
+from _emerge.SpawnProcess import SpawnProcess
import errno
import fnmatch
@@ -70,6 +76,7 @@ import io
from itertools import chain
import logging
import os as _os
+import platform
import pwd
import re
import stat
@@ -108,7 +115,8 @@ class vardbapi(dbapi):
_aux_cache_keys_re = re.compile(r'^NEEDED\..*$')
_aux_multi_line_re = re.compile(r'^(CONTENTS|NEEDED\..*)$')
- def __init__(self, _unused_param=None, categories=None, settings=None, vartree=None):
+ def __init__(self, _unused_param=DeprecationWarning,
+ categories=None, settings=None, vartree=None):
"""
The categories parameter is unused since the dbapi class
now has a categories property that is generated from the
@@ -138,11 +146,11 @@ class vardbapi(dbapi):
settings = portage.settings
self.settings = settings
- if _unused_param is not None and _unused_param != settings['ROOT']:
+ if _unused_param is not DeprecationWarning:
warnings.warn("The first parameter of the "
"portage.dbapi.vartree.vardbapi"
- " constructor is now unused. Use "
- "settings['ROOT'] instead.",
+ " constructor is now unused. Instead "
+ "settings['ROOT'] is used.",
DeprecationWarning, stacklevel=2)
self._eroot = settings['EROOT']
@@ -159,7 +167,7 @@ class vardbapi(dbapi):
self.vartree = vartree
self._aux_cache_keys = set(
["BUILD_TIME", "CHOST", "COUNTER", "DEPEND", "DESCRIPTION",
- "EAPI", "HOMEPAGE", "IUSE", "KEYWORDS",
+ "EAPI", "HDEPEND", "HOMEPAGE", "IUSE", "KEYWORDS",
"LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND",
"repository", "RESTRICT" , "SLOT", "USE", "DEFINED_PHASES",
])
@@ -169,15 +177,9 @@ class vardbapi(dbapi):
self._counter_path = os.path.join(self._eroot,
CACHE_PATH, "counter")
- self._plib_registry = None
- if _ENABLE_PRESERVE_LIBS:
- self._plib_registry = PreservedLibsRegistry(settings["ROOT"],
- os.path.join(self._eroot, PRIVATE_PATH,
- "preserved_libs_registry"))
-
- self._linkmap = None
- if _ENABLE_DYN_LINK_MAP:
- self._linkmap = LinkageMap(self)
+ self._plib_registry = PreservedLibsRegistry(settings["ROOT"],
+ os.path.join(self._eroot, PRIVATE_PATH, "preserved_libs_registry"))
+ self._linkmap = LinkageMap(self)
self._owners = self._owners_db(self)
self._cached_counter = None
@@ -318,14 +320,24 @@ class vardbapi(dbapi):
if not origmatches:
return moves
for mycpv in origmatches:
+ try:
+ mycpv = self._pkg_str(mycpv, None)
+ except (KeyError, InvalidData):
+ continue
mycpv_cp = cpv_getkey(mycpv)
if mycpv_cp != origcp:
# Ignore PROVIDE virtual match.
continue
if repo_match is not None \
- and not repo_match(self.aux_get(mycpv, ['repository'])[0]):
+ and not repo_match(mycpv.repo):
continue
- mynewcpv = mycpv.replace(mycpv_cp, str(newcp), 1)
+
+ # Use isvalidatom() to check if this move is valid for the
+ # EAPI (characters allowed in package names may vary).
+ if not isvalidatom(newcp, eapi=mycpv.eapi):
+ continue
+
+ mynewcpv = mycpv.replace(mycpv_cp, _unicode(newcp), 1)
mynewcat = catsplit(newcp)[0]
origpath = self.getpath(mycpv)
if not os.path.exists(origpath):
@@ -355,7 +367,7 @@ class vardbapi(dbapi):
del e
write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
- fixdbentries([mylist], newpath)
+
return moves
def cp_list(self, mycp, use_cache=1):
@@ -363,7 +375,10 @@ class vardbapi(dbapi):
if mysplit[0] == '*':
mysplit[0] = mysplit[0][1:]
try:
- mystat = os.stat(self.getpath(mysplit[0])).st_mtime
+ if sys.hexversion >= 0x3030000:
+ mystat = os.stat(self.getpath(mysplit[0])).st_mtime_ns
+ else:
+ mystat = os.stat(self.getpath(mysplit[0])).st_mtime
except OSError:
mystat = 0
if use_cache and mycp in self.cpcache:
@@ -498,7 +513,10 @@ class vardbapi(dbapi):
return list(self._iter_match(mydep,
self.cp_list(mydep.cp, use_cache=use_cache)))
try:
- curmtime = os.stat(os.path.join(self._eroot, VDB_PATH, mycat)).st_mtime
+ if sys.hexversion >= 0x3030000:
+ curmtime = os.stat(os.path.join(self._eroot, VDB_PATH, mycat)).st_mtime_ns
+ else:
+ curmtime = os.stat(os.path.join(self._eroot, VDB_PATH, mycat)).st_mtime
except (IOError, OSError):
curmtime=0
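
Both hunks above apply the same interpreter-version guard: Python >= 3.3 exposes integer nanosecond timestamps, which avoid float-rounding surprises when the value is used as a cache key. The guard as a small helper (the helper name is hypothetical):

import os
import sys

def dir_mtime(path):
    # Integer nanoseconds on Python >= 3.3, float seconds otherwise.
    st = os.stat(path)
    if sys.hexversion >= 0x3030000:
        return st.st_mtime_ns
    return st.st_mtime
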
@@ -553,31 +571,32 @@ class vardbapi(dbapi):
def _aux_cache_init(self):
aux_cache = None
open_kwargs = {}
- if sys.hexversion >= 0x3000000:
+ if sys.hexversion >= 0x3000000 and sys.hexversion < 0x3020000:
# Buffered io triggers extreme performance issues in
# Unpickler.load() (problem observed with python-3.0.1).
# Unfortunately, performance is still poor relative to
- # python-2.x, but buffering makes it much worse.
+ # python-2.x, but buffering makes it much worse (problem
+ # appears to be solved in Python >=3.2 at least).
open_kwargs["buffering"] = 0
try:
- f = open(_unicode_encode(self._aux_cache_filename,
+ with open(_unicode_encode(self._aux_cache_filename,
encoding=_encodings['fs'], errors='strict'),
- mode='rb', **open_kwargs)
- mypickle = pickle.Unpickler(f)
- try:
- mypickle.find_global = None
- except AttributeError:
- # TODO: If py3k, override Unpickler.find_class().
- pass
- aux_cache = mypickle.load()
- f.close()
- del f
- except (AttributeError, EOFError, EnvironmentError, ValueError, pickle.UnpicklingError) as e:
+ mode='rb', **open_kwargs) as f:
+ mypickle = pickle.Unpickler(f)
+ try:
+ mypickle.find_global = None
+ except AttributeError:
+ # TODO: If py3k, override Unpickler.find_class().
+ pass
+ aux_cache = mypickle.load()
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except Exception as e:
if isinstance(e, EnvironmentError) and \
getattr(e, 'errno', None) in (errno.ENOENT, errno.EACCES):
pass
else:
- writemsg(_unicode_decode(_("!!! Error loading '%s': %s\n")) % \
+ writemsg(_("!!! Error loading '%s': %s\n") % \
(self._aux_cache_filename, e), noiselevel=-1)
del e
@@ -644,7 +663,8 @@ class vardbapi(dbapi):
if e.errno != errno.ENOENT:
raise
raise KeyError(mycpv)
- mydir_mtime = mydir_stat[stat.ST_MTIME]
+ # Use float mtime when available.
+ mydir_mtime = mydir_stat.st_mtime
pkg_data = self._aux_cache["packages"].get(mycpv)
pull_me = cache_these.union(wants)
mydata = {"_mtime_" : mydir_mtime}
@@ -657,13 +677,18 @@ class vardbapi(dbapi):
pkg_data = None
else:
cache_mtime, metadata = pkg_data
- if not isinstance(cache_mtime, (long, int)) or \
+ if not isinstance(cache_mtime, (float, long, int)) or \
not isinstance(metadata, dict):
pkg_data = None
if pkg_data:
cache_mtime, metadata = pkg_data
- cache_valid = cache_mtime == mydir_mtime
+ if isinstance(cache_mtime, float):
+ cache_valid = cache_mtime == mydir_stat.st_mtime
+ else:
+ # Cache may contain integer mtime.
+ cache_valid = cache_mtime == mydir_stat[stat.ST_MTIME]
+
if cache_valid:
# Migrate old metadata to unicode.
for k, v in metadata.items():
@@ -687,10 +712,11 @@ class vardbapi(dbapi):
(mydir_mtime, cache_data)
self._aux_cache["modified"].add(mycpv)
- if _slot_re.match(mydata['SLOT']) is None:
+ eapi_attrs = _get_eapi_attrs(mydata['EAPI'])
+ if _get_slot_re(eapi_attrs).match(mydata['SLOT']) is None:
# Empty or invalid slot triggers InvalidAtom exceptions when
# generating slot atoms for packages, so translate it to '0' here.
- mydata['SLOT'] = _unicode_decode('0')
+ mydata['SLOT'] = '0'
return [mydata[x] for x in wants]
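
The SLOT check above becomes EAPI-aware because newer EAPIs allow sub-slots. A sketch of the same normalization using the internal helpers imported earlier in this file (these are private portage APIs, so treat this as illustrative only):

from portage.eapi import _get_eapi_attrs
from portage.versions import _get_slot_re

def normalize_slot(slot, eapi):
    # Translate an empty or invalid SLOT to '0', as the hunk above does.
    if _get_slot_re(_get_eapi_attrs(eapi)).match(slot) is None:
        return '0'
    return slot
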
@@ -715,21 +741,18 @@ class vardbapi(dbapi):
results[x] = st[stat.ST_MTIME]
continue
try:
- myf = io.open(
+ with io.open(
_unicode_encode(os.path.join(mydir, x),
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'],
- errors='replace')
- try:
- myd = myf.read()
- finally:
- myf.close()
+ errors='replace') as f:
+ myd = f.read()
except IOError:
if x not in self._aux_cache_keys and \
self._aux_cache_keys_re.match(x) is None:
env_keys.append(x)
continue
- myd = _unicode_decode('')
+ myd = ''
# Preserve \n for metadata that is known to
# contain multiple lines.
@@ -743,13 +766,13 @@ class vardbapi(dbapi):
for k in env_keys:
v = env_results.get(k)
if v is None:
- v = _unicode_decode('')
+ v = ''
if self._aux_multi_line_re.match(k) is None:
v = " ".join(v.split())
results[k] = v
if results.get("EAPI") == "":
- results[_unicode_decode("EAPI")] = _unicode_decode('0')
+ results["EAPI"] = '0'
return results
@@ -869,11 +892,17 @@ class vardbapi(dbapi):
del myroot
counter = -1
try:
- cfile = io.open(
+ with io.open(
_unicode_encode(self._counter_path,
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'],
- errors='replace')
+ errors='replace') as f:
+ try:
+ counter = long(f.readline().strip())
+ except (OverflowError, ValueError) as e:
+ writemsg(_("!!! COUNTER file is corrupt: '%s'\n") %
+ self._counter_path, noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
except EnvironmentError as e:
# Silently allow ENOENT since files under
# /var/cache/ are allowed to disappear.
@@ -882,17 +911,6 @@ class vardbapi(dbapi):
self._counter_path, noiselevel=-1)
writemsg("!!! %s\n" % str(e), noiselevel=-1)
del e
- else:
- try:
- try:
- counter = long(cfile.readline().strip())
- finally:
- cfile.close()
- except (OverflowError, ValueError) as e:
- writemsg(_("!!! COUNTER file is corrupt: '%s'\n") % \
- self._counter_path, noiselevel=-1)
- writemsg("!!! %s\n" % str(e), noiselevel=-1)
- del e
if self._cached_counter == counter:
max_counter = counter
@@ -984,16 +1002,31 @@ class vardbapi(dbapi):
relative_filename = filename[root_len:]
contents_key = pkg._match_contents(relative_filename)
if contents_key:
- del new_contents[contents_key]
+ # It's possible for two different paths to refer to the same
+ # contents_key, due to directory symlinks. Therefore, pass a
+ # default value to pop, in order to avoid a KeyError which
+ # could otherwise be triggered (see bug #454400).
+ new_contents.pop(contents_key, None)
removed += 1
if removed:
- self._bump_mtime(pkg.mycpv)
- f = atomic_ofstream(os.path.join(pkg.dbdir, "CONTENTS"))
- write_contents(new_contents, root, f)
- f.close()
- self._bump_mtime(pkg.mycpv)
- pkg._clear_contents_cache()
+ self.writeContentsToContentsFile(pkg, new_contents)
+
+ def writeContentsToContentsFile(self, pkg, new_contents):
+ """
+ @param pkg: package to write contents file for
+ @type pkg: dblink
+ @param new_contents: contents to write to CONTENTS file
+ @type new_contents: contents dictionary of the form
+ {u'/path/to/file' : (contents_attribute 1, ...), ...}
+ """
+ root = self.settings['ROOT']
+ self._bump_mtime(pkg.mycpv)
+ f = atomic_ofstream(os.path.join(pkg.dbdir, "CONTENTS"))
+ write_contents(new_contents, root, f)
+ f.close()
+ self._bump_mtime(pkg.mycpv)
+ pkg._clear_contents_cache()
class _owners_cache(object):
"""
@@ -1238,18 +1271,35 @@ class vardbapi(dbapi):
name = os.path.basename(path.rstrip(os.path.sep))
path_info_list.append((path, name, is_basename))
+ # Do work via the global event loop, so that it can be used
+ # for indication of progress during the search (bug #461412).
+ event_loop = (portage._internal_caller and
+ global_event_loop() or EventLoop(main=False))
root = self._vardb._eroot
- for cpv in self._vardb.cpv_all():
- dblnk = self._vardb._dblink(cpv)
+ def search_pkg(cpv):
+ dblnk = self._vardb._dblink(cpv)
for path, name, is_basename in path_info_list:
if is_basename:
for p in dblnk.getcontents():
if os.path.basename(p) == name:
- yield dblnk, p[len(root):]
+ search_pkg.results.append((dblnk, p[len(root):]))
else:
if dblnk.isowner(path):
- yield dblnk, path
+ search_pkg.results.append((dblnk, path))
+ search_pkg.complete = True
+ return False
+
+ search_pkg.results = []
+
+ for cpv in self._vardb.cpv_all():
+ del search_pkg.results[:]
+ search_pkg.complete = False
+ event_loop.idle_add(search_pkg, cpv)
+ while not search_pkg.complete:
+ event_loop.iteration()
+ for result in search_pkg.results:
+ yield result
class vartree(object):
"this tree will scan a var/db/pkg database located at root (passed to init)"
@@ -1370,7 +1420,7 @@ class vartree(object):
def getslot(self, mycatpkg):
"Get a slot for a catpkg; assume it exists."
try:
- return self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
+ return self.dbapi._pkg_str(mycatpkg, None).slot
except KeyError:
return ""
@@ -1463,11 +1513,16 @@ class dblink(object):
self._contents_inodes = None
self._contents_basenames = None
self._linkmap_broken = False
+ self._device_path_map = {}
self._hardlink_merge_map = {}
self._hash_key = (self._eroot, self.mycpv)
self._protect_obj = None
self._pipe = pipe
+ # When necessary, this attribute is modified for
+ # compliance with RESTRICT=preserve-libs.
+ self._preserve_libs = "preserve-libs" in mysettings.features
+
def __hash__(self):
return hash(self._hash_key)
@@ -1510,7 +1565,11 @@ class dblink(object):
"""
Remove this entry from the database
"""
- if not os.path.exists(self.dbdir):
+ try:
+ os.lstat(self.dbdir)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ENOTDIR, errno.ESTALE):
+ raise
return
# Check validity of self.dbdir before attempting to remove it.
@@ -1527,6 +1586,14 @@ class dblink(object):
pass
self.vartree.dbapi._remove(self)
+ # Use self.dbroot since we need an existing path for syncfs.
+ try:
+ self._merged_path(self.dbroot, os.lstat(self.dbroot))
+ except OSError:
+ pass
+
+ self._post_merge_sync()
+
def clearcontents(self):
"""
For a given db entry (self), erase the CONTENTS values.
@@ -1552,18 +1619,18 @@ class dblink(object):
return self.contentscache
pkgfiles = {}
try:
- myc = io.open(_unicode_encode(contents_file,
+ with io.open(_unicode_encode(contents_file,
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'],
- errors='replace')
+ errors='replace') as f:
+ mylines = f.readlines()
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
del e
self.contentscache = pkgfiles
return pkgfiles
- mylines = myc.readlines()
- myc.close()
+
null_byte = "\0"
normalize_needed = self._normalize_needed
contents_re = self._contents_re
@@ -1578,7 +1645,7 @@ class dblink(object):
if myroot == os.path.sep:
myroot = None
# used to generate parent dir entries
- dir_entry = (_unicode_decode("dir"),)
+ dir_entry = ("dir",)
eroot_split_len = len(self.settings["EROOT"].split(os.sep)) - 1
pos = 0
errors = []
@@ -1678,8 +1745,11 @@ class dblink(object):
unmerge_preserve = \
self._find_libs_to_preserve(unmerge=True)
counter = self.vartree.dbapi.cpv_counter(self.mycpv)
- plib_registry.unregister(self.mycpv,
- self.settings["SLOT"], counter)
+ try:
+ slot = self.mycpv.slot
+ except AttributeError:
+ slot = _pkg_str(self.mycpv, slot=self.settings["SLOT"]).slot
+ plib_registry.unregister(self.mycpv, slot, counter)
if unmerge_preserve:
for path in sorted(unmerge_preserve):
contents_key = self._match_contents(path)
@@ -1689,7 +1759,7 @@ class dblink(object):
self._display_merge(_(">>> needed %s %s\n") % \
(obj_type, contents_key), noiselevel=-1)
plib_registry.register(self.mycpv,
- self.settings["SLOT"], counter, unmerge_preserve)
+ slot, counter, unmerge_preserve)
# Remove the preserved files from our contents
# so that they won't be unmerged.
self.vartree.dbapi.removeFromContents(self,
@@ -1759,7 +1829,8 @@ class dblink(object):
if self._scheduler is None:
# We create a scheduler instance and use it to
# log unmerge output separately from merge output.
- self._scheduler = PollScheduler().sched_iface
+ self._scheduler = SchedulerInterface(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False))
if self.settings.get("PORTAGE_BACKGROUND") == "subprocess":
if self.settings.get("PORTAGE_BACKGROUND_UNMERGE") == "1":
self.settings["PORTAGE_BACKGROUND"] = "1"
@@ -1775,11 +1846,16 @@ class dblink(object):
showMessage = self._display_merge
if self.vartree.dbapi._categories is not None:
self.vartree.dbapi._categories = None
+
+ # When others_in_slot is not None, the backup has already been
+ # handled by the caller.
+ caller_handles_backup = others_in_slot is not None
+
# When others_in_slot is supplied, the security check has already been
# done for this slot, so it shouldn't be repeated until the next
# replacement or unmerge operation.
if others_in_slot is None:
- slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
+ slot = self.vartree.dbapi._pkg_str(self.mycpv, None).slot
slot_matches = self.vartree.dbapi.match(
"%s:%s" % (portage.cpv_getkey(self.mycpv), slot))
others_in_slot = []
@@ -1823,8 +1899,9 @@ class dblink(object):
except UnsupportedAPIException as e:
eapi_unsupported = e
- self._prune_plib_registry(unmerge=True, needed=needed,
- preserve_paths=preserve_paths)
+ if self._preserve_libs and "preserve-libs" in \
+ self.settings["PORTAGE_RESTRICT"].split():
+ self._preserve_libs = False
builddir_lock = None
scheduler = self._scheduler
@@ -1832,7 +1909,7 @@ class dblink(object):
try:
# Only create builddir_lock if the caller
# has not already acquired the lock.
- if "PORTAGE_BUILDIR_LOCKED" not in self.settings:
+ if "PORTAGE_BUILDDIR_LOCKED" not in self.settings:
builddir_lock = EbuildBuildDir(
scheduler=scheduler,
settings=self.settings)
@@ -1840,6 +1917,19 @@ class dblink(object):
prepare_build_dirs(settings=self.settings, cleanup=True)
log_path = self.settings.get("PORTAGE_LOG_FILE")
+ # Do this before the following _prune_plib_registry call, since
+ # that removes preserved libraries from our CONTENTS, and we
+ # may want to backup those libraries first.
+ if not caller_handles_backup:
+ retval = self._pre_unmerge_backup(background)
+ if retval != os.EX_OK:
+ showMessage(_("!!! FAILED prerm: quickpkg: %s\n") % retval,
+ level=logging.ERROR, noiselevel=-1)
+ return retval
+
+ self._prune_plib_registry(unmerge=True, needed=needed,
+ preserve_paths=preserve_paths)
+
# Log the error after PORTAGE_LOG_FILE is initialized
# by prepare_build_dirs above.
if eapi_unsupported:
@@ -1848,7 +1938,7 @@ class dblink(object):
showMessage(_("!!! FAILED prerm: %s\n") % \
os.path.join(self.dbdir, "EAPI"),
level=logging.ERROR, noiselevel=-1)
- showMessage(_unicode_decode("%s\n") % (eapi_unsupported,),
+ showMessage("%s\n" % (eapi_unsupported,),
level=logging.ERROR, noiselevel=-1)
elif os.path.isfile(myebuildpath):
phase = EbuildPhase(background=background,
@@ -2037,7 +2127,7 @@ class dblink(object):
if others_in_slot is None:
others_in_slot = []
- slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
+ slot = self.vartree.dbapi._pkg_str(self.mycpv, None).slot
slot_matches = self.vartree.dbapi.match(
"%s:%s" % (portage.cpv_getkey(self.mycpv), slot))
for cur_cpv in slot_matches:
@@ -2062,7 +2152,9 @@ class dblink(object):
#process symlinks second-to-last, directories last.
mydirs = set()
- modprotect = os.path.join(self._eroot, "lib/modules/")
+
+ uninstall_ignore = portage.util.shlex_split(
+ self.settings.get("UNINSTALL_IGNORE", ""))
def unlink(file_name, lstatobj):
if bsd_chflags:
@@ -2092,6 +2184,14 @@ class dblink(object):
self._eerror("postrm",
["Could not chmod or unlink '%s': %s" % \
(file_name, ose)])
+ else:
+
+ # Even though the file no longer exists, we log it
+ # here so that _unmerge_dirs can see that we've
+ # removed a file from this device, and will record
+ # the parent directory for a syncfs call.
+ self._merged_path(file_name, lstatobj, exists=False)
+
finally:
if bsd_chflags and pflags != 0:
# Restore the parent flags we saved before unlinking
@@ -2169,6 +2269,24 @@ class dblink(object):
if lstatobj is None:
show_unmerge("---", unmerge_desc["!found"], file_type, obj)
continue
+
+ f_match = obj[len(eroot)-1:]
+ ignore = False
+ for pattern in uninstall_ignore:
+ if fnmatch.fnmatch(f_match, pattern):
+ ignore = True
+ break
+
+ if not ignore:
+ if islink and f_match in \
+ ("/lib", "/usr/lib", "/usr/local/lib"):
+ # Ignore libdir symlinks for bug #423127.
+ ignore = True
+
+ if ignore:
+ show_unmerge("---", unmerge_desc["cfgpro"], file_type, obj)
+ continue
+
# don't use EROOT, CONTENTS entries already contain EPREFIX
if obj.startswith(real_root):
relative_path = obj[real_root_len:]
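
UNINSTALL_IGNORE holds shell-style glob patterns that are matched with fnmatch against the EPREFIX-relative path. The matching in isolation (patterns and paths are hypothetical):

import fnmatch

uninstall_ignore = ["/lib/modules/*", "/var/lib/foo*"]

def is_ignored(f_match):
    return any(fnmatch.fnmatch(f_match, p) for p in uninstall_ignore)

print(is_ignored("/lib/modules/3.8.0/kernel/fs/ext4.ko"))  # True
print(is_ignored("/usr/bin/foo"))  # False
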
@@ -2178,8 +2296,9 @@ class dblink(object):
is_owned = True
break
- if file_type == "sym" and is_owned and \
- (islink and statobj and stat.S_ISDIR(statobj.st_mode)):
+ if is_owned and islink and \
+ file_type in ("sym", "dir") and \
+ statobj and stat.S_ISDIR(statobj.st_mode):
# A new instance of this package claims the file, so
# don't unmerge it. If the file is symlink to a
# directory and the unmerging package installed it as
@@ -2211,18 +2330,6 @@ class dblink(object):
continue
elif relative_path in cfgfiledict:
stale_confmem.append(relative_path)
- # next line includes a tweak to protect modules from being unmerged,
- # but we don't protect modules from being overwritten if they are
- # upgraded. We effectively only want one half of the config protection
- # functionality for /lib/modules. For portage-ng both capabilities
- # should be able to be independently specified.
- # TODO: For rebuilds, re-parent previous modules to the new
- # installed instance (so they are not orphans). For normal
- # uninstall (not rebuild/reinstall), remove the modules along
- # with all other files (leave no orphans).
- if obj.startswith(modprotect):
- show_unmerge("---", unmerge_desc["cfgpro"], file_type, obj)
- continue
# Don't unlink symlinks to directories here since that can
# remove /lib and /usr/lib symlinks.
@@ -2244,12 +2351,12 @@ class dblink(object):
show_unmerge("---", unmerge_desc["!mtime"], file_type, obj)
continue
- if pkgfiles[objkey][0] == "dir":
+ if file_type == "dir" and not islink:
if lstatobj is None or not stat.S_ISDIR(lstatobj.st_mode):
show_unmerge("---", unmerge_desc["!dir"], file_type, obj)
continue
mydirs.add((obj, (lstatobj.st_dev, lstatobj.st_ino)))
- elif pkgfiles[objkey][0] == "sym":
+ elif file_type == "sym" or (file_type == "dir" and islink):
if not islink:
show_unmerge("---", unmerge_desc["!sym"], file_type, obj)
continue
@@ -2359,7 +2466,11 @@ class dblink(object):
if protected_symlinks:
msg = "One or more symlinks to directories have been " + \
"preserved in order to ensure that files installed " + \
- "via these symlinks remain accessible:"
+ "via these symlinks remain accessible. " + \
+ "This indicates that the mentioned symlink(s) may " + \
+ "be obsolete remnants of an old install, and it " + \
+ "may be appropriate to replace a given symlink " + \
+ "with the directory that it points to."
lines = textwrap.wrap(msg, 72)
lines.append("")
flat_list = set()
@@ -2369,7 +2480,7 @@ class dblink(object):
lines.append("\t%s" % (os.path.join(real_root,
f.lstrip(os.sep))))
lines.append("")
- self._elog("eerror", "postrm", lines)
+ self._elog("elog", "postrm", lines)
# Remove stale entries from config memory.
if stale_confmem:
@@ -2501,15 +2612,19 @@ class dblink(object):
raise
del e
show_unmerge("!!!", "", "obj", child)
+
try:
+ parent_name = os.path.dirname(obj)
+ parent_stat = os.stat(parent_name)
+
if bsd_chflags:
lstatobj = os.lstat(obj)
if lstatobj.st_flags != 0:
bsd_chflags.lchflags(obj, 0)
- parent_name = os.path.dirname(obj)
+
# Use normal stat/chflags for the parent since we want to
# follow any symlinks to the real parent directory.
- pflags = os.stat(parent_name).st_flags
+ pflags = parent_stat.st_flags
if pflags != 0:
bsd_chflags.chflags(parent_name, 0)
try:
@@ -2518,13 +2633,34 @@ class dblink(object):
if bsd_chflags and pflags != 0:
# Restore the parent flags we saved before unlinking
bsd_chflags.chflags(parent_name, pflags)
+
+ # Record the parent directory for use in syncfs calls.
+ # Note that we use a realpath and a regular stat here, since
+ # we want to follow any symlinks back to the real device where
+ # the real parent directory resides.
+ self._merged_path(os.path.realpath(parent_name), parent_stat)
+
show_unmerge("<<<", "", "dir", obj)
except EnvironmentError as e:
if e.errno not in ignored_rmdir_errnos:
raise
if e.errno != errno.ENOENT:
show_unmerge("---", unmerge_desc["!empty"], "dir", obj)
- del e
+
+ # Since we didn't remove this directory, record the directory
+ # itself for use in syncfs calls, if we have removed another
+ # file from the same device.
+ # Note that we use a realpath and a regular stat here, since
+ # we want to follow any symlinks back to the real device where
+ # the real directory resides.
+ try:
+ dir_stat = os.stat(obj)
+ except OSError:
+ pass
+ else:
+ if dir_stat.st_dev in self._device_path_map:
+ self._merged_path(os.path.realpath(obj), dir_stat)
+
else:
# When a directory is successfully removed, there's
# no need to protect symlinks that point to it.
@@ -2751,7 +2887,7 @@ class dblink(object):
self.vartree.dbapi._linkmap is None or \
self.vartree.dbapi._plib_registry is None or \
(not unmerge and self._installed_instance is None) or \
- "preserve-libs" not in self.settings.features:
+ not self._preserve_libs:
return set()
os = _os_merge
@@ -3335,7 +3471,10 @@ class dblink(object):
else:
logdir = os.path.join(self.settings["T"], "logging")
ebuild_logentries = collect_ebuild_messages(logdir)
- py_logentries = collect_messages(key=cpv).get(cpv, {})
+ # phasefilter is irrelevant for the above collect_ebuild_messages
+ # call, since this package instance has a private logdir. However,
+ # it may be relevant for the following collect_messages call.
+ py_logentries = collect_messages(key=cpv, phasefilter=phasefilter).get(cpv, {})
logentries = _merge_logentries(py_logentries, ebuild_logentries)
funcnames = {
"INFO": "einfo",
@@ -3356,7 +3495,9 @@ class dblink(object):
str_buffer.append(' '.join(fields))
str_buffer.append('\n')
if str_buffer:
- os.write(self._pipe, _unicode_encode(''.join(str_buffer)))
+ str_buffer = _unicode_encode(''.join(str_buffer))
+ while str_buffer:
+ str_buffer = str_buffer[os.write(self._pipe, str_buffer):]
def _emerge_log(self, msg):
emergelog(False, msg)
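
The replacement loop above handles short writes: os.write() may consume fewer bytes than requested, so the unsent tail is retried until the buffer drains. The same pattern as a tiny helper (the name is hypothetical):

import os

def write_all(fd, data):
    # Keep writing the remaining tail until everything is sent.
    while data:
        data = data[os.write(fd, data):]
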
@@ -3414,6 +3555,7 @@ class dblink(object):
level=logging.ERROR, noiselevel=-1)
return 1
+ is_binpkg = self.settings.get("EMERGE_FROM") == "binary"
slot = ''
for var_name in ('CHOST', 'SLOT'):
if var_name == 'CHOST' and self.cat == 'virtual':
@@ -3423,22 +3565,18 @@ class dblink(object):
pass
continue
- f = None
try:
- f = io.open(_unicode_encode(
+ with io.open(_unicode_encode(
os.path.join(inforoot, var_name),
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'],
- errors='replace')
- val = f.readline().strip()
+ errors='replace') as f:
+ val = f.readline().strip()
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
del e
val = ''
- finally:
- if f is not None:
- f.close()
if var_name == 'SLOT':
slot = val
@@ -3451,7 +3589,9 @@ class dblink(object):
return 1
write_atomic(os.path.join(inforoot, var_name), slot + '\n')
- if val != self.settings.get(var_name, ''):
+ # This check only applies when built from source, since
+ # inforoot values are written just after src_install.
+ if not is_binpkg and val != self.settings.get(var_name, ''):
self._eqawarn('preinst',
[_("QA Notice: Expected %(var_name)s='%(expected_value)s', got '%(actual_value)s'\n") % \
{"var_name":var_name, "expected_value":self.settings.get(var_name, ''), "actual_value":val}])
@@ -3462,30 +3602,47 @@ class dblink(object):
if not os.path.exists(self.dbcatdir):
ensure_dirs(self.dbcatdir)
+ # NOTE: We use SLOT obtained from the inforoot
+ # directory, in order to support USE=multislot.
+		# Use _pkg_str to discard the sub-slot part if necessary.
+ slot = _pkg_str(self.mycpv, slot=slot).slot
cp = self.mysplit[0]
slot_atom = "%s:%s" % (cp, slot)
- # filter any old-style virtual matches
- slot_matches = [cpv for cpv in self.vartree.dbapi.match(slot_atom) \
- if cpv_getkey(cpv) == cp]
-
- if self.mycpv not in slot_matches and \
- self.vartree.dbapi.cpv_exists(self.mycpv):
- # handle multislot or unapplied slotmove
- slot_matches.append(self.mycpv)
-
- others_in_slot = []
- from portage import config
- for cur_cpv in slot_matches:
- # Clone the config in case one of these has to be unmerged since
- # we need it to have private ${T} etc... for things like elog.
- settings_clone = config(clone=self.settings)
- settings_clone.pop("PORTAGE_BUILDIR_LOCKED", None)
- settings_clone.reset()
- others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
- settings=settings_clone,
- vartree=self.vartree, treetype="vartree",
- scheduler=self._scheduler, pipe=self._pipe))
+ self.lockdb()
+ try:
+ # filter any old-style virtual matches
+ slot_matches = [cpv for cpv in self.vartree.dbapi.match(slot_atom)
+ if cpv_getkey(cpv) == cp]
+
+ if self.mycpv not in slot_matches and \
+ self.vartree.dbapi.cpv_exists(self.mycpv):
+ # handle multislot or unapplied slotmove
+ slot_matches.append(self.mycpv)
+
+ others_in_slot = []
+ for cur_cpv in slot_matches:
+ # Clone the config in case one of these has to be unmerged,
+ # since we need it to have private ${T} etc... for things
+ # like elog.
+ settings_clone = portage.config(clone=self.settings)
+ settings_clone.pop("PORTAGE_BUILDDIR_LOCKED", None)
+ settings_clone.setcpv(cur_cpv, mydb=self.vartree.dbapi)
+ if self._preserve_libs and "preserve-libs" in \
+ settings_clone["PORTAGE_RESTRICT"].split():
+ self._preserve_libs = False
+ others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
+ settings=settings_clone,
+ vartree=self.vartree, treetype="vartree",
+ scheduler=self._scheduler, pipe=self._pipe))
+ finally:
+ self.unlockdb()
+
+ # If any instance has RESTRICT=preserve-libs, then
+ # restrict it for all instances.
+ if not self._preserve_libs:
+ for dblnk in others_in_slot:
+ dblnk._preserve_libs = False
retval = self._security_check(others_in_slot)
if retval:
@@ -3596,6 +3753,13 @@ class dblink(object):
# to an infinite recursion loop.
mylinklist.append(relative_path)
+ myto = _unicode_decode(
+ _os.readlink(_unicode_encode(fpath,
+ encoding=_encodings['merge'], errors='strict')),
+ encoding=_encodings['merge'], errors='replace')
+ if line_ending_re.search(myto) is not None:
+ paths_with_newlines.append(relative_path)
+
if unicode_error:
break
@@ -3647,7 +3811,7 @@ class dblink(object):
_("Manually run `emerge --unmerge =%s` if you "
"really want to remove the above files. Set "
"PORTAGE_PACKAGE_EMPTY_ABORT=\"0\" in "
- "/etc/make.conf if you do not want to "
+ "/etc/portage/make.conf if you do not want to "
"abort in cases like this.") % other_dblink.mycpv,
wrap_width))
eerror(msg)
@@ -3713,7 +3877,9 @@ class dblink(object):
" enough information to determine if a real problem"
" exists. Please do NOT file a bug report at"
" http://bugs.gentoo.org unless you report exactly which"
- " two packages install the same file(s). Once again,"
+ " two packages install the same file(s). See"
+ " http://wiki.gentoo.org/wiki/Knowledge_Base:Blockers"
+ " for tips on how to solve the problem. And once again,"
" please do NOT file a bug report unless you have"
" completely understood the above message.")
@@ -3748,17 +3914,28 @@ class dblink(object):
# get_owners is slow for large numbers of files, so
# don't look them all up.
collisions = collisions[:20]
+
+ pkg_info_strs = {}
self.lockdb()
try:
owners = self.vartree.dbapi._owners.get_owners(collisions)
self.vartree.dbapi.flush_cache()
+
+ for pkg in owners:
+ pkg = self.vartree.dbapi._pkg_str(pkg.mycpv, None)
+ pkg_info_str = "%s%s%s" % (pkg,
+ _slot_separator, pkg.slot)
+ if pkg.repo != _unknown_repo:
+ pkg_info_str += "%s%s" % (_repo_separator,
+ pkg.repo)
+ pkg_info_strs[pkg] = pkg_info_str
+
finally:
self.unlockdb()
for pkg, owned_files in owners.items():
- cpv = pkg.mycpv
msg = []
- msg.append("%s" % cpv)
+ msg.append(pkg_info_strs[pkg.mycpv])
for f in sorted(owned_files):
msg.append("\t%s" % os.path.join(destroot,
f.lstrip(os.path.sep)))
@@ -3814,6 +3991,20 @@ class dblink(object):
self.delete()
ensure_dirs(self.dbtmpdir)
+ downgrade = False
+ if self._installed_instance is not None and \
+ vercmp(self.mycpv.version,
+ self._installed_instance.mycpv.version) < 0:
+ downgrade = True
+
+ if self._installed_instance is not None:
+ rval = self._pre_merge_backup(self._installed_instance, downgrade)
+ if rval != os.EX_OK:
+ showMessage(_("!!! FAILED preinst: ") +
+ "quickpkg: %s\n" % rval,
+ level=logging.ERROR, noiselevel=-1)
+ return rval
+
# run preinst script
showMessage(_(">>> Merging %(cpv)s to %(destroot)s\n") % \
{"cpv":self.mycpv, "destroot":destroot})
@@ -3835,32 +4026,26 @@ class dblink(object):
# write local package counter for recording
if counter is None:
counter = self.vartree.dbapi.counter_tick(mycpv=self.mycpv)
- f = io.open(_unicode_encode(os.path.join(self.dbtmpdir, 'COUNTER'),
+ with io.open(_unicode_encode(os.path.join(self.dbtmpdir, 'COUNTER'),
encoding=_encodings['fs'], errors='strict'),
mode='w', encoding=_encodings['repo.content'],
- errors='backslashreplace')
- f.write(_unicode_decode(str(counter)))
- f.close()
+ errors='backslashreplace') as f:
+ f.write("%s" % counter)
self.updateprotect()
#if we have a file containing previously-merged config file md5sums, grab it.
self.vartree.dbapi._fs_lock()
try:
+ # Always behave like --noconfmem is enabled for downgrades
+ # so that people who don't know about this option are less
+ # likely to get confused when doing upgrade/downgrade cycles.
cfgfiledict = grabdict(self.vartree.dbapi._conf_mem_file)
- if "NOCONFMEM" in self.settings:
+ if "NOCONFMEM" in self.settings or downgrade:
cfgfiledict["IGNORE"]=1
else:
cfgfiledict["IGNORE"]=0
- # Always behave like --noconfmem is enabled for downgrades
- # so that people who don't know about this option are less
- # likely to get confused when doing upgrade/downgrade cycles.
- for other in others_in_slot:
- if vercmp(self.mycpv.version, other.mycpv.version) < 0:
- cfgfiledict["IGNORE"] = 1
- break
-
rval = self._merge_contents(srcroot, destroot, cfgfiledict)
if rval != os.EX_OK:
return rval
@@ -3970,6 +4155,7 @@ class dblink(object):
try:
self.delete()
_movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
+ self._merged_path(self.dbpkgdir, os.lstat(self.dbpkgdir))
finally:
self.unlockdb()
@@ -4014,9 +4200,9 @@ class dblink(object):
self.vartree.dbapi.lock()
try:
try:
- slot, counter = self.vartree.dbapi.aux_get(
- cpv, ["SLOT", "COUNTER"])
- except KeyError:
+ slot = self.vartree.dbapi._pkg_str(cpv, None).slot
+ counter = self.vartree.dbapi.cpv_counter(cpv)
+ except (KeyError, InvalidData):
pass
else:
has_vdb_entry = True
@@ -4085,6 +4271,7 @@ class dblink(object):
# For gcc upgrades, preserved libs have to be removed after the
# the library path has been updated.
self._prune_plib_registry()
+ self._post_merge_sync()
return os.EX_OK
@@ -4100,7 +4287,7 @@ class dblink(object):
x = -1
while True:
x += 1
- backup_p = p + '.backup.' + str(x).rjust(4, '0')
+ backup_p = '%s.backup.%04d' % (p, x)
try:
os.lstat(backup_p)
except OSError:
@@ -4201,8 +4388,9 @@ class dblink(object):
@type stufftomerge: String or List
@param cfgfiledict: { File:mtime } mapping for config_protected files
@type cfgfiledict: Dictionary
- @param thismtime: The current time (typically long(time.time())
- @type thismtime: Long
+ @param thismtime: None or new mtime for merged files (expressed in seconds
+ in Python <3.3 and nanoseconds in Python >=3.3)
+ @type thismtime: None or Int
@rtype: None or Boolean
@return:
1. True on failure
@@ -4227,18 +4415,18 @@ class dblink(object):
# this is supposed to merge a list of files. There will be 2 forms of argument passing.
if isinstance(stufftomerge, basestring):
#A directory is specified. Figure out protection paths, listdir() it and process it.
- mergelist = os.listdir(join(srcroot, stufftomerge))
- offset = stufftomerge
+ mergelist = [join(stufftomerge, child) for child in \
+ os.listdir(join(srcroot, stufftomerge))]
else:
- mergelist = stufftomerge
- offset = ""
+ mergelist = stufftomerge[:]
- for i, x in enumerate(mergelist):
+ while mergelist:
- mysrc = join(srcroot, offset, x)
- mydest = join(destroot, offset, x)
+ relative_path = mergelist.pop()
+ mysrc = join(srcroot, relative_path)
+ mydest = join(destroot, relative_path)
# myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
- myrealdest = join(sep, offset, x)
+ myrealdest = join(sep, relative_path)
# stat file once, test using S_* macros many times (faster that way)
mystat = os.lstat(mysrc)
mymode = mystat[stat.ST_MODE]
@@ -4333,9 +4521,26 @@ class dblink(object):
mymtime = movefile(mysrc, mydest, newmtime=thismtime,
sstat=mystat, mysettings=self.settings,
encoding=_encodings['merge'])
+
+ try:
+ self._merged_path(mydest, os.lstat(mydest))
+ except OSError:
+ pass
+
if mymtime != None:
+ # Use lexists, since if the target happens to be a broken
+ # symlink then that should trigger an independent warning.
+ if not (os.path.lexists(myrealto) or
+ os.path.lexists(join(srcroot, myabsto))):
+ self._eqawarn('preinst',
+ [_("QA Notice: Symbolic link /%s points to /%s which does not exist.")
+ % (relative_path, myabsto)])
+
showMessage(">>> %s -> %s\n" % (mydest, myto))
- outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
+ if sys.hexversion >= 0x3030000:
+ outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime // 1000000000)+"\n")
+ else:
+ outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
else:
showMessage(_("!!! Failed to move file.\n"),
level=logging.ERROR, noiselevel=-1)
@@ -4429,11 +4634,17 @@ class dblink(object):
os.chmod(mydest, mystat[0])
os.chown(mydest, mystat[4], mystat[5])
showMessage(">>> %s/\n" % mydest)
+
+ try:
+ self._merged_path(mydest, os.lstat(mydest))
+ except OSError:
+ pass
+
outfile.write("dir "+myrealdest+"\n")
# recurse and merge this directory
- if self.mergeme(srcroot, destroot, outfile, secondhand,
- join(offset, x), cfgfiledict, thismtime):
- return 1
+ mergelist.extend(join(relative_path, child) for child in
+ os.listdir(join(srcroot, relative_path)))
+
elif stat.S_ISREG(mymode):
# we are merging a regular file
mymd5 = perform_md5(mysrc, calc_prelink=calc_prelink)
@@ -4489,7 +4700,10 @@ class dblink(object):
cfgprot = cfgfiledict["IGNORE"]
if not moveme:
zing = "---"
- mymtime = mystat[stat.ST_MTIME]
+ if sys.hexversion >= 0x3030000:
+ mymtime = mystat.st_mtime_ns
+ else:
+ mymtime = mystat[stat.ST_MTIME]
else:
moveme = 1
cfgprot = 1
@@ -4525,8 +4739,16 @@ class dblink(object):
hardlink_candidates.append(mydest)
zing = ">>>"
+ try:
+ self._merged_path(mydest, os.lstat(mydest))
+ except OSError:
+ pass
+
if mymtime != None:
- outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
+ if sys.hexversion >= 0x3030000:
+ outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime // 1000000000)+"\n")
+ else:
+ outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
showMessage("%s %s\n" % (zing,mydest))
else:
# we are merging a fifo or device node
@@ -4537,6 +4759,12 @@ class dblink(object):
sstat=mystat, mysettings=self.settings,
encoding=_encodings['merge']) is not None:
zing = ">>>"
+
+ try:
+ self._merged_path(mydest, os.lstat(mydest))
+ except OSError:
+ pass
+
else:
return 1
if stat.S_ISFIFO(mymode):
@@ -4545,6 +4773,52 @@ class dblink(object):
outfile.write("dev %s\n" % myrealdest)
showMessage(zing + " " + mydest + "\n")
+ def _merged_path(self, path, lstatobj, exists=True):
+ previous_path = self._device_path_map.get(lstatobj.st_dev)
+ if previous_path is None or previous_path is False or \
+ (exists and len(path) < len(previous_path)):
+ if exists:
+ self._device_path_map[lstatobj.st_dev] = path
+ else:
+ # This entry is used to indicate that we've unmerged
+ # a file from this device, and later, this entry is
+ # replaced by a parent directory.
+ self._device_path_map[lstatobj.st_dev] = False
+
+ def _post_merge_sync(self):
+ """
+ Call this after merge or unmerge, in order to sync relevant files to
+ disk and avoid data-loss in the event of a power failure. This method
+ does nothing if FEATURES=merge-sync is disabled.
+ """
+ if not self._device_path_map or \
+ "merge-sync" not in self.settings.features:
+ return
+
+ returncode = None
+ if platform.system() == "Linux":
+
+ paths = []
+ for path in self._device_path_map.values():
+ if path is not False:
+ paths.append(path)
+ paths = tuple(paths)
+
+ proc = SyncfsProcess(paths=paths,
+ scheduler=(self._scheduler or
+ portage._internal_caller and global_event_loop() or
+ EventLoop(main=False)))
+ proc.start()
+ returncode = proc.wait()
+
+ if returncode is None or returncode != os.EX_OK:
+ try:
+ proc = subprocess.Popen(["sync"])
+ except EnvironmentError:
+ pass
+ else:
+ proc.wait()
+
def merge(self, mergeroot, inforoot, myroot=None, myebuild=None, cleanup=0,
mydbapi=None, prev_mtimes=None, counter=None):
"""
@@ -4557,7 +4831,8 @@ class dblink(object):
self.lockdb()
self.vartree.dbapi._bump_mtime(self.mycpv)
if self._scheduler is None:
- self._scheduler = PollScheduler().sched_iface
+ self._scheduler = SchedulerInterface(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False))
try:
retval = self.treewalk(mergeroot, myroot, inforoot, myebuild,
cleanup=cleanup, mydbapi=mydbapi, prev_mtimes=prev_mtimes,
@@ -4608,11 +4883,12 @@ class dblink(object):
"returns contents of a file with whitespace converted to spaces"
if not os.path.exists(self.dbdir+"/"+name):
return ""
- mydata = io.open(
+ with io.open(
_unicode_encode(os.path.join(self.dbdir, name),
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'], errors='replace'
- ).read().split()
+ ) as f:
+ mydata = f.read().split()
return " ".join(mydata)
def copyfile(self,fname):
@@ -4621,10 +4897,11 @@ class dblink(object):
def getfile(self,fname):
if not os.path.exists(self.dbdir+"/"+fname):
return ""
- return io.open(_unicode_encode(os.path.join(self.dbdir, fname),
+ with io.open(_unicode_encode(os.path.join(self.dbdir, fname),
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'], errors='replace'
- ).read()
+ ) as f:
+ return f.read()
def setfile(self,fname,data):
kwargs = {}
@@ -4633,16 +4910,18 @@ class dblink(object):
else:
kwargs['mode'] = 'w'
kwargs['encoding'] = _encodings['repo.content']
- write_atomic(os.path.join(self.dbdir, fname), data, **kwargs)
+ write_atomic(os.path.join(self.dbdir, fname), data,
+ **portage._native_kwargs(kwargs))
def getelements(self,ename):
if not os.path.exists(self.dbdir+"/"+ename):
return []
- mylines = io.open(_unicode_encode(
+ with io.open(_unicode_encode(
os.path.join(self.dbdir, ename),
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'], errors='replace'
- ).readlines()
+ ) as f:
+ mylines = f.readlines()
myreturn = []
for x in mylines:
for y in x[:-1].split():
@@ -4650,23 +4929,82 @@ class dblink(object):
return myreturn
def setelements(self,mylist,ename):
- myelement = io.open(_unicode_encode(
+ with io.open(_unicode_encode(
os.path.join(self.dbdir, ename),
encoding=_encodings['fs'], errors='strict'),
mode='w', encoding=_encodings['repo.content'],
- errors='backslashreplace')
- for x in mylist:
- myelement.write(_unicode_decode(x+"\n"))
- myelement.close()
+ errors='backslashreplace') as f:
+ for x in mylist:
+ f.write("%s\n" % x)
def isregular(self):
"Is this a regular package (does it have a CATEGORY file? A dblink can be virtual *and* regular)"
return os.path.exists(os.path.join(self.dbdir, "CATEGORY"))
+ def _pre_merge_backup(self, backup_dblink, downgrade):
+
+ if ("unmerge-backup" in self.settings.features or
+ (downgrade and "downgrade-backup" in self.settings.features)):
+ return self._quickpkg_dblink(backup_dblink, False, None)
+
+ return os.EX_OK
+
+ def _pre_unmerge_backup(self, background):
+
+		if "unmerge-backup" in self.settings.features:
+ logfile = None
+ if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+ logfile = self.settings.get("PORTAGE_LOG_FILE")
+ return self._quickpkg_dblink(self, background, logfile)
+
+ return os.EX_OK
+
+ def _quickpkg_dblink(self, backup_dblink, background, logfile):
+
+ trees = QueryCommand.get_db()[self.settings["EROOT"]]
+ bintree = trees["bintree"]
+ binpkg_path = bintree.getname(backup_dblink.mycpv)
+ if os.path.exists(binpkg_path) and \
+ catsplit(backup_dblink.mycpv)[1] not in bintree.invalids:
+ return os.EX_OK
+
+ self.lockdb()
+ try:
+
+ if not backup_dblink.exists():
+ # It got unmerged by a concurrent process.
+ return os.EX_OK
+
+			# Call quickpkg so that QUICKPKG_DEFAULT_OPTS and related settings are honored.
+ quickpkg_binary = os.path.join(self.settings["PORTAGE_BIN_PATH"],
+ "quickpkg")
+
+ # Let quickpkg inherit the global vartree config's env.
+ env = dict(self.vartree.settings.items())
+ env["__PORTAGE_INHERIT_VARDB_LOCK"] = "1"
+
+ pythonpath = [x for x in env.get('PYTHONPATH', '').split(":") if x]
+ if not pythonpath or \
+ not os.path.samefile(pythonpath[0], portage._pym_path):
+ pythonpath.insert(0, portage._pym_path)
+ env['PYTHONPATH'] = ":".join(pythonpath)
+
+ quickpkg_proc = SpawnProcess(
+ args=[portage._python_interpreter, quickpkg_binary,
+ "=%s" % (backup_dblink.mycpv,)],
+ background=background, env=env,
+ scheduler=self._scheduler, logfile=logfile)
+ quickpkg_proc.start()
+
+ return quickpkg_proc.wait()
+
+ finally:
+ self.unlockdb()
+
def merge(mycat, mypkg, pkgloc, infloc,
myroot=None, settings=None, myebuild=None,
mytree=None, mydbapi=None, vartree=None, prev_mtimes=None, blockers=None,
- scheduler=None):
+ scheduler=None, fd_pipes=None):
"""
@param myroot: ignored, settings['EROOT'] is used instead
"""
@@ -4681,10 +5019,12 @@ def merge(mycat, mypkg, pkgloc, infloc,
merge_task = MergeProcess(
mycat=mycat, mypkg=mypkg, settings=settings,
treetype=mytree, vartree=vartree,
- scheduler=(scheduler or PollScheduler().sched_iface),
+ scheduler=(scheduler or portage._internal_caller and
+ global_event_loop() or EventLoop(main=False)),
background=background, blockers=blockers, pkgloc=pkgloc,
infloc=infloc, myebuild=myebuild, mydbapi=mydbapi,
- prev_mtimes=prev_mtimes, logfile=settings.get('PORTAGE_LOG_FILE'))
+ prev_mtimes=prev_mtimes, logfile=settings.get('PORTAGE_LOG_FILE'),
+ fd_pipes=fd_pipes)
merge_task.start()
retcode = merge_task.wait()
return retcode
@@ -4864,13 +5204,11 @@ def tar_contents(contents, root, tar, protect=None, onProgress=None):
tar.addfile(tarinfo, f)
f.close()
else:
- f = open(_unicode_encode(path,
+ with open(_unicode_encode(path,
encoding=encoding,
- errors='strict'), 'rb')
- try:
+ errors='strict'), 'rb') as f:
tar.addfile(tarinfo, f)
- finally:
- f.close()
+
else:
tar.addfile(tarinfo)
if onProgress:
diff --git a/portage_with_autodep/pym/portage/dbapi/vartree.pyo b/portage_with_autodep/pym/portage/dbapi/vartree.pyo
index 7c186cf..745d15f 100644
--- a/portage_with_autodep/pym/portage/dbapi/vartree.pyo
+++ b/portage_with_autodep/pym/portage/dbapi/vartree.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/portage/dbapi/virtual.py b/portage_with_autodep/pym/portage/dbapi/virtual.py
index da15983..ba9745c 100644
--- a/portage_with_autodep/pym/portage/dbapi/virtual.py
+++ b/portage_with_autodep/pym/portage/dbapi/virtual.py
@@ -1,6 +1,7 @@
-# Copyright 1998-2012 Gentoo Foundation
+# Copyright 1998-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
from portage.dbapi import dbapi
from portage.dbapi.dep_expand import dep_expand
@@ -74,30 +75,55 @@ class fakedbapi(dbapi):
@param metadata: dict
"""
self._clear_cache()
- if not hasattr(mycpv, 'cp'):
+
+ try:
+ mycp = mycpv.cp
+ except AttributeError:
+ mycp = None
+ try:
+ myslot = mycpv.slot
+ except AttributeError:
+ myslot = None
+
+ if mycp is None or \
+ (myslot is None and metadata is not None and metadata.get('SLOT')):
if metadata is None:
mycpv = _pkg_str(mycpv)
else:
- mycpv = _pkg_str(mycpv, slot=metadata.get('SLOT'),
- repo=metadata.get('repository'))
- mycp = mycpv.cp
+ mycpv = _pkg_str(mycpv, metadata=metadata,
+ settings=self.settings)
+
+ mycp = mycpv.cp
+ try:
+ myslot = mycpv.slot
+ except AttributeError:
+ pass
+
self.cpvdict[mycpv] = metadata
- myslot = None
- if self._exclusive_slots and metadata:
- myslot = metadata.get("SLOT", None)
+ if not self._exclusive_slots:
+ myslot = None
if myslot and mycp in self.cpdict:
# If necessary, remove another package in the same SLOT.
for cpv in self.cpdict[mycp]:
if mycpv != cpv:
- other_metadata = self.cpvdict[cpv]
- if other_metadata:
- if myslot == other_metadata.get("SLOT", None):
+ try:
+ other_slot = cpv.slot
+ except AttributeError:
+ pass
+ else:
+ if myslot == other_slot:
self.cpv_remove(cpv)
break
- if mycp not in self.cpdict:
- self.cpdict[mycp] = []
- if not mycpv in self.cpdict[mycp]:
- self.cpdict[mycp].append(mycpv)
+
+ cp_list = self.cpdict.get(mycp)
+ if cp_list is None:
+ cp_list = []
+ self.cpdict[mycp] = cp_list
+ try:
+ cp_list.remove(mycpv)
+ except ValueError:
+ pass
+ cp_list.append(mycpv)
def cpv_remove(self,mycpv):
"""Removes a cpv from the list of available packages."""
diff --git a/portage_with_autodep/pym/portage/dbapi/virtual.pyo b/portage_with_autodep/pym/portage/dbapi/virtual.pyo
index 9f7c667..ca52263 100644
--- a/portage_with_autodep/pym/portage/dbapi/virtual.pyo
+++ b/portage_with_autodep/pym/portage/dbapi/virtual.pyo
Binary files differ