author     Alexander Bersenev <bay@hackerdom.ru>  2014-02-17 17:57:05 +0600
committer  Alexander Bersenev <bay@hackerdom.ru>  2014-02-17 17:57:05 +0600
commit     6563293d18daed502ccdb663f3c72b4bae5fe23a (patch)
tree       d0a7d53a7c137feb4073c963408829f88ea75c92 /portage_with_autodep/pym/portage/util
parent     updated portage to 2.2.8-r1 (diff)
updated portage to 2.2.8-r1 (HEAD, master)
Diffstat (limited to 'portage_with_autodep/pym/portage/util')
-rw-r--r--  portage_with_autodep/pym/portage/util/ExtractKernelVersion.py | 6
-rw-r--r--  portage_with_autodep/pym/portage/util/ExtractKernelVersion.pyo | bin 2297 -> 2293 bytes
-rw-r--r--  portage_with_autodep/pym/portage/util/SlotObject.py | 1
-rw-r--r--  portage_with_autodep/pym/portage/util/SlotObject.pyo | bin 1719 -> 1711 bytes
-rw-r--r--  portage_with_autodep/pym/portage/util/__init__.py | 390
-rw-r--r--  portage_with_autodep/pym/portage/util/__init__.pyo | bin 47487 -> 51257 bytes
-rw-r--r--  portage_with_autodep/pym/portage/util/_desktop_entry.py | 85
-rw-r--r--  portage_with_autodep/pym/portage/util/_desktop_entry.pyo | bin 2878 -> 3495 bytes
-rw-r--r--  portage_with_autodep/pym/portage/util/_dyn_libs/LinkageMapELF.py | 24
-rw-r--r--  portage_with_autodep/pym/portage/util/_dyn_libs/LinkageMapELF.pyo | bin 26399 -> 26552 bytes
-rw-r--r--  portage_with_autodep/pym/portage/util/_dyn_libs/PreservedLibsRegistry.pyo | bin 8884 -> 8858 bytes
-rw-r--r--  portage_with_autodep/pym/portage/util/_dyn_libs/__init__.pyo | bin 144 -> 142 bytes
-rw-r--r--  portage_with_autodep/pym/portage/util/_eventloop/EventLoop.py | 364
-rw-r--r--  portage_with_autodep/pym/portage/util/_eventloop/EventLoop.pyo | bin 13508 -> 18011 bytes
-rw-r--r--  portage_with_autodep/pym/portage/util/_eventloop/GlibEventLoop.pyo | bin 1029 -> 1023 bytes
-rw-r--r--  portage_with_autodep/pym/portage/util/_eventloop/PollConstants.pyo | bin 787 -> 783 bytes
-rw-r--r--  portage_with_autodep/pym/portage/util/_eventloop/PollSelectAdapter.py | 2
-rw-r--r--  portage_with_autodep/pym/portage/util/_eventloop/PollSelectAdapter.pyo | bin 2220 -> 2224 bytes
-rw-r--r--  portage_with_autodep/pym/portage/util/_eventloop/__init__.pyo | bin 145 -> 143 bytes
-rw-r--r--  portage_with_autodep/pym/portage/util/_eventloop/global_event_loop.pyo | bin 878 -> 874 bytes
-rw-r--r--  portage_with_autodep/pym/portage/util/_pty.pyo | bin 1936 -> 1932 bytes
-rw-r--r--  portage_with_autodep/pym/portage/util/_urlopen.py | 99
-rw-r--r--  portage_with_autodep/pym/portage/util/_urlopen.pyo | bin 1626 -> 3962 bytes
-rw-r--r--  portage_with_autodep/pym/portage/util/digraph.py | 36
-rw-r--r--  portage_with_autodep/pym/portage/util/digraph.pyo | bin 10678 -> 10653 bytes
-rw-r--r--  portage_with_autodep/pym/portage/util/env_update.py | 77
-rw-r--r--  portage_with_autodep/pym/portage/util/env_update.pyo | bin 9095 -> 9804 bytes
-rw-r--r--  portage_with_autodep/pym/portage/util/lafilefixer.py | 10
-rw-r--r--  portage_with_autodep/pym/portage/util/lafilefixer.pyo | bin 3621 -> 3615 bytes
-rw-r--r--  portage_with_autodep/pym/portage/util/listdir.py | 128
-rw-r--r--  portage_with_autodep/pym/portage/util/listdir.pyo | bin 4088 -> 3899 bytes
-rw-r--r--  portage_with_autodep/pym/portage/util/movefile.py | 220
-rw-r--r--  portage_with_autodep/pym/portage/util/movefile.pyo | bin 8236 -> 10716 bytes
-rw-r--r--  portage_with_autodep/pym/portage/util/mtimedb.pyo | bin 3770 -> 3760 bytes
-rw-r--r--  portage_with_autodep/pym/portage/util/whirlpool.py | 2
-rw-r--r--  portage_with_autodep/pym/portage/util/whirlpool.pyo | bin 38994 -> 39086 bytes
36 files changed, 980 insertions(+), 464 deletions(-)
diff --git a/portage_with_autodep/pym/portage/util/ExtractKernelVersion.py b/portage_with_autodep/pym/portage/util/ExtractKernelVersion.py
index 69bd58a..af4a4fe 100644
--- a/portage_with_autodep/pym/portage/util/ExtractKernelVersion.py
+++ b/portage_with_autodep/pym/portage/util/ExtractKernelVersion.py
@@ -61,18 +61,18 @@ def ExtractKernelVersion(base_dir):
# Grab a list of files named localversion* and sort them
localversions = os.listdir(base_dir)
- for x in range(len(localversions)-1,-1,-1):
+ for x in range(len(localversions) - 1, -1, -1):
if localversions[x][:12] != "localversion":
del localversions[x]
localversions.sort()
# Append the contents of each to the version string, stripping ALL whitespace
for lv in localversions:
- version += "".join( " ".join( grabfile( base_dir+ "/" + lv ) ).split() )
+ version += "".join(" ".join(grabfile(base_dir + "/" + lv)).split())
# Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
kernelconfig = getconfig(base_dir+"/.config")
if kernelconfig and "CONFIG_LOCALVERSION" in kernelconfig:
version += "".join(kernelconfig["CONFIG_LOCALVERSION"].split())
- return (version,None)
+ return (version, None)
diff --git a/portage_with_autodep/pym/portage/util/ExtractKernelVersion.pyo b/portage_with_autodep/pym/portage/util/ExtractKernelVersion.pyo
index d0302fd..132515d 100644
--- a/portage_with_autodep/pym/portage/util/ExtractKernelVersion.pyo
+++ b/portage_with_autodep/pym/portage/util/ExtractKernelVersion.pyo
Binary files differ
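Note: the ExtractKernelVersion hunk above is whitespace-only; the behaviour is unchanged. For context, the version string is extended with the contents of every localversion* file (sorted) and with CONFIG_LOCALVERSION from .config, all whitespace stripped. A minimal standalone sketch of that suffix-collection step (collect_localversion_suffix is an illustrative helper, not part of portage's API):

import os

def collect_localversion_suffix(base_dir):
    # Gather localversion* files in sorted order and join their
    # contents with all whitespace removed, as the loop above does.
    suffix = ""
    for name in sorted(os.listdir(base_dir)):
        if not name.startswith("localversion"):
            continue
        with open(os.path.join(base_dir, name)) as f:
            suffix += "".join(f.read().split())
    return suffix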
diff --git a/portage_with_autodep/pym/portage/util/SlotObject.py b/portage_with_autodep/pym/portage/util/SlotObject.py
index a59dfc1..4bb6822 100644
--- a/portage_with_autodep/pym/portage/util/SlotObject.py
+++ b/portage_with_autodep/pym/portage/util/SlotObject.py
@@ -48,4 +48,3 @@ class SlotObject(object):
setattr(obj, myattr, getattr(self, myattr))
return obj
-
diff --git a/portage_with_autodep/pym/portage/util/SlotObject.pyo b/portage_with_autodep/pym/portage/util/SlotObject.pyo
index 11d0ec7..08133cb 100644
--- a/portage_with_autodep/pym/portage/util/SlotObject.pyo
+++ b/portage_with_autodep/pym/portage/util/SlotObject.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/portage/util/__init__.py b/portage_with_autodep/pym/portage/util/__init__.py
index 2e0a32b..24553da 100644
--- a/portage_with_autodep/pym/portage/util/__init__.py
+++ b/portage_with_autodep/pym/portage/util/__init__.py
@@ -1,6 +1,8 @@
-# Copyright 2004-2012 Gentoo Foundation
+# Copyright 2004-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = ['apply_permissions', 'apply_recursive_permissions',
'apply_secpass_permissions', 'apply_stat_permissions', 'atomic_ofstream',
'cmp_sort_key', 'ConfigProtect', 'dump_traceback', 'ensure_dirs',
@@ -31,21 +33,26 @@ import portage
portage.proxy.lazyimport.lazyimport(globals(),
'pickle',
'portage.dep:Atom',
- 'portage.util.listdir:_ignorecvs_dirs'
+ 'subprocess',
)
from portage import os
-from portage import subprocess_getstatusoutput
from portage import _encodings
from portage import _os_merge
from portage import _unicode_encode
from portage import _unicode_decode
+from portage.const import VCS_DIRS
from portage.exception import InvalidAtom, PortageException, FileNotFound, \
OperationNotPermitted, ParseError, PermissionDenied, ReadOnlyFileSystem
from portage.localization import _
from portage.proxy.objectproxy import ObjectProxy
from portage.cache.mappings import UserDict
+if sys.hexversion >= 0x3000000:
+ _unicode = str
+else:
+ _unicode = unicode
+
noiselimit = 0
def initialize_logger(level=logging.WARN):
@@ -57,7 +64,7 @@ def initialize_logger(level=logging.WARN):
"""
logging.basicConfig(level=logging.WARN, format='[%(levelname)-4s] %(message)s')
-def writemsg(mystr,noiselevel=0,fd=None):
+def writemsg(mystr, noiselevel=0, fd=None):
"""Prints out warning and debug messages based on the noiselimit setting"""
global noiselimit
if fd is None:
@@ -75,7 +82,7 @@ def writemsg(mystr,noiselevel=0,fd=None):
fd.write(mystr)
fd.flush()
-def writemsg_stdout(mystr,noiselevel=0):
+def writemsg_stdout(mystr, noiselevel=0):
"""Prints messages stdout based on the noiselimit setting"""
writemsg(mystr, noiselevel=noiselevel, fd=sys.stdout)
@@ -100,7 +107,7 @@ def writemsg_level(msg, level=0, noiselevel=0):
writemsg(msg, noiselevel=noiselevel, fd=fd)
def normalize_path(mypath):
- """
+ """
os.path.normpath("//foo") returns "//foo" instead of "/foo"
We dislike this behavior so we create our own normpath func
to fix it.
@@ -120,8 +127,8 @@ def grabfile(myfilename, compat_level=0, recursive=0, remember_source_file=False
"""This function grabs the lines in a file, normalizes whitespace and returns lines in a list; if a line
begins with a #, it is ignored, as are empty lines"""
- mylines=grablines(myfilename, recursive, remember_source_file=True)
- newlines=[]
+ mylines = grablines(myfilename, recursive, remember_source_file=True)
+ newlines = []
for x, source_file in mylines:
#the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
@@ -139,10 +146,10 @@ def grabfile(myfilename, compat_level=0, recursive=0, remember_source_file=False
myline = " ".join(myline)
if not myline:
continue
- if myline[0]=="#":
+ if myline[0] == "#":
# Check if we have a compat-level string. BC-integration data.
# '##COMPAT==>N<==' 'some string attached to it'
- mylinetest = myline.split("<==",1)
+ mylinetest = myline.split("<==", 1)
if len(mylinetest) == 2:
myline_potential = mylinetest[1]
mylinetest = mylinetest[0].split("##COMPAT==>")
@@ -159,7 +166,7 @@ def grabfile(myfilename, compat_level=0, recursive=0, remember_source_file=False
newlines.append(myline)
return newlines
-def map_dictlist_vals(func,myDict):
+def map_dictlist_vals(func, myDict):
"""Performs a function on each value of each key in a dictlist.
Returns a new dictlist."""
new_dl = {}
@@ -173,7 +180,7 @@ def stack_dictlist(original_dicts, incremental=0, incrementals=[], ignore_none=0
Stacks an array of dict-types into one array. Optionally merging or
overwriting matching key/value pairs for the dict[key]->list.
Returns a single dict. Higher index in lists is preferenced.
-
+
Example usage:
>>> from portage.util import stack_dictlist
>>> print stack_dictlist( [{'a':'b'},{'x':'y'}])
@@ -188,7 +195,7 @@ def stack_dictlist(original_dicts, incremental=0, incrementals=[], ignore_none=0
>>> { 'KEYWORDS':['alpha'] }
>>> print stack_dictlist( [a,b], incrementals=['KEYWORDS'])
>>> { 'KEYWORDS':['alpha'] }
-
+
@param original_dicts a list of (dictionary objects or None)
@type list
@param incremental True or false depending on whether new keys should overwrite
@@ -199,7 +206,7 @@ def stack_dictlist(original_dicts, incremental=0, incrementals=[], ignore_none=0
@type list
@param ignore_none Appears to be ignored, but probably was used long long ago.
@type boolean
-
+
"""
final_dict = {}
for mydict in original_dicts:
@@ -208,7 +215,7 @@ def stack_dictlist(original_dicts, incremental=0, incrementals=[], ignore_none=0
for y in mydict:
if not y in final_dict:
final_dict[y] = []
-
+
for thing in mydict[y]:
if thing:
if incremental or y in incrementals:
@@ -245,12 +252,13 @@ def stack_dicts(dicts, incremental=0, incrementals=[], ignore_none=0):
def append_repo(atom_list, repo_name, remember_source_file=False):
"""
Takes a list of valid atoms without repo spec and appends ::repo_name.
+ If an atom already has a repo part, then it is preserved (see bug #461948).
"""
if remember_source_file:
- return [(Atom(atom + "::" + repo_name, allow_wildcard=True, allow_repo=True), source) \
+ return [(atom.repo is not None and atom or atom.with_repo(repo_name), source) \
for atom, source in atom_list]
else:
- return [Atom(atom + "::" + repo_name, allow_wildcard=True, allow_repo=True) \
+ return [atom.repo is not None and atom or atom.with_repo(repo_name) \
for atom in atom_list]
def stack_lists(lists, incremental=1, remember_source_file=False,
@@ -334,7 +342,7 @@ def stack_lists(lists, incremental=1, remember_source_file=False,
def grabdict(myfilename, juststrings=0, empty=0, recursive=0, incremental=1):
"""
This function grabs the lines in a file, normalizes whitespace and returns lines in a dictionary
-
+
@param myfilename: file to process
@type myfilename: string (path)
@param juststrings: only return strings
@@ -350,9 +358,9 @@ def grabdict(myfilename, juststrings=0, empty=0, recursive=0, incremental=1):
1. Returns the lines in a file in a dictionary, for example:
'sys-apps/portage x86 amd64 ppc'
would return
- { "sys-apps/portage" : [ 'x86', 'amd64', 'ppc' ]
+ {"sys-apps/portage" : ['x86', 'amd64', 'ppc']}
"""
- newdict={}
+ newdict = {}
for x in grablines(myfilename, recursive):
#the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
#into single spaces.
@@ -379,52 +387,75 @@ def grabdict(myfilename, juststrings=0, empty=0, recursive=0, incremental=1):
newdict[k] = " ".join(v)
return newdict
-def read_corresponding_eapi_file(filename):
+_eapi_cache = {}
+
+def read_corresponding_eapi_file(filename, default="0"):
"""
Read the 'eapi' file from the directory 'filename' is in.
Returns "0" if the file is not present or invalid.
"""
- default = "0"
eapi_file = os.path.join(os.path.dirname(filename), "eapi")
try:
- f = io.open(_unicode_encode(eapi_file,
+ eapi = _eapi_cache[eapi_file]
+ except KeyError:
+ pass
+ else:
+ if eapi is None:
+ return default
+ return eapi
+
+ eapi = None
+ try:
+ with io.open(_unicode_encode(eapi_file,
encoding=_encodings['fs'], errors='strict'),
- mode='r', encoding=_encodings['repo.content'], errors='replace')
- lines = f.readlines()
+ mode='r', encoding=_encodings['repo.content'], errors='replace') as f:
+ lines = f.readlines()
if len(lines) == 1:
eapi = lines[0].rstrip("\n")
else:
writemsg(_("--- Invalid 'eapi' file (doesn't contain exactly one line): %s\n") % (eapi_file),
noiselevel=-1)
- eapi = default
- f.close()
except IOError:
- eapi = default
+ pass
+ _eapi_cache[eapi_file] = eapi
+ if eapi is None:
+ return default
return eapi
def grabdict_package(myfilename, juststrings=0, recursive=0, allow_wildcard=False, allow_repo=False,
verify_eapi=False, eapi=None):
""" Does the same thing as grabdict except it validates keys
with isvalidatom()"""
- pkgs=grabdict(myfilename, juststrings, empty=1, recursive=recursive)
- if not pkgs:
- return pkgs
- if verify_eapi and eapi is None:
- eapi = read_corresponding_eapi_file(myfilename)
- # We need to call keys() here in order to avoid the possibility of
- # "RuntimeError: dictionary changed size during iteration"
- # when an invalid atom is deleted.
+ if recursive:
+ file_list = _recursive_file_list(myfilename)
+ else:
+ file_list = [myfilename]
+
atoms = {}
- for k, v in pkgs.items():
- try:
- k = Atom(k, allow_wildcard=allow_wildcard, allow_repo=allow_repo, eapi=eapi)
- except InvalidAtom as e:
- writemsg(_("--- Invalid atom in %s: %s\n") % (myfilename, e),
- noiselevel=-1)
- else:
- atoms[k] = v
+ for filename in file_list:
+ d = grabdict(filename, juststrings=False,
+ empty=True, recursive=False, incremental=True)
+ if not d:
+ continue
+ if verify_eapi and eapi is None:
+ eapi = read_corresponding_eapi_file(myfilename)
+
+ for k, v in d.items():
+ try:
+ k = Atom(k, allow_wildcard=allow_wildcard,
+ allow_repo=allow_repo, eapi=eapi)
+ except InvalidAtom as e:
+ writemsg(_("--- Invalid atom in %s: %s\n") % (filename, e),
+ noiselevel=-1)
+ else:
+ atoms.setdefault(k, []).extend(v)
+
+ if juststrings:
+ for k, v in atoms.items():
+ atoms[k] = " ".join(v)
+
return atoms
def grabfile_package(myfilename, compatlevel=0, recursive=0, allow_wildcard=False, allow_repo=False,
@@ -447,10 +478,10 @@ def grabfile_package(myfilename, compatlevel=0, recursive=0, allow_wildcard=Fals
try:
pkg = Atom(pkg, allow_wildcard=allow_wildcard, allow_repo=allow_repo, eapi=eapi)
except InvalidAtom as e:
- writemsg(_("--- Invalid atom in %s: %s\n") % (myfilename, e),
+ writemsg(_("--- Invalid atom in %s: %s\n") % (source_file, e),
noiselevel=-1)
else:
- if pkg_orig == str(pkg):
+ if pkg_orig == _unicode(pkg):
# normal atom, so return as Atom instance
if remember_source_file:
atoms.append((pkg, source_file))
@@ -464,34 +495,73 @@ def grabfile_package(myfilename, compatlevel=0, recursive=0, allow_wildcard=Fals
atoms.append(pkg_orig)
return atoms
+def _recursive_basename_filter(f):
+ return not f.startswith(".") and not f.endswith("~")
+
+def _recursive_file_list(path):
+ # path may be a regular file or a directory
+
+ def onerror(e):
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied(path)
+
+ stack = [os.path.split(path)]
+
+ while stack:
+ parent, fname = stack.pop()
+ fullpath = os.path.join(parent, fname)
+
+ try:
+ st = os.stat(fullpath)
+ except OSError as e:
+ onerror(e)
+ continue
+
+ if stat.S_ISDIR(st.st_mode):
+ if fname in VCS_DIRS or not _recursive_basename_filter(fname):
+ continue
+ try:
+ children = os.listdir(fullpath)
+ except OSError as e:
+ onerror(e)
+ continue
+
+ # Sort in reverse, since we pop from the end of the stack.
+ # Include regular files in the stack, so files are sorted
+ # together with directories.
+ children.sort(reverse=True)
+ stack.extend((fullpath, x) for x in children)
+
+ elif stat.S_ISREG(st.st_mode):
+ if _recursive_basename_filter(fname):
+ yield fullpath
+
def grablines(myfilename, recursive=0, remember_source_file=False):
- mylines=[]
- if recursive and os.path.isdir(myfilename):
- if os.path.basename(myfilename) in _ignorecvs_dirs:
- return mylines
- dirlist = os.listdir(myfilename)
- dirlist.sort()
- for f in dirlist:
- if not f.startswith(".") and not f.endswith("~"):
- mylines.extend(grablines(
- os.path.join(myfilename, f), recursive, remember_source_file))
+ mylines = []
+ if recursive:
+ for f in _recursive_file_list(myfilename):
+ mylines.extend(grablines(f, recursive=False,
+ remember_source_file=remember_source_file))
+
else:
try:
- myfile = io.open(_unicode_encode(myfilename,
+ with io.open(_unicode_encode(myfilename,
encoding=_encodings['fs'], errors='strict'),
- mode='r', encoding=_encodings['content'], errors='replace')
- if remember_source_file:
- mylines = [(line, myfilename) for line in myfile.readlines()]
- else:
- mylines = myfile.readlines()
- myfile.close()
+ mode='r', encoding=_encodings['content'], errors='replace') as myfile:
+ if remember_source_file:
+ mylines = [(line, myfilename) for line in myfile.readlines()]
+ else:
+ mylines = myfile.readlines()
except IOError as e:
if e.errno == PermissionDenied.errno:
raise PermissionDenied(myfilename)
- pass
+ elif e.errno in (errno.ENOENT, errno.ESTALE):
+ pass
+ else:
+ raise
return mylines
-def writedict(mydict,myfilename,writekey=True):
+def writedict(mydict, myfilename, writekey=True):
"""Writes out a dict to a file; writekey=0 mode doesn't write out
the key and assumes all values are strings, not lists."""
lines = []
@@ -517,18 +587,39 @@ def shlex_split(s):
rval = [_unicode_decode(x) for x in rval]
return rval
-class _tolerant_shlex(shlex.shlex):
+class _getconfig_shlex(shlex.shlex):
+
+ def __init__(self, portage_tolerant=False, **kwargs):
+ shlex.shlex.__init__(self, **kwargs)
+ self.__portage_tolerant = portage_tolerant
+
def sourcehook(self, newfile):
try:
return shlex.shlex.sourcehook(self, newfile)
except EnvironmentError as e:
- writemsg(_("!!! Parse error in '%s': source command failed: %s\n") % \
- (self.infile, str(e)), noiselevel=-1)
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied(newfile)
+ if e.errno not in (errno.ENOENT, errno.ENOTDIR):
+ writemsg("open('%s', 'r'): %s\n" % (newfile, e), noiselevel=-1)
+ raise
+
+ msg = self.error_leader()
+ if e.errno == errno.ENOTDIR:
+ msg += _("%s: Not a directory") % newfile
+ else:
+ msg += _("%s: No such file or directory") % newfile
+
+ if self.__portage_tolerant:
+ writemsg("%s\n" % msg, noiselevel=-1)
+ else:
+ raise ParseError(msg)
return (newfile, io.StringIO())
_invalid_var_name_re = re.compile(r'^\d|\W')
-def getconfig(mycfg, tolerant=0, allow_sourcing=False, expand=True):
+def getconfig(mycfg, tolerant=False, allow_sourcing=False, expand=True,
+ recursive=False):
+
if isinstance(expand, dict):
# Some existing variable definitions have been
# passed in, for use in substitutions.
@@ -537,6 +628,21 @@ def getconfig(mycfg, tolerant=0, allow_sourcing=False, expand=True):
else:
expand_map = {}
mykeys = {}
+
+ if recursive:
+ # Emulate source commands so that syntax error messages
+ # can display real file names and line numbers.
+ if not expand:
+ expand_map = False
+ fname = None
+ for fname in _recursive_file_list(mycfg):
+ mykeys.update(getconfig(fname, tolerant=tolerant,
+ allow_sourcing=allow_sourcing, expand=expand_map,
+ recursive=False) or {})
+ if fname is None:
+ return None
+ return mykeys
+
f = None
try:
# NOTE: shlex doesn't support unicode objects with Python 2
@@ -561,49 +667,53 @@ def getconfig(mycfg, tolerant=0, allow_sourcing=False, expand=True):
if f is not None:
f.close()
+ # Since this file has unicode_literals enabled, and Python 2's
+ # shlex implementation does not support unicode, the following code
+ # uses _native_string() to encode unicode literals when necessary.
+
# Workaround for avoiding a silent error in shlex that is
# triggered by a source statement at the end of the file
# without a trailing newline after the source statement.
- if content and content[-1] != '\n':
- content += '\n'
+ if content and content[-1] != portage._native_string('\n'):
+ content += portage._native_string('\n')
# Warn about dos-style line endings since that prevents
# people from being able to source them with bash.
- if '\r' in content:
+ if portage._native_string('\r') in content:
writemsg(("!!! " + _("Please use dos2unix to convert line endings " + \
"in config file: '%s'") + "\n") % mycfg, noiselevel=-1)
lex = None
try:
- if tolerant:
- shlex_class = _tolerant_shlex
- else:
- shlex_class = shlex.shlex
# The default shlex.sourcehook() implementation
# only joins relative paths when the infile
# attribute is properly set.
- lex = shlex_class(content, infile=mycfg, posix=True)
- lex.wordchars = string.digits + string.ascii_letters + \
- "~!@#$%*_\:;?,./-+{}"
- lex.quotes="\"'"
+ lex = _getconfig_shlex(instream=content, infile=mycfg, posix=True,
+ portage_tolerant=tolerant)
+ lex.wordchars = portage._native_string(string.digits +
+ string.ascii_letters + "~!@#$%*_\:;?,./-+{}")
+ lex.quotes = portage._native_string("\"'")
if allow_sourcing:
- lex.source="source"
- while 1:
- key=lex.get_token()
+ lex.source = portage._native_string("source")
+
+ while True:
+ key = _unicode_decode(lex.get_token())
if key == "export":
- key = lex.get_token()
+ key = _unicode_decode(lex.get_token())
if key is None:
#normal end of file
- break;
- equ=lex.get_token()
- if (equ==''):
+ break
+
+ equ = _unicode_decode(lex.get_token())
+ if not equ:
msg = lex.error_leader() + _("Unexpected EOF")
if not tolerant:
raise ParseError(msg)
else:
writemsg("%s\n" % msg, noiselevel=-1)
return mykeys
- elif (equ!='='):
+
+ elif equ != "=":
msg = lex.error_leader() + \
_("Invalid token '%s' (not '=')") % (equ,)
if not tolerant:
@@ -611,7 +721,8 @@ def getconfig(mycfg, tolerant=0, allow_sourcing=False, expand=True):
else:
writemsg("%s\n" % msg, noiselevel=-1)
return mykeys
- val=lex.get_token()
+
+ val = _unicode_decode(lex.get_token())
if val is None:
msg = lex.error_leader() + \
_("Unexpected end of config file: variable '%s'") % (key,)
@@ -620,8 +731,6 @@ def getconfig(mycfg, tolerant=0, allow_sourcing=False, expand=True):
else:
writemsg("%s\n" % msg, noiselevel=-1)
return mykeys
- key = _unicode_decode(key)
- val = _unicode_decode(val)
if _invalid_var_name_re.search(key) is not None:
msg = lex.error_leader() + \
@@ -642,7 +751,7 @@ def getconfig(mycfg, tolerant=0, allow_sourcing=False, expand=True):
except Exception as e:
if isinstance(e, ParseError) or lex is None:
raise
- msg = _unicode_decode("%s%s") % (lex.error_leader(), e)
+ msg = "%s%s" % (lex.error_leader(), e)
writemsg("%s\n" % msg, noiselevel=-1)
raise
@@ -660,10 +769,10 @@ def varexpand(mystring, mydict=None, error_leader=None):
This code is used by the configfile code, as well as others (parser)
This would be a good bunch of code to port to C.
"""
- numvars=0
- #in single, double quotes
- insing=0
- indoub=0
+ numvars = 0
+ # in single, double quotes
+ insing = 0
+ indoub = 0
pos = 0
length = len(mystring)
newstring = []
@@ -675,7 +784,7 @@ def varexpand(mystring, mydict=None, error_leader=None):
else:
newstring.append("'") # Quote removal is handled by shlex.
insing=not insing
- pos=pos+1
+ pos += 1
continue
elif current == '"':
if (insing):
@@ -683,9 +792,9 @@ def varexpand(mystring, mydict=None, error_leader=None):
else:
newstring.append('"') # Quote removal is handled by shlex.
indoub=not indoub
- pos=pos+1
+ pos += 1
continue
- if (not insing):
+ if not insing:
#expansion time
if current == "\n":
#convert newlines to spaces
@@ -700,7 +809,7 @@ def varexpand(mystring, mydict=None, error_leader=None):
# escaped newline characters. Note that we don't handle
# escaped quotes here, since getconfig() uses shlex
# to handle that earlier.
- if (pos+1>=len(mystring)):
+ if pos + 1 >= len(mystring):
newstring.append(current)
break
else:
@@ -722,15 +831,15 @@ def varexpand(mystring, mydict=None, error_leader=None):
newstring.append(mystring[pos - 2:pos])
continue
elif current == "$":
- pos=pos+1
- if mystring[pos]=="{":
- pos=pos+1
- braced=True
+ pos += 1
+ if mystring[pos] == "{":
+ pos += 1
+ braced = True
else:
- braced=False
- myvstart=pos
+ braced = False
+ myvstart = pos
while mystring[pos] in _varexpand_word_chars:
- if (pos+1)>=len(mystring):
+ if pos + 1 >= len(mystring):
if braced:
msg = _varexpand_unexpected_eof_msg
if error_leader is not None:
@@ -738,20 +847,20 @@ def varexpand(mystring, mydict=None, error_leader=None):
writemsg(msg + "\n", noiselevel=-1)
return ""
else:
- pos=pos+1
+ pos += 1
break
- pos=pos+1
- myvarname=mystring[myvstart:pos]
+ pos += 1
+ myvarname = mystring[myvstart:pos]
if braced:
- if mystring[pos]!="}":
+ if mystring[pos] != "}":
msg = _varexpand_unexpected_eof_msg
if error_leader is not None:
msg = error_leader() + msg
writemsg(msg + "\n", noiselevel=-1)
return ""
else:
- pos=pos+1
- if len(myvarname)==0:
+ pos += 1
+ if len(myvarname) == 0:
msg = "$"
if braced:
msg += "{}"
@@ -760,7 +869,7 @@ def varexpand(mystring, mydict=None, error_leader=None):
msg = error_leader() + msg
writemsg(msg + "\n", noiselevel=-1)
return ""
- numvars=numvars+1
+ numvars += 1
if myvarname in mydict:
newstring.append(mydict[myvarname])
else:
@@ -775,9 +884,9 @@ def varexpand(mystring, mydict=None, error_leader=None):
# broken and removed, but can still be imported
pickle_write = None
-def pickle_read(filename,default=None,debug=0):
+def pickle_read(filename, default=None, debug=0):
if not os.access(filename, os.R_OK):
- writemsg(_("pickle_read(): File not readable. '")+filename+"'\n",1)
+ writemsg(_("pickle_read(): File not readable. '") + filename + "'\n", 1)
return default
data = None
try:
@@ -786,12 +895,12 @@ def pickle_read(filename,default=None,debug=0):
mypickle = pickle.Unpickler(myf)
data = mypickle.load()
myf.close()
- del mypickle,myf
- writemsg(_("pickle_read(): Loaded pickle. '")+filename+"'\n",1)
+ del mypickle, myf
+ writemsg(_("pickle_read(): Loaded pickle. '") + filename + "'\n", 1)
except SystemExit as e:
raise
except Exception as e:
- writemsg(_("!!! Failed to load pickle: ")+str(e)+"\n",1)
+ writemsg(_("!!! Failed to load pickle: ") + str(e) + "\n", 1)
data = default
return data
@@ -819,6 +928,9 @@ class cmp_sort_key(object):
list.sort(), making it easier to port code for python-3.0 compatibility.
It works by generating key objects which use the given cmp function to
implement their __lt__ method.
+
+ Beginning with Python 2.7 and 3.2, equivalent functionality is provided
+ by functools.cmp_to_key().
"""
__slots__ = ("_cmp_func",)
@@ -911,6 +1023,10 @@ def apply_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1,
modified = False
+ # Since Python 3.4, chown requires int type (no proxies).
+ uid = int(uid)
+ gid = int(gid)
+
if stat_cached is None:
try:
if follow_links:
@@ -1130,7 +1246,7 @@ class atomic_ofstream(ObjectProxy):
object.__setattr__(self, '_file',
open_func(_unicode_encode(tmp_name,
encoding=_encodings['fs'], errors='strict'),
- mode=mode, **kargs))
+ mode=mode, **portage._native_kwargs(kargs)))
return
except IOError as e:
if canonical_path == filename:
@@ -1212,7 +1328,7 @@ class atomic_ofstream(ObjectProxy):
self.close()
def __del__(self):
- """If the user does not explicitely call close(), it is
+ """If the user does not explicitly call close(), it is
assumed that an error has occurred, so we abort()."""
try:
f = object.__getattribute__(self, '_file')
@@ -1391,9 +1507,9 @@ class LazyItemsDict(UserDict):
lazy_item = self.lazy_items.get(k)
if lazy_item is not None:
if not lazy_item.singleton:
- raise TypeError(_unicode_decode("LazyItemsDict " + \
+ raise TypeError("LazyItemsDict " + \
"deepcopy is unsafe with lazy items that are " + \
- "not singletons: key=%s value=%s") % (k, lazy_item,))
+ "not singletons: key=%s value=%s" % (k, lazy_item,))
UserDict.__setitem__(result, k_copy, deepcopy(self[k], memo))
return result
@@ -1565,13 +1681,13 @@ def find_updated_config_files(target_root, config_protect):
"""
Return a tuple of configuration files that needs to be updated.
The tuple contains lists organized like this:
- [ protected_dir, file_list ]
+ [protected_dir, file_list]
If the protected config isn't a protected_dir but a procted_file, list is:
- [ protected_file, None ]
+ [protected_file, None]
If no configuration files needs to be updated, None is returned
"""
- os = _os_merge
+ encoding = _encodings['fs']
if config_protect:
# directories with some protect files in them
@@ -1603,10 +1719,24 @@ def find_updated_config_files(target_root, config_protect):
mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
os.path.split(x.rstrip(os.path.sep))
mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
- a = subprocess_getstatusoutput(mycommand)
-
- if a[0] == 0:
- files = a[1].split('\0')
+ cmd = shlex_split(mycommand)
+
+ if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000:
+ # Python 3.1 _execvp throws TypeError for non-absolute executable
+ # path passed as bytes (see http://bugs.python.org/issue8513).
+ fullname = portage.process.find_binary(cmd[0])
+ if fullname is None:
+ raise portage.exception.CommandNotFound(cmd[0])
+ cmd[0] = fullname
+
+ cmd = [_unicode_encode(arg, encoding=encoding, errors='strict')
+ for arg in cmd]
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ output = _unicode_decode(proc.communicate()[0], encoding=encoding)
+ status = proc.wait()
+ if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
+ files = output.split('\0')
# split always produces an empty string as the last element
if files and not files[-1]:
del files[-1]
diff --git a/portage_with_autodep/pym/portage/util/__init__.pyo b/portage_with_autodep/pym/portage/util/__init__.pyo
index 941b286..9bab394 100644
--- a/portage_with_autodep/pym/portage/util/__init__.pyo
+++ b/portage_with_autodep/pym/portage/util/__init__.pyo
Binary files differ
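Note: much of the __init__.py hunk above replaces the old recursive grablines() directory walk with a shared _recursive_file_list() generator that skips VCS directories, dot files and backup files, and is now also used by grabdict_package() and getconfig(recursive=True). A simplified standalone sketch of the same stack-based traversal (error handling is reduced here; portage's version maps permission errors to PermissionDenied, and the VCS list is an illustrative subset):

import os
import stat

VCS_DIRS = frozenset(["CVS", ".git", ".hg", ".svn"])  # illustrative subset

def recursive_file_list(path):
    # Children are pushed in reverse-sorted order so that popping from
    # the end of the stack yields entries in sorted order, with regular
    # files and directories interleaved just like the patched code.
    stack = [os.path.split(path)]
    while stack:
        parent, fname = stack.pop()
        fullpath = os.path.join(parent, fname)
        try:
            st = os.stat(fullpath)
        except OSError:
            continue
        if fname.startswith(".") or fname.endswith("~"):
            continue
        if stat.S_ISDIR(st.st_mode):
            if fname in VCS_DIRS:
                continue
            try:
                children = os.listdir(fullpath)
            except OSError:
                continue
            stack.extend((fullpath, x) for x in sorted(children, reverse=True))
        elif stat.S_ISREG(st.st_mode):
            yield fullpath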
diff --git a/portage_with_autodep/pym/portage/util/_desktop_entry.py b/portage_with_autodep/pym/portage/util/_desktop_entry.py
index 7901780..0b49547 100644
--- a/portage_with_autodep/pym/portage/util/_desktop_entry.py
+++ b/portage_with_autodep/pym/portage/util/_desktop_entry.py
@@ -1,7 +1,8 @@
-# Copyright 2012 Gentoo Foundation
+# Copyright 2012-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import io
+import re
import subprocess
import sys
@@ -10,7 +11,9 @@ try:
except ImportError:
from ConfigParser import Error as ConfigParserError, RawConfigParser
+import portage
from portage import _encodings, _unicode_encode, _unicode_decode
+from portage.util import writemsg
def parse_desktop_entry(path):
"""
@@ -31,45 +34,71 @@ def parse_desktop_entry(path):
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'],
errors='replace') as f:
- read_file(f)
+ content = f.read()
+
+ # In Python 3.2, read_file does not support bytes in file names
+ # (see bug #429544), so use StringIO to hide the file name.
+ read_file(io.StringIO(content))
return parser
-_ignored_service_errors = (
- 'error: required key "Name" in group "Desktop Entry" is not present',
- 'error: key "Actions" is present in group "Desktop Entry", but the type is "Service" while this key is only valid for type "Application"',
- 'error: key "MimeType" is present in group "Desktop Entry", but the type is "Service" while this key is only valid for type "Application"',
+_trivial_warnings = re.compile(r' looks redundant with value ')
+
+_ignored_errors = (
+ # Ignore error for emacs.desktop:
+ # https://bugs.freedesktop.org/show_bug.cgi?id=35844#c6
+ 'error: (will be fatal in the future): value "TextEditor" in key "Categories" in group "Desktop Entry" requires another category to be present among the following categories: Utility',
+ 'warning: key "Encoding" in group "Desktop Entry" is deprecated'
+)
+
+_ShowIn_exemptions = (
+ # See bug #480586.
+ 'contains an unregistered value "Pantheon"',
)
def validate_desktop_entry(path):
args = ["desktop-file-validate", path]
- if sys.hexversion < 0x3000000 or sys.hexversion >= 0x3020000:
- # Python 3.1 does not support bytes in Popen args.
- args = [_unicode_encode(x, errors='strict') for x in args]
+
+ if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000:
+ # Python 3.1 _execvp throws TypeError for non-absolute executable
+ # path passed as bytes (see http://bugs.python.org/issue8513).
+ fullname = portage.process.find_binary(args[0])
+ if fullname is None:
+ raise portage.exception.CommandNotFound(args[0])
+ args[0] = fullname
+
+ args = [_unicode_encode(x, errors='strict') for x in args]
proc = subprocess.Popen(args,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output_lines = _unicode_decode(proc.communicate()[0]).splitlines()
proc.wait()
if output_lines:
- try:
- desktop_entry = parse_desktop_entry(path)
- except ConfigParserError:
- pass
- else:
- if desktop_entry.has_section("Desktop Entry"):
- try:
- entry_type = desktop_entry.get("Desktop Entry", "Type")
- except ConfigParserError:
- pass
- else:
- if entry_type == "Service":
- # Filter false errors for Type=Service (bug #414125).
- filtered_output = []
- for line in output_lines:
- if line[len(path)+2:] in _ignored_service_errors:
- continue
- filtered_output.append(line)
- output_lines = filtered_output
+ filtered_output = []
+ for line in output_lines:
+ msg = line[len(path)+2:]
+ # "hint:" output is new in desktop-file-utils-0.21
+ if msg.startswith('hint: ') or msg in _ignored_errors:
+ continue
+ if 'for key "NotShowIn" in group "Desktop Entry"' in msg or \
+ 'for key "OnlyShowIn" in group "Desktop Entry"' in msg:
+ exempt = False
+ for s in _ShowIn_exemptions:
+ if s in msg:
+ exempt = True
+ break
+ if exempt:
+ continue
+ filtered_output.append(line)
+ output_lines = filtered_output
+
+ if output_lines:
+ output_lines = [line for line in output_lines
+ if _trivial_warnings.search(line) is None]
return output_lines
+
+if __name__ == "__main__":
+ for arg in sys.argv[1:]:
+ for line in validate_desktop_entry(arg):
+ writemsg(line + "\n", noiselevel=-1)
diff --git a/portage_with_autodep/pym/portage/util/_desktop_entry.pyo b/portage_with_autodep/pym/portage/util/_desktop_entry.pyo
index 7dec17a..c338c3f 100644
--- a/portage_with_autodep/pym/portage/util/_desktop_entry.pyo
+++ b/portage_with_autodep/pym/portage/util/_desktop_entry.pyo
Binary files differ
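Note: the rewritten validate_desktop_entry() above switches from the old Type=Service special case to a general filter over desktop-file-validate output: "hint:" lines, a small list of known-harmless messages, exempted NotShowIn/OnlyShowIn values, and "looks redundant with value" warnings are all dropped. A hedged sketch of that filtering shape (the ignore rules here are stand-ins, not portage's exact _ignored_errors/_ShowIn_exemptions tuples, and desktop-file-validate must be installed):

import re
import subprocess

IGNORED_MESSAGES = (
    'warning: key "Encoding" in group "Desktop Entry" is deprecated',
)
TRIVIAL_RE = re.compile(r' looks redundant with value ')

def filtered_validation(path):
    proc = subprocess.Popen(["desktop-file-validate", path],
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    output = proc.communicate()[0].decode(errors="replace")
    kept = []
    for line in output.splitlines():
        msg = line[len(path) + 2:]   # strip the leading "<path>: " prefix
        if msg.startswith("hint: ") or msg in IGNORED_MESSAGES:
            continue
        if TRIVIAL_RE.search(line) is not None:
            continue
        kept.append(line)
    return kept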
diff --git a/portage_with_autodep/pym/portage/util/_dyn_libs/LinkageMapELF.py b/portage_with_autodep/pym/portage/util/_dyn_libs/LinkageMapELF.py
index e71ac73..3920f94 100644
--- a/portage_with_autodep/pym/portage/util/_dyn_libs/LinkageMapELF.py
+++ b/portage_with_autodep/pym/portage/util/_dyn_libs/LinkageMapELF.py
@@ -1,4 +1,4 @@
-# Copyright 1998-2011 Gentoo Foundation
+# Copyright 1998-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import errno
@@ -26,7 +26,7 @@ class LinkageMapELF(object):
_soname_map_class = slot_dict_class(
("consumers", "providers"), prefix="")
- class _obj_properies_class(object):
+ class _obj_properties_class(object):
__slots__ = ("arch", "needed", "runpaths", "soname", "alt_paths",
"owner",)
@@ -316,7 +316,7 @@ class LinkageMapELF(object):
myprops = obj_properties.get(obj_key)
if myprops is None:
indexed = False
- myprops = self._obj_properies_class(
+ myprops = self._obj_properties_class(
arch, needed, path, soname, [], owner)
obj_properties[obj_key] = myprops
# All object paths are added into the obj_properties tuple.
@@ -678,7 +678,7 @@ class LinkageMapELF(object):
rValue[soname].add(provider)
return rValue
- def findConsumers(self, obj, exclude_providers=None):
+ def findConsumers(self, obj, exclude_providers=None, greedy=True):
"""
Find consumers of an object or object key.
@@ -715,6 +715,9 @@ class LinkageMapELF(object):
'/usr/lib/libssl.so.0.9.8'), and return True if the library is
owned by a provider which is planned for removal.
@type exclude_providers: collection
+ @param greedy: If True, then include consumers that are satisfied
+ by alternative providers, otherwise omit them. Default is True.
+ @type greedy: Boolean
@rtype: set of strings (example: set(['/bin/foo', '/usr/bin/bar']))
@return: The return value is a soname -> set-of-library-paths, where
set-of-library-paths satisfy soname.
@@ -769,16 +772,19 @@ class LinkageMapELF(object):
defpath_keys = set(self._path_key(x) for x in self._defpath)
satisfied_consumer_keys = set()
if soname_node is not None:
- if exclude_providers is not None:
+ if exclude_providers is not None or not greedy:
relevant_dir_keys = set()
for provider_key in soname_node.providers:
+ if not greedy and provider_key == obj_key:
+ continue
provider_objs = self._obj_properties[provider_key].alt_paths
for p in provider_objs:
provider_excluded = False
- for excluded_provider_isowner in exclude_providers:
- if excluded_provider_isowner(p):
- provider_excluded = True
- break
+ if exclude_providers is not None:
+ for excluded_provider_isowner in exclude_providers:
+ if excluded_provider_isowner(p):
+ provider_excluded = True
+ break
if not provider_excluded:
# This provider is not excluded. It will
# satisfy a consumer of this soname if it
diff --git a/portage_with_autodep/pym/portage/util/_dyn_libs/LinkageMapELF.pyo b/portage_with_autodep/pym/portage/util/_dyn_libs/LinkageMapELF.pyo
index c1e5603..4907057 100644
--- a/portage_with_autodep/pym/portage/util/_dyn_libs/LinkageMapELF.pyo
+++ b/portage_with_autodep/pym/portage/util/_dyn_libs/LinkageMapELF.pyo
Binary files differ
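Note: the findConsumers() change above adds a greedy flag: with greedy=False, consumers whose soname can be satisfied by some other, non-excluded provider are omitted, while exclude_providers remains a collection of predicates that flag a provider path as belonging to a package planned for removal. A small illustration of the predicate-based provider filtering (names are illustrative, not LinkageMapELF internals):

def surviving_providers(provider_paths, exclude_providers=None):
    # Keep only providers that no exclusion predicate claims.
    kept = []
    for path in provider_paths:
        if any(pred(path) for pred in (exclude_providers or ())):
            continue
        kept.append(path)
    return kept

# Example: treat a library owned by a package slated for removal as excluded.
planned_removal = {"/usr/lib/libssl.so.0.9.8"}
print(surviving_providers(
    ["/usr/lib/libssl.so.0.9.8", "/usr/lib/libssl.so.1.0.0"],
    exclude_providers=[planned_removal.__contains__]))
# -> ['/usr/lib/libssl.so.1.0.0']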
diff --git a/portage_with_autodep/pym/portage/util/_dyn_libs/PreservedLibsRegistry.pyo b/portage_with_autodep/pym/portage/util/_dyn_libs/PreservedLibsRegistry.pyo
index 8cdd7cb..8d18cdc 100644
--- a/portage_with_autodep/pym/portage/util/_dyn_libs/PreservedLibsRegistry.pyo
+++ b/portage_with_autodep/pym/portage/util/_dyn_libs/PreservedLibsRegistry.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/portage/util/_dyn_libs/__init__.pyo b/portage_with_autodep/pym/portage/util/_dyn_libs/__init__.pyo
index 960b66e..86d7f56 100644
--- a/portage_with_autodep/pym/portage/util/_dyn_libs/__init__.pyo
+++ b/portage_with_autodep/pym/portage/util/_dyn_libs/__init__.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/portage/util/_eventloop/EventLoop.py b/portage_with_autodep/pym/portage/util/_eventloop/EventLoop.py
index bbbce52..9ffcc74 100644
--- a/portage_with_autodep/pym/portage/util/_eventloop/EventLoop.py
+++ b/portage_with_autodep/pym/portage/util/_eventloop/EventLoop.py
@@ -1,20 +1,37 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import errno
-import fcntl
import logging
import os
import select
import signal
+import sys
import time
+try:
+ import fcntl
+except ImportError:
+ # http://bugs.jython.org/issue1074
+ fcntl = None
+
+try:
+ import threading
+except ImportError:
+ import dummy_threading as threading
+
from portage.util import writemsg_level
from ..SlotObject import SlotObject
from .PollConstants import PollConstants
from .PollSelectAdapter import PollSelectAdapter
class EventLoop(object):
+ """
+ An event loop, intended to be compatible with the GLib event loop.
+ Call the iteration method in order to execute one iteration of the
+ loop. The idle_add and timeout_add methods serve as thread-safe
+ means to interact with the loop's thread.
+ """
supports_multiprocessing = True
@@ -43,7 +60,9 @@ class EventLoop(object):
that global_event_loop does not need constructor arguments)
@type main: bool
"""
- self._use_signal = main
+ self._use_signal = main and fcntl is not None
+ self._thread_rlock = threading.RLock()
+ self._thread_condition = threading.Condition(self._thread_rlock)
self._poll_event_queue = []
self._poll_event_handlers = {}
self._poll_event_handler_ids = {}
@@ -52,14 +71,48 @@ class EventLoop(object):
self._idle_callbacks = {}
self._timeout_handlers = {}
self._timeout_interval = None
- self._poll_obj = create_poll_instance()
- self.IO_ERR = PollConstants.POLLERR
- self.IO_HUP = PollConstants.POLLHUP
- self.IO_IN = PollConstants.POLLIN
- self.IO_NVAL = PollConstants.POLLNVAL
- self.IO_OUT = PollConstants.POLLOUT
- self.IO_PRI = PollConstants.POLLPRI
+ self._poll_obj = None
+ try:
+ select.epoll
+ except AttributeError:
+ pass
+ else:
+ try:
+ epoll_obj = select.epoll()
+ except IOError:
+ # This happens with Linux 2.4 kernels:
+ # IOError: [Errno 38] Function not implemented
+ pass
+ else:
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000 and fcntl is not None:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(epoll_obj.fileno(), fcntl.F_SETFD,
+ fcntl.fcntl(epoll_obj.fileno(),
+ fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ self._poll_obj = _epoll_adapter(epoll_obj)
+ self.IO_ERR = select.EPOLLERR
+ self.IO_HUP = select.EPOLLHUP
+ self.IO_IN = select.EPOLLIN
+ self.IO_NVAL = 0
+ self.IO_OUT = select.EPOLLOUT
+ self.IO_PRI = select.EPOLLPRI
+
+ if self._poll_obj is None:
+ self._poll_obj = create_poll_instance()
+ self.IO_ERR = PollConstants.POLLERR
+ self.IO_HUP = PollConstants.POLLHUP
+ self.IO_IN = PollConstants.POLLIN
+ self.IO_NVAL = PollConstants.POLLNVAL
+ self.IO_OUT = PollConstants.POLLOUT
+ self.IO_PRI = PollConstants.POLLPRI
self._child_handlers = {}
self._sigchld_read = None
@@ -67,6 +120,14 @@ class EventLoop(object):
self._sigchld_src_id = None
self._pid = os.getpid()
+ def _new_source_id(self):
+ """
+ Generate a new source id. This method is thread-safe.
+ """
+ with self._thread_rlock:
+ self._event_handler_id += 1
+ return self._event_handler_id
+
def _poll(self, timeout=None):
"""
All poll() calls pass through here. The poll events
@@ -85,9 +146,11 @@ class EventLoop(object):
try:
self._poll_event_queue.extend(self._poll_obj.poll(timeout))
break
- except select.error as e:
+ except (IOError, select.error) as e:
# Silently handle EINTR, which is normal when we have
- # received a signal such as SIGINT.
+ # received a signal such as SIGINT (epoll objects may
+ # raise IOError rather than select.error, at least in
+ # Python 3.2).
if not (e.args and e.args[0] == errno.EINTR):
writemsg_level("\n!!! select error: %s\n" % (e,),
level=logging.ERROR, noiselevel=-1)
@@ -101,7 +164,19 @@ class EventLoop(object):
def iteration(self, *args):
"""
- Like glib.MainContext.iteration(), runs a single iteration.
+ Like glib.MainContext.iteration(), runs a single iteration. In order
+ to avoid blocking forever when may_block is True (the default),
+ callers must be careful to ensure that at least one of the following
+ conditions is met:
+ 1) An event source or timeout is registered which is guaranteed
+ to trigger at least on event (a call to an idle function
+ only counts as an event if it returns a False value which
+ causes it to stop being called)
+ 2) Another thread is guaranteed to call one of the thread-safe
+ methods which notify iteration to stop waiting (such as
+ idle_add or timeout_add).
+ These rules ensure that iteration is able to block until an event
+ arrives, without doing any busy waiting that would waste CPU time.
@type may_block: bool
@param may_block: if True the call may block waiting for an event
(default is True).
@@ -120,23 +195,32 @@ class EventLoop(object):
event_queue = self._poll_event_queue
event_handlers = self._poll_event_handlers
events_handled = 0
+ timeouts_checked = False
if not event_handlers:
- if self._run_timeouts():
- events_handled += 1
- if not event_handlers:
- if not events_handled and may_block and \
- self._timeout_interval is not None:
+ with self._thread_condition:
+ if self._run_timeouts():
+ events_handled += 1
+ timeouts_checked = True
+ if not event_handlers and not events_handled and may_block:
# Block so that we don't waste cpu time by looping too
# quickly. This makes EventLoop useful for code that needs
# to wait for timeout callbacks regardless of whether or
# not any IO handlers are currently registered.
- try:
- self._poll(timeout=self._timeout_interval)
- except StopIteration:
- pass
+ timeout = self._get_poll_timeout()
+ if timeout is None:
+ wait_timeout = None
+ else:
+ wait_timeout = float(timeout) / 1000
+ # NOTE: In order to avoid a possible infinite wait when
+ # wait_timeout is None, the previous _run_timeouts()
+ # call must have returned False *with* _thread_condition
+ # acquired. Otherwise, we would risk going to sleep after
+ # our only notify event has already passed.
+ self._thread_condition.wait(wait_timeout)
if self._run_timeouts():
events_handled += 1
+ timeouts_checked = True
# If any timeouts have executed, then return immediately,
# in order to minimize latency in termination of iteration
@@ -147,14 +231,18 @@ class EventLoop(object):
if not event_queue:
if may_block:
- if self._child_handlers:
- if self._timeout_interval is None:
- timeout = self._sigchld_interval
- else:
- timeout = min(self._sigchld_interval,
- self._timeout_interval)
- else:
- timeout = self._timeout_interval
+ timeout = self._get_poll_timeout()
+
+ # Avoid blocking for IO if there are any timeout
+ # or idle callbacks available to process.
+ if timeout != 0 and not timeouts_checked:
+ if self._run_timeouts():
+ events_handled += 1
+ timeouts_checked = True
+ if events_handled:
+ # Minimize latency for loops controlled
+ # by timeout or idle callback events.
+ timeout = 0
else:
timeout = 0
@@ -170,17 +258,37 @@ class EventLoop(object):
while event_queue:
events_handled += 1
f, event = event_queue.pop()
- x = event_handlers[f]
+ try:
+ x = event_handlers[f]
+ except KeyError:
+ # This is known to be triggered by the epoll
+ # implementation in qemu-user-1.2.2, and appears
+ # to be harmless (see bug #451326).
+ continue
if not x.callback(f, event, *x.args):
self.source_remove(x.source_id)
- # Run timeouts last, in order to minimize latency in
- # termination of iteration loops that they may control.
- if self._run_timeouts():
- events_handled += 1
+ if not timeouts_checked:
+ if self._run_timeouts():
+ events_handled += 1
+ timeouts_checked = True
return bool(events_handled)
+ def _get_poll_timeout(self):
+
+ with self._thread_rlock:
+ if self._child_handlers:
+ if self._timeout_interval is None:
+ timeout = self._sigchld_interval
+ else:
+ timeout = min(self._sigchld_interval,
+ self._timeout_interval)
+ else:
+ timeout = self._timeout_interval
+
+ return timeout
+
def child_watch_add(self, pid, callback, data=None):
"""
Like glib.child_watch_add(), sets callback to be called with the
@@ -201,18 +309,29 @@ class EventLoop(object):
@rtype: int
@return: an integer ID
"""
- self._event_handler_id += 1
- source_id = self._event_handler_id
+ source_id = self._new_source_id()
self._child_handlers[source_id] = self._child_callback_class(
callback=callback, data=data, pid=pid, source_id=source_id)
if self._use_signal:
if self._sigchld_read is None:
self._sigchld_read, self._sigchld_write = os.pipe()
+
fcntl.fcntl(self._sigchld_read, fcntl.F_SETFL,
fcntl.fcntl(self._sigchld_read,
fcntl.F_GETFL) | os.O_NONBLOCK)
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(self._sigchld_read, fcntl.F_SETFD,
+ fcntl.fcntl(self._sigchld_read,
+ fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
# The IO watch is dynamically registered and unregistered as
# needed, since we don't want to consider it as a valid source
# of events when there are no child listeners. It's important
@@ -276,22 +395,25 @@ class EventLoop(object):
"""
Like glib.idle_add(), if callback returns False it is
automatically removed from the list of event sources and will
- not be called again.
+ not be called again. This method is thread-safe.
@type callback: callable
@param callback: a function to call
@rtype: int
@return: an integer ID
"""
- self._event_handler_id += 1
- source_id = self._event_handler_id
- self._idle_callbacks[source_id] = self._idle_callback_class(
- args=args, callback=callback, source_id=source_id)
+ with self._thread_condition:
+ source_id = self._new_source_id()
+ self._idle_callbacks[source_id] = self._idle_callback_class(
+ args=args, callback=callback, source_id=source_id)
+ self._thread_condition.notify()
return source_id
def _run_idle_callbacks(self):
+ # assumes caller has acquired self._thread_rlock
if not self._idle_callbacks:
- return
+ return False
+ state_change = 0
# Iterate of our local list, since self._idle_callbacks can be
# modified during the exection of these callbacks.
for x in list(self._idle_callbacks.values()):
@@ -304,26 +426,32 @@ class EventLoop(object):
x.calling = True
try:
if not x.callback(*x.args):
+ state_change += 1
self.source_remove(x.source_id)
finally:
x.calling = False
+ return bool(state_change)
+
def timeout_add(self, interval, function, *args):
"""
Like glib.timeout_add(), interval argument is the number of
milliseconds between calls to your function, and your function
should return False to stop being called, or True to continue
being called. Any additional positional arguments given here
- are passed to your function when it's called.
+ are passed to your function when it's called. This method is
+ thread-safe.
"""
- self._event_handler_id += 1
- source_id = self._event_handler_id
- self._timeout_handlers[source_id] = \
- self._timeout_handler_class(
- interval=interval, function=function, args=args,
- source_id=source_id, timestamp=time.time())
- if self._timeout_interval is None or self._timeout_interval > interval:
- self._timeout_interval = interval
+ with self._thread_condition:
+ source_id = self._new_source_id()
+ self._timeout_handlers[source_id] = \
+ self._timeout_handler_class(
+ interval=interval, function=function, args=args,
+ source_id=source_id, timestamp=time.time())
+ if self._timeout_interval is None or \
+ self._timeout_interval > interval:
+ self._timeout_interval = interval
+ self._thread_condition.notify()
return source_id
def _run_timeouts(self):
@@ -333,37 +461,40 @@ class EventLoop(object):
if self._poll_child_processes():
calls += 1
- self._run_idle_callbacks()
-
- if not self._timeout_handlers:
- return bool(calls)
+ with self._thread_rlock:
- ready_timeouts = []
- current_time = time.time()
- for x in self._timeout_handlers.values():
- elapsed_seconds = current_time - x.timestamp
- # elapsed_seconds < 0 means the system clock has been adjusted
- if elapsed_seconds < 0 or \
- (x.interval - 1000 * elapsed_seconds) <= 0:
- ready_timeouts.append(x)
+ if self._run_idle_callbacks():
+ calls += 1
- # Iterate of our local list, since self._timeout_handlers can be
- # modified during the exection of these callbacks.
- for x in ready_timeouts:
- if x.source_id not in self._timeout_handlers:
- # it got cancelled while executing another timeout
- continue
- if x.calling:
- # don't call it recursively
- continue
- calls += 1
- x.calling = True
- try:
- x.timestamp = time.time()
- if not x.function(*x.args):
- self.source_remove(x.source_id)
- finally:
- x.calling = False
+ if not self._timeout_handlers:
+ return bool(calls)
+
+ ready_timeouts = []
+ current_time = time.time()
+ for x in self._timeout_handlers.values():
+ elapsed_seconds = current_time - x.timestamp
+ # elapsed_seconds < 0 means the system clock has been adjusted
+ if elapsed_seconds < 0 or \
+ (x.interval - 1000 * elapsed_seconds) <= 0:
+ ready_timeouts.append(x)
+
+ # Iterate of our local list, since self._timeout_handlers can be
+ # modified during the exection of these callbacks.
+ for x in ready_timeouts:
+ if x.source_id not in self._timeout_handlers:
+ # it got cancelled while executing another timeout
+ continue
+ if x.calling:
+ # don't call it recursively
+ continue
+ calls += 1
+ x.calling = True
+ try:
+ x.timestamp = time.time()
+ if not x.function(*x.args):
+ self.source_remove(x.source_id)
+ finally:
+ x.calling = False
return bool(calls)
@@ -385,8 +516,7 @@ class EventLoop(object):
"""
if f in self._poll_event_handlers:
raise AssertionError("fd %d is already registered" % f)
- self._event_handler_id += 1
- source_id = self._event_handler_id
+ source_id = self._new_source_id()
self._poll_event_handler_ids[source_id] = f
self._poll_event_handlers[f] = self._io_handler_class(
args=args, callback=callback, f=f, source_id=source_id)
@@ -406,18 +536,21 @@ class EventLoop(object):
self.source_remove(self._sigchld_src_id)
self._sigchld_src_id = None
return True
- idle_callback = self._idle_callbacks.pop(reg_id, None)
- if idle_callback is not None:
- return True
- timeout_handler = self._timeout_handlers.pop(reg_id, None)
- if timeout_handler is not None:
- if timeout_handler.interval == self._timeout_interval:
- if self._timeout_handlers:
- self._timeout_interval = \
- min(x.interval for x in self._timeout_handlers.values())
- else:
- self._timeout_interval = None
- return True
+
+ with self._thread_rlock:
+ idle_callback = self._idle_callbacks.pop(reg_id, None)
+ if idle_callback is not None:
+ return True
+ timeout_handler = self._timeout_handlers.pop(reg_id, None)
+ if timeout_handler is not None:
+ if timeout_handler.interval == self._timeout_interval:
+ if self._timeout_handlers:
+ self._timeout_interval = min(x.interval
+ for x in self._timeout_handlers.values())
+ else:
+ self._timeout_interval = None
+ return True
+
f = self._poll_event_handler_ids.pop(reg_id, None)
if f is None:
return False
@@ -467,7 +600,12 @@ def can_poll_device():
return _can_poll_device
p = select.poll()
- p.register(dev_null.fileno(), PollConstants.POLLIN)
+ try:
+ p.register(dev_null.fileno(), PollConstants.POLLIN)
+ except TypeError:
+ # Jython: Object 'org.python.core.io.FileIO@f8f175' is not watchable
+ _can_poll_device = False
+ return _can_poll_device
invalid_request = False
for f, event in p.poll():
@@ -488,3 +626,37 @@ def create_poll_instance():
if can_poll_device():
return select.poll()
return PollSelectAdapter()
+
+class _epoll_adapter(object):
+ """
+ Wraps a select.epoll instance in order to make it compatible
+ with select.poll instances. This is necessary since epoll instances
+ interpret timeout arguments differently. Note that the file descriptor
+ that is associated with an epoll instance will close automatically when
+ it is garbage collected, so it's not necessary to close it explicitly.
+ """
+ __slots__ = ('_epoll_obj',)
+
+ def __init__(self, epoll_obj):
+ self._epoll_obj = epoll_obj
+
+ def register(self, fd, *args):
+ self._epoll_obj.register(fd, *args)
+
+ def unregister(self, fd):
+ self._epoll_obj.unregister(fd)
+
+ def poll(self, *args):
+ if len(args) > 1:
+ raise TypeError(
+ "poll expected at most 2 arguments, got " + \
+ repr(1 + len(args)))
+ timeout = -1
+ if args:
+ timeout = args[0]
+ if timeout is None or timeout < 0:
+ timeout = -1
+ elif timeout != 0:
+ timeout = float(timeout) / 1000
+
+ return self._epoll_obj.poll(timeout)
diff --git a/portage_with_autodep/pym/portage/util/_eventloop/EventLoop.pyo b/portage_with_autodep/pym/portage/util/_eventloop/EventLoop.pyo
index 6ce2883..948018f 100644
--- a/portage_with_autodep/pym/portage/util/_eventloop/EventLoop.pyo
+++ b/portage_with_autodep/pym/portage/util/_eventloop/EventLoop.pyo
Binary files differ
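Note: the _epoll_adapter added above exists mainly because select.poll() takes its timeout in milliseconds (with None or a negative value meaning "block indefinitely"), while select.epoll() takes seconds as a float (with -1 meaning "block indefinitely"). A minimal sketch of just that timeout translation, matching the conversion in _epoll_adapter.poll():

def poll_timeout_to_epoll(timeout_ms):
    # poll()-style: milliseconds; None or a negative value blocks.
    # epoll()-style: seconds as a float; -1 blocks.
    if timeout_ms is None or timeout_ms < 0:
        return -1
    if timeout_ms == 0:
        return 0
    return float(timeout_ms) / 1000

# e.g. epoll_obj.poll(poll_timeout_to_epoll(500)) waits at most 0.5 seconds.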
diff --git a/portage_with_autodep/pym/portage/util/_eventloop/GlibEventLoop.pyo b/portage_with_autodep/pym/portage/util/_eventloop/GlibEventLoop.pyo
index d3453a4..d149863 100644
--- a/portage_with_autodep/pym/portage/util/_eventloop/GlibEventLoop.pyo
+++ b/portage_with_autodep/pym/portage/util/_eventloop/GlibEventLoop.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/portage/util/_eventloop/PollConstants.pyo b/portage_with_autodep/pym/portage/util/_eventloop/PollConstants.pyo
index 6c7c953..876c5b1 100644
--- a/portage_with_autodep/pym/portage/util/_eventloop/PollConstants.pyo
+++ b/portage_with_autodep/pym/portage/util/_eventloop/PollConstants.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/portage/util/_eventloop/PollSelectAdapter.py b/portage_with_autodep/pym/portage/util/_eventloop/PollSelectAdapter.py
index 17e63d9..244788c 100644
--- a/portage_with_autodep/pym/portage/util/_eventloop/PollSelectAdapter.py
+++ b/portage_with_autodep/pym/portage/util/_eventloop/PollSelectAdapter.py
@@ -64,7 +64,7 @@ class PollSelectAdapter(object):
if timeout is not None and timeout < 0:
timeout = None
if timeout is not None:
- select_args.append(timeout / 1000)
+ select_args.append(float(timeout) / 1000)
select_events = select.select(*select_args)
poll_events = []
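
The float() cast above matters under Python 2, where / on two integers truncates; a quick sketch of the difference (illustrative only):

    timeout = 250                   # milliseconds, as passed to poll()
    print(timeout / 1000)           # 0 on Python 2 -- select() would not block at all
    print(float(timeout) / 1000)    # 0.25 on both Python 2 and 3
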
diff --git a/portage_with_autodep/pym/portage/util/_eventloop/PollSelectAdapter.pyo b/portage_with_autodep/pym/portage/util/_eventloop/PollSelectAdapter.pyo
index e9ecc51..eac94ca 100644
--- a/portage_with_autodep/pym/portage/util/_eventloop/PollSelectAdapter.pyo
+++ b/portage_with_autodep/pym/portage/util/_eventloop/PollSelectAdapter.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/portage/util/_eventloop/__init__.pyo b/portage_with_autodep/pym/portage/util/_eventloop/__init__.pyo
index 69864a6..2b2f9a1 100644
--- a/portage_with_autodep/pym/portage/util/_eventloop/__init__.pyo
+++ b/portage_with_autodep/pym/portage/util/_eventloop/__init__.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/portage/util/_eventloop/global_event_loop.pyo b/portage_with_autodep/pym/portage/util/_eventloop/global_event_loop.pyo
index 3d57192..7a7f087 100644
--- a/portage_with_autodep/pym/portage/util/_eventloop/global_event_loop.pyo
+++ b/portage_with_autodep/pym/portage/util/_eventloop/global_event_loop.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/portage/util/_pty.pyo b/portage_with_autodep/pym/portage/util/_pty.pyo
index 70b5eb0..0fa01cf 100644
--- a/portage_with_autodep/pym/portage/util/_pty.pyo
+++ b/portage_with_autodep/pym/portage/util/_pty.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/portage/util/_urlopen.py b/portage_with_autodep/pym/portage/util/_urlopen.py
index 307624b..15f041a 100644
--- a/portage_with_autodep/pym/portage/util/_urlopen.py
+++ b/portage_with_autodep/pym/portage/util/_urlopen.py
@@ -1,7 +1,11 @@
# Copyright 2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+import io
import sys
+from datetime import datetime
+from time import mktime
+from email.utils import formatdate, parsedate
try:
from urllib.request import urlopen as _urlopen
@@ -14,29 +18,74 @@ except ImportError:
import urllib2 as urllib_request
from urllib import splituser as urllib_parse_splituser
-def urlopen(url):
- try:
- return _urlopen(url)
- except SystemExit:
- raise
- except Exception:
- if sys.hexversion < 0x3000000:
- raise
- parse_result = urllib_parse.urlparse(url)
- if parse_result.scheme not in ("http", "https") or \
- not parse_result.username:
- raise
-
- return _new_urlopen(url)
-
-def _new_urlopen(url):
- # This is experimental code for bug #413983.
+if sys.hexversion >= 0x3000000:
+ long = int
+
+# to account for the difference between the TIMESTAMP in the index's contents
+# and the file's mtime
+TIMESTAMP_TOLERANCE = 5
+
+def urlopen(url, if_modified_since=None):
parse_result = urllib_parse.urlparse(url)
- netloc = urllib_parse_splituser(parse_result.netloc)[1]
- url = urllib_parse.urlunparse((parse_result.scheme, netloc, parse_result.path, parse_result.params, parse_result.query, parse_result.fragment))
- password_manager = urllib_request.HTTPPasswordMgrWithDefaultRealm()
- if parse_result.username is not None:
- password_manager.add_password(None, url, parse_result.username, parse_result.password)
- auth_handler = urllib_request.HTTPBasicAuthHandler(password_manager)
- opener = urllib_request.build_opener(auth_handler)
- return opener.open(url)
+ if parse_result.scheme not in ("http", "https"):
+ return _urlopen(url)
+ else:
+ netloc = urllib_parse_splituser(parse_result.netloc)[1]
+ url = urllib_parse.urlunparse((parse_result.scheme, netloc, parse_result.path, parse_result.params, parse_result.query, parse_result.fragment))
+ password_manager = urllib_request.HTTPPasswordMgrWithDefaultRealm()
+ request = urllib_request.Request(url)
+ request.add_header('User-Agent', 'Gentoo Portage')
+ if if_modified_since:
+ request.add_header('If-Modified-Since', _timestamp_to_http(if_modified_since))
+ if parse_result.username is not None:
+ password_manager.add_password(None, url, parse_result.username, parse_result.password)
+ auth_handler = CompressedResponseProcessor(password_manager)
+ opener = urllib_request.build_opener(auth_handler)
+ hdl = opener.open(request)
+ if hdl.headers.get('last-modified', ''):
+ try:
+ add_header = hdl.headers.add_header
+ except AttributeError:
+ # Python 2
+ add_header = hdl.headers.addheader
+ add_header('timestamp', _http_to_timestamp(hdl.headers.get('last-modified')))
+ return hdl
+
+def _timestamp_to_http(timestamp):
+ dt = datetime.fromtimestamp(float(long(timestamp)+TIMESTAMP_TOLERANCE))
+ stamp = mktime(dt.timetuple())
+ return formatdate(timeval=stamp, localtime=False, usegmt=True)
+
+def _http_to_timestamp(http_datetime_string):
+ tuple = parsedate(http_datetime_string)
+ timestamp = mktime(tuple)
+ return str(long(timestamp))
+
+class CompressedResponseProcessor(urllib_request.HTTPBasicAuthHandler):
+ # Handler for compressed responses.
+
+ def http_request(self, req):
+ req.add_header('Accept-Encoding', 'bzip2,gzip,deflate')
+ return req
+ https_request = http_request
+
+ def http_response(self, req, response):
+ decompressed = None
+ if response.headers.get('content-encoding') == 'bzip2':
+ import bz2
+ decompressed = io.BytesIO(bz2.decompress(response.read()))
+ elif response.headers.get('content-encoding') == 'gzip':
+ from gzip import GzipFile
+ decompressed = GzipFile(fileobj=io.BytesIO(response.read()), mode='r')
+ elif response.headers.get('content-encoding') == 'deflate':
+ import zlib
+ try:
+ decompressed = io.BytesIO(zlib.decompress(response.read()))
+ except zlib.error: # they ignored RFC1950
+ decompressed = io.BytesIO(zlib.decompress(response.read(), -zlib.MAX_WBITS))
+ if decompressed:
+ old_response = response
+ response = urllib_request.addinfourl(decompressed, old_response.headers, old_response.url, old_response.code)
+ response.msg = old_response.msg
+ return response
+ https_response = http_response
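
A hedged usage sketch of the reworked helper (not from the patch; the import path and the 304 handling shown here are assumptions, since this is a private module):

    from portage.util._urlopen import urlopen

    # if_modified_since is a Unix timestamp; the helper converts it to an
    # HTTP-date for the If-Modified-Since request header.
    hdl = urlopen("https://example.org/metadata/Packages",
        if_modified_since="1392640625")
    # When the server sends Last-Modified, the patch mirrors it back as a
    # synthetic 'timestamp' header holding Unix seconds as a string.
    remote_timestamp = hdl.headers.get("timestamp")
    payload = hdl.read()
    # Note: urllib surfaces a 304 Not Modified response as an HTTPError,
    # so real callers would catch that case separately.
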
diff --git a/portage_with_autodep/pym/portage/util/_urlopen.pyo b/portage_with_autodep/pym/portage/util/_urlopen.pyo
index 9f51de8..d548069 100644
--- a/portage_with_autodep/pym/portage/util/_urlopen.pyo
+++ b/portage_with_autodep/pym/portage/util/_urlopen.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/portage/util/digraph.py b/portage_with_autodep/pym/portage/util/digraph.py
index f3ae658..fc1fb86 100644
--- a/portage_with_autodep/pym/portage/util/digraph.py
+++ b/portage_with_autodep/pym/portage/util/digraph.py
@@ -1,12 +1,13 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = ['digraph']
from collections import deque
import sys
-from portage import _unicode_decode
from portage.util import writemsg
class digraph(object):
@@ -16,24 +17,24 @@ class digraph(object):
def __init__(self):
"""Create an empty digraph"""
-
+
# { node : ( { child : priority } , { parent : priority } ) }
self.nodes = {}
self.order = []
def add(self, node, parent, priority=0):
"""Adds the specified node with the specified parent.
-
+
If the dep is a soft-dep and the node already has a hard
relationship to the parent, the relationship is left as hard."""
-
+
if node not in self.nodes:
self.nodes[node] = ({}, {}, node)
self.order.append(node)
-
+
if not parent:
return
-
+
if parent not in self.nodes:
self.nodes[parent] = ({}, {}, parent)
self.order.append(parent)
@@ -50,15 +51,15 @@ class digraph(object):
"""Removes the specified node from the digraph, also removing
any ties to other nodes in the digraph. Raises KeyError if the
node doesn't exist."""
-
+
if node not in self.nodes:
raise KeyError(node)
-
+
for parent in self.nodes[node][1]:
del self.nodes[parent][0][node]
for child in self.nodes[node][0]:
del self.nodes[child][1][node]
-
+
del self.nodes[node]
self.order.remove(node)
@@ -157,10 +158,10 @@ class digraph(object):
def leaf_nodes(self, ignore_priority=None):
"""Return all nodes that have no children
-
+
If ignore_soft_deps is True, soft deps are not counted as
children in calculations."""
-
+
leaf_nodes = []
if ignore_priority is None:
for node in self.order:
@@ -191,10 +192,10 @@ class digraph(object):
def root_nodes(self, ignore_priority=None):
"""Return all nodes that have no parents.
-
+
If ignore_soft_deps is True, soft deps are not counted as
parents in calculations."""
-
+
root_nodes = []
if ignore_priority is None:
for node in self.order:
@@ -272,18 +273,17 @@ class digraph(object):
def debug_print(self):
def output(s):
writemsg(s, noiselevel=-1)
- # Use _unicode_decode() to force unicode format
+ # Use unicode_literals to force unicode format
# strings for python-2.x safety, ensuring that
# node.__unicode__() is used when necessary.
for node in self.nodes:
- output(_unicode_decode("%s ") % (node,))
+ output("%s " % (node,))
if self.nodes[node][0]:
output("depends on\n")
else:
output("(no children)\n")
for child, priorities in self.nodes[node][0].items():
- output(_unicode_decode(" %s (%s)\n") % \
- (child, priorities[-1],))
+ output(" %s (%s)\n" % (child, priorities[-1],))
def bfs(self, start, ignore_priority=None):
if start not in self:
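
For context, a small hedged usage sketch of the digraph API touched above (package names are made up; not part of this patch):

    g = digraph()
    g.add("app-editors/vim", None)                       # node with no parent
    g.add("app-editors/vim-core", "app-editors/vim")     # child of vim
    g.add("dev-libs/libpcre", "app-editors/vim-core")

    print(g.leaf_nodes())    # nodes with no children, e.g. ['dev-libs/libpcre']
    print(g.root_nodes())    # nodes with no parents,  e.g. ['app-editors/vim']
    # With unicode_literals in effect, debug_print() can format nodes with a
    # plain "%s" and still get unicode-safe output on Python 2.
    g.debug_print()
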
diff --git a/portage_with_autodep/pym/portage/util/digraph.pyo b/portage_with_autodep/pym/portage/util/digraph.pyo
index 8e503a6..fc00aa7 100644
--- a/portage_with_autodep/pym/portage/util/digraph.pyo
+++ b/portage_with_autodep/pym/portage/util/digraph.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/portage/util/env_update.py b/portage_with_autodep/pym/portage/util/env_update.py
index ace4077..5fddaac 100644
--- a/portage_with_autodep/pym/portage/util/env_update.py
+++ b/portage_with_autodep/pym/portage/util/env_update.py
@@ -1,16 +1,17 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
__all__ = ['env_update']
import errno
+import glob
import io
import stat
import sys
import time
import portage
-from portage import os, _encodings, _unicode_encode
+from portage import os, _encodings, _unicode_decode, _unicode_encode
from portage.checksum import prelink_capable
from portage.data import ostype
from portage.exception import ParseError
@@ -88,7 +89,8 @@ def _env_update(makelinks, target_root, prev_mtimes, contents, env,
eprefix = settings.get("EPREFIX", "")
eprefix_lstrip = eprefix.lstrip(os.sep)
- envd_dir = os.path.join(target_root, eprefix_lstrip, "etc", "env.d")
+ eroot = normalize_path(os.path.join(target_root, eprefix_lstrip)).rstrip(os.sep) + os.sep
+ envd_dir = os.path.join(eroot, "etc", "env.d")
ensure_dirs(envd_dir, mode=0o755)
fns = listdir(envd_dir, EmptyOnError=1)
fns.sort()
@@ -164,15 +166,14 @@ def _env_update(makelinks, target_root, prev_mtimes, contents, env,
they won't be overwritten by this dict.update call."""
env.update(myconfig)
- ldsoconf_path = os.path.join(
- target_root, eprefix_lstrip, "etc", "ld.so.conf")
+ ldsoconf_path = os.path.join(eroot, "etc", "ld.so.conf")
try:
myld = io.open(_unicode_encode(ldsoconf_path,
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['content'], errors='replace')
- myldlines=myld.readlines()
+ myldlines = myld.readlines()
myld.close()
- oldld=[]
+ oldld = []
for x in myldlines:
#each line has at least one char (a newline)
if x[:1] == "#":
@@ -193,20 +194,34 @@ def _env_update(makelinks, target_root, prev_mtimes, contents, env,
myfd.write(x + "\n")
myfd.close()
+ potential_lib_dirs = set()
+ for lib_dir_glob in ('usr/lib*', 'lib*'):
+ x = os.path.join(eroot, lib_dir_glob)
+ for y in glob.glob(_unicode_encode(x,
+ encoding=_encodings['fs'], errors='strict')):
+ try:
+ y = _unicode_decode(y,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ if os.path.basename(y) != 'libexec':
+ potential_lib_dirs.add(y[len(eroot):])
+
# Update prelink.conf if we are prelink-enabled
if prelink_capable:
- newprelink = atomic_ofstream(os.path.join(
- target_root, eprefix_lstrip, "etc", "prelink.conf"))
+ prelink_d = os.path.join(eroot, 'etc', 'prelink.conf.d')
+ ensure_dirs(prelink_d)
+ newprelink = atomic_ofstream(os.path.join(prelink_d, 'portage.conf'))
newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
newprelink.write("# contents of /etc/env.d directory\n")
- for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
- newprelink.write("-l %s\n" % (x,));
- prelink_paths = []
- prelink_paths += specials.get("LDPATH", [])
- prelink_paths += specials.get("PATH", [])
- prelink_paths += specials.get("PRELINK_PATH", [])
- prelink_path_mask = specials.get("PRELINK_PATH_MASK", [])
+ for x in sorted(potential_lib_dirs) + ['bin', 'sbin']:
+ newprelink.write('-l /%s\n' % (x,));
+ prelink_paths = set()
+ prelink_paths |= set(specials.get('LDPATH', []))
+ prelink_paths |= set(specials.get('PATH', []))
+ prelink_paths |= set(specials.get('PRELINK_PATH', []))
+ prelink_path_mask = specials.get('PRELINK_PATH_MASK', [])
for x in prelink_paths:
if not x:
continue
@@ -227,12 +242,26 @@ def _env_update(makelinks, target_root, prev_mtimes, contents, env,
newprelink.write("-b %s\n" % (x,))
newprelink.close()
+ # Migration code path. If /etc/prelink.conf was generated by us, then
+ # point it to the new stuff until the prelink package re-installs.
+ prelink_conf = os.path.join(eroot, 'etc', 'prelink.conf')
+ try:
+ with open(_unicode_encode(prelink_conf,
+ encoding=_encodings['fs'], errors='strict'), 'rb') as f:
+ if f.readline() == b'# prelink.conf autogenerated by env-update; make all changes to\n':
+ f = atomic_ofstream(prelink_conf)
+ f.write('-c /etc/prelink.conf.d/*.conf\n')
+ f.close()
+ except IOError as e:
+ if e.errno != errno.ENOENT:
+ raise
+
current_time = long(time.time())
mtime_changed = False
+
lib_dirs = set()
- for lib_dir in set(specials["LDPATH"] + \
- ['usr/lib','usr/lib64','usr/lib32','lib','lib64','lib32']):
- x = os.path.join(target_root, eprefix_lstrip, lib_dir.lstrip(os.sep))
+ for lib_dir in set(specials['LDPATH']) | potential_lib_dirs:
+ x = os.path.join(eroot, lib_dir.lstrip(os.sep))
try:
newldpathtime = os.stat(x)[stat.ST_MTIME]
lib_dirs.add(normalize_path(x))
@@ -292,7 +321,7 @@ def _env_update(makelinks, target_root, prev_mtimes, contents, env,
writemsg_level(_(">>> Regenerating %setc/ld.so.cache...\n") % \
(target_root,))
os.system("cd / ; %s -X -r '%s'" % (ldconfig, target_root))
- elif ostype in ("FreeBSD","DragonFly"):
+ elif ostype in ("FreeBSD", "DragonFly"):
writemsg_level(_(">>> Regenerating %svar/run/ld-elf.so.hints...\n") % \
target_root)
os.system(("cd / ; %s -elf -i " + \
@@ -308,11 +337,10 @@ def _env_update(makelinks, target_root, prev_mtimes, contents, env,
cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"
#create /etc/profile.env for bash support
- outfile = atomic_ofstream(os.path.join(
- target_root, eprefix_lstrip, "etc", "profile.env"))
+ outfile = atomic_ofstream(os.path.join(eroot, "etc", "profile.env"))
outfile.write(penvnotice)
- env_keys = [ x for x in env if x != "LDPATH" ]
+ env_keys = [x for x in env if x != "LDPATH"]
env_keys.sort()
for k in env_keys:
v = env[k]
@@ -323,8 +351,7 @@ def _env_update(makelinks, target_root, prev_mtimes, contents, env,
outfile.close()
#create /etc/csh.env for (t)csh support
- outfile = atomic_ofstream(os.path.join(
- target_root, eprefix_lstrip, "etc", "csh.env"))
+ outfile = atomic_ofstream(os.path.join(eroot, "etc", "csh.env"))
outfile.write(cenvnotice)
for x in env_keys:
outfile.write("setenv %s '%s'\n" % (x, env[x]))
diff --git a/portage_with_autodep/pym/portage/util/env_update.pyo b/portage_with_autodep/pym/portage/util/env_update.pyo
index ee3b187..2398289 100644
--- a/portage_with_autodep/pym/portage/util/env_update.pyo
+++ b/portage_with_autodep/pym/portage/util/env_update.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/portage/util/lafilefixer.py b/portage_with_autodep/pym/portage/util/lafilefixer.py
index 54ff20d..2562d9a 100644
--- a/portage_with_autodep/pym/portage/util/lafilefixer.py
+++ b/portage_with_autodep/pym/portage/util/lafilefixer.py
@@ -11,7 +11,7 @@ from portage.exception import InvalidData
# This is a re-implementation of dev-util/lafilefixer-0.5.
# rewrite_lafile() takes the contents of an lafile as a string
# It then parses the dependency_libs and inherited_linker_flags
-# entries.
+# entries.
# We insist on dependency_libs being present. inherited_linker_flags
# is optional.
# There are strict rules about the syntax imposed by libtool's libltdl.
@@ -21,7 +21,7 @@ from portage.exception import InvalidData
# lafilefixer does).
# What it does:
# * Replaces all .la files with absolute paths in dependency_libs with
-# corresponding -l* and -L* entries
+# corresponding -l* and -L* entries
# (/usr/lib64/libfoo.la -> -L/usr/lib64 -lfoo)
# * Moves various flags (see flag_re below) to inherited_linker_flags,
# if such an entry was present.
@@ -36,7 +36,7 @@ from portage.exception import InvalidData
dep_libs_re = re.compile(b"dependency_libs='(?P<value>[^']*)'$")
inh_link_flags_re = re.compile(b"inherited_linker_flags='(?P<value>[^']*)'$")
-#regexes for replacing stuff in -L entries.
+#regexes for replacing stuff in -L entries.
#replace 'X11R6/lib' and 'local/lib' with 'lib', no idea what's this about.
X11_local_sub = re.compile(b"X11R6/lib|local/lib")
#get rid of the '..'
@@ -129,11 +129,11 @@ def rewrite_lafile(contents):
#This allows us to place all -L entries at the beginning
#of 'dependency_libs'.
ladir = dep_libs_entry
-
+
ladir = X11_local_sub.sub(b"lib", ladir)
ladir = pkgconfig_sub1.sub(b"usr", ladir)
ladir = pkgconfig_sub2.sub(b"\g<usrlib>", ladir)
-
+
if ladir not in libladir:
libladir.append(ladir)
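
A hedged illustration (not the module's actual implementation) of the rewrite described in the comments above, turning an absolute .la path into -L/-l flags:

    import os

    def la_path_to_flags(la_path):
        # '/usr/lib64/libfoo.la' -> ('-L/usr/lib64', '-lfoo'), per the example above.
        ladir, lafile = os.path.split(la_path)
        name = lafile[len("lib"):-len(".la")]
        return "-L" + ladir, "-l" + name

    print(la_path_to_flags("/usr/lib64/libfoo.la"))   # ('-L/usr/lib64', '-lfoo')
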
diff --git a/portage_with_autodep/pym/portage/util/lafilefixer.pyo b/portage_with_autodep/pym/portage/util/lafilefixer.pyo
index a6e06ab..5378d82 100644
--- a/portage_with_autodep/pym/portage/util/lafilefixer.pyo
+++ b/portage_with_autodep/pym/portage/util/lafilefixer.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/portage/util/listdir.py b/portage_with_autodep/pym/portage/util/listdir.py
index c2628cb..2012e14 100644
--- a/portage_with_autodep/pym/portage/util/listdir.py
+++ b/portage_with_autodep/pym/portage/util/listdir.py
@@ -1,36 +1,33 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
__all__ = ['cacheddir', 'listdir']
import errno
import stat
-import time
+import sys
+
+if sys.hexversion < 0x3000000:
+ from itertools import izip as zip
from portage import os
+from portage.const import VCS_DIRS
from portage.exception import DirectoryNotFound, PermissionDenied, PortageException
-from portage.util import normalize_path, writemsg
-
-_ignorecvs_dirs = ('CVS', 'RCS', 'SCCS', '.svn', '.git')
+from portage.util import normalize_path
+
+# The global dircache is no longer supported, since it could
+# be a memory leak for API consumers. Any cacheddir callers
+# should use higher-level caches instead, when necessary.
+# TODO: Remove dircache variable after stable portage does
+# not use is (keep it for now, in case API consumers clear
+# it manually).
dircache = {}
-cacheHit = 0
-cacheMiss = 0
-cacheStale = 0
def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
- global cacheHit,cacheMiss,cacheStale
mypath = normalize_path(my_original_path)
- if mypath in dircache:
- cacheHit += 1
- cached_mtime, list, ftype = dircache[mypath]
- else:
- cacheMiss += 1
- cached_mtime, list, ftype = -1, [], []
try:
pathstat = os.stat(mypath)
- if stat.S_ISDIR(pathstat[stat.ST_MODE]):
- mtime = pathstat.st_mtime
- else:
+ if not stat.S_ISDIR(pathstat.st_mode):
raise DirectoryNotFound(mypath)
except EnvironmentError as e:
if e.errno == PermissionDenied.errno:
@@ -39,19 +36,16 @@ def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymli
return [], []
except PortageException:
return [], []
- # Python retuns mtime in seconds, so if it was changed in the last few seconds, it could be invalid
- if mtime != cached_mtime or time.time() - mtime < 4:
- if mypath in dircache:
- cacheStale += 1
+ else:
try:
- list = os.listdir(mypath)
+ fpaths = os.listdir(mypath)
except EnvironmentError as e:
if e.errno != errno.EACCES:
raise
del e
raise PermissionDenied(mypath)
ftype = []
- for x in list:
+ for x in fpaths:
try:
if followSymlinks:
pathstat = os.stat(mypath+"/"+x)
@@ -68,23 +62,22 @@ def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymli
ftype.append(3)
except (IOError, OSError):
ftype.append(3)
- dircache[mypath] = mtime, list, ftype
-
- ret_list = []
- ret_ftype = []
- for x in range(0, len(list)):
- if list[x] in ignorelist:
- pass
- elif ignorecvs:
- if list[x][:2] != ".#" and \
- not (ftype[x] == 1 and list[x] in _ignorecvs_dirs):
- ret_list.append(list[x])
- ret_ftype.append(ftype[x])
- else:
- ret_list.append(list[x])
- ret_ftype.append(ftype[x])
-
- writemsg("cacheddirStats: H:%d/M:%d/S:%d\n" % (cacheHit, cacheMiss, cacheStale),10)
+
+ if ignorelist or ignorecvs:
+ ret_list = []
+ ret_ftype = []
+ for file_path, file_type in zip(fpaths, ftype):
+ if file_path in ignorelist:
+ pass
+ elif ignorecvs:
+ if file_path[:2] != ".#" and \
+ not (file_type == 1 and file_path in VCS_DIRS):
+ ret_list.append(file_path)
+ ret_ftype.append(file_type)
+ else:
+ ret_list = fpaths
+ ret_ftype = ftype
+
return ret_list, ret_ftype
def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelist=[], followSymlinks=True,
@@ -98,7 +91,7 @@ def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelis
@type recursive: Boolean
@param filesonly: Only return files, not directories
@type filesonly: Boolean
- @param ignorecvs: Ignore CVS directories ('CVS','SCCS','.svn','.git')
+ @param ignorecvs: Ignore VCS directories
@type ignorecvs: Boolean
@param ignorelist: List of filenames/directories to exclude
@type ignorelist: List
@@ -112,40 +105,35 @@ def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelis
@return: A list of files and directories (or just files or just directories) or an empty list.
"""
- list, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)
+ fpaths, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)
- if list is None:
- list=[]
+ if fpaths is None:
+ fpaths = []
if ftype is None:
- ftype=[]
+ ftype = []
if not (filesonly or dirsonly or recursive):
- return list
+ return fpaths
if recursive:
- x=0
- while x<len(ftype):
- if ftype[x] == 1:
- l,f = cacheddir(mypath+"/"+list[x], ignorecvs, ignorelist, EmptyOnError,
- followSymlinks)
-
- l=l[:]
- for y in range(0,len(l)):
- l[y]=list[x]+"/"+l[y]
- list=list+l
- ftype=ftype+f
- x+=1
+ stack = list(zip(fpaths, ftype))
+ fpaths = []
+ ftype = []
+ while stack:
+ file_path, file_type = stack.pop()
+ fpaths.append(file_path)
+ ftype.append(file_type)
+ if file_type == 1:
+ subdir_list, subdir_types = cacheddir(
+ os.path.join(mypath, file_path), ignorecvs,
+ ignorelist, EmptyOnError, followSymlinks)
+ stack.extend((os.path.join(file_path, x), x_type)
+ for x, x_type in zip(subdir_list, subdir_types))
+
if filesonly:
- rlist=[]
- for x in range(0,len(ftype)):
- if ftype[x]==0:
- rlist=rlist+[list[x]]
+ fpaths = [x for x, x_type in zip(fpaths, ftype) if x_type == 0]
+
elif dirsonly:
- rlist = []
- for x in range(0, len(ftype)):
- if ftype[x] == 1:
- rlist = rlist + [list[x]]
- else:
- rlist=list
+ fpaths = [x for x, x_type in zip(fpaths, ftype) if x_type == 1]
- return rlist
+ return fpaths
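
The recursive branch above trades index arithmetic for an explicit stack; a generic, hedged sketch of that traversal pattern (independent of portage's cacheddir and its type codes):

    import os

    def walk_relative(top):
        # Depth-first listing of paths relative to 'top', driven by a stack
        # the same way the patched listdir() drives cacheddir() results.
        stack = os.listdir(top)
        result = []
        while stack:
            rel = stack.pop()
            result.append(rel)
            full = os.path.join(top, rel)
            if os.path.isdir(full) and not os.path.islink(full):
                stack.extend(os.path.join(rel, name) for name in os.listdir(full))
        return result
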
diff --git a/portage_with_autodep/pym/portage/util/listdir.pyo b/portage_with_autodep/pym/portage/util/listdir.pyo
index 0f02d6d..e4d40be 100644
--- a/portage_with_autodep/pym/portage/util/listdir.pyo
+++ b/portage_with_autodep/pym/portage/util/listdir.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/portage/util/movefile.py b/portage_with_autodep/pym/portage/util/movefile.py
index 10577b5..452e77f 100644
--- a/portage_with_autodep/pym/portage/util/movefile.py
+++ b/portage_with_autodep/pym/portage/util/movefile.py
@@ -1,18 +1,22 @@
-# Copyright 2010-2012 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
__all__ = ['movefile']
import errno
+import fnmatch
import os as _os
import shutil as _shutil
import stat
+import sys
import subprocess
import textwrap
import portage
from portage import bsd_chflags, _encodings, _os_overrides, _selinux, \
- _unicode_decode, _unicode_encode, _unicode_func_wrapper,\
+ _unicode_decode, _unicode_encode, _unicode_func_wrapper, \
_unicode_module_wrapper
from portage.const import MOVE_BINARY
from portage.exception import OperationNotSupported
@@ -24,43 +28,113 @@ def _apply_stat(src_stat, dest):
_os.chown(dest, src_stat.st_uid, src_stat.st_gid)
_os.chmod(dest, stat.S_IMODE(src_stat.st_mode))
+_xattr_excluder_cache = {}
+
+def _get_xattr_excluder(pattern):
+
+ try:
+ value = _xattr_excluder_cache[pattern]
+ except KeyError:
+ value = _xattr_excluder(pattern)
+ _xattr_excluder_cache[pattern] = value
+
+ return value
+
+class _xattr_excluder(object):
+
+ __slots__ = ('_pattern_split',)
+
+ def __init__(self, pattern):
+
+ if pattern is None:
+ self._pattern_split = None
+ else:
+ pattern = pattern.split()
+ if not pattern:
+ self._pattern_split = None
+ else:
+ pattern.sort()
+ self._pattern_split = tuple(pattern)
+
+ def __call__(self, attr):
+
+ if self._pattern_split is None:
+ return False
+
+ match = fnmatch.fnmatch
+ for x in self._pattern_split:
+ if match(attr, x):
+ return True
+
+ return False
+
if hasattr(_os, "getxattr"):
# Python >=3.3 and GNU/Linux
- def _copyxattr(src, dest):
- for attr in _os.listxattr(src):
+ def _copyxattr(src, dest, exclude=None):
+
+ try:
+ attrs = _os.listxattr(src)
+ except OSError as e:
+ if e.errno != OperationNotSupported.errno:
+ raise
+ attrs = ()
+ if attrs:
+ if exclude is not None and isinstance(attrs[0], bytes):
+ exclude = exclude.encode(_encodings['fs'])
+ exclude = _get_xattr_excluder(exclude)
+
+ for attr in attrs:
+ if exclude(attr):
+ continue
try:
_os.setxattr(dest, attr, _os.getxattr(src, attr))
raise_exception = False
except OSError:
raise_exception = True
if raise_exception:
- raise OperationNotSupported("Filesystem containing file '%s' does not support extended attributes" % dest)
+ raise OperationNotSupported(_("Filesystem containing file '%s' "
+ "does not support extended attribute '%s'") %
+ (_unicode_decode(dest), _unicode_decode(attr)))
else:
try:
import xattr
except ImportError:
xattr = None
if xattr is not None:
- def _copyxattr(src, dest):
- for attr in xattr.list(src):
+ def _copyxattr(src, dest, exclude=None):
+
+ try:
+ attrs = xattr.list(src)
+ except IOError as e:
+ if e.errno != OperationNotSupported.errno:
+ raise
+ attrs = ()
+
+ if attrs:
+ if exclude is not None and isinstance(attrs[0], bytes):
+ exclude = exclude.encode(_encodings['fs'])
+ exclude = _get_xattr_excluder(exclude)
+
+ for attr in attrs:
+ if exclude(attr):
+ continue
try:
xattr.set(dest, attr, xattr.get(src, attr))
raise_exception = False
except IOError:
raise_exception = True
if raise_exception:
- raise OperationNotSupported("Filesystem containing file '%s' does not support extended attributes" % dest)
+ raise OperationNotSupported(_("Filesystem containing file '%s' "
+ "does not support extended attribute '%s'") %
+ (_unicode_decode(dest), _unicode_decode(attr)))
else:
- _devnull = open("/dev/null", "wb")
try:
- subprocess.call(["getfattr", "--version"], stdout=_devnull)
- subprocess.call(["setfattr", "--version"], stdout=_devnull)
- _has_getfattr_and_setfattr = True
+ with open(_os.devnull, 'wb') as f:
+ subprocess.call(["getfattr", "--version"], stdout=f)
+ subprocess.call(["setfattr", "--version"], stdout=f)
except OSError:
- _has_getfattr_and_setfattr = False
- _devnull.close()
- if _has_getfattr_and_setfattr:
- def _copyxattr(src, dest):
+ def _copyxattr(src, dest, exclude=None):
+ # TODO: implement exclude
getfattr_process = subprocess.Popen(["getfattr", "-d", "--absolute-names", src], stdout=subprocess.PIPE)
getfattr_process.wait()
extended_attributes = getfattr_process.stdout.readlines()
@@ -72,14 +146,15 @@ else:
if setfattr_process.returncode != 0:
raise OperationNotSupported("Filesystem containing file '%s' does not support extended attributes" % dest)
else:
- def _copyxattr(src, dest):
+ def _copyxattr(src, dest, exclude=None):
pass
def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
hardlink_candidates=None, encoding=_encodings['fs']):
"""moves a file from src to dest, preserving all permissions and attributes; mtime will
- be preserved even when moving across filesystems. Returns true on success and false on
- failure. Move is atomic."""
+ be preserved even when moving across filesystems. Returns mtime as integer on success
+ and None on failure. mtime is expressed in seconds in Python <3.3 and nanoseconds in
+ Python >=3.3. Move is atomic."""
if mysettings is None:
mysettings = portage.settings
@@ -102,22 +177,22 @@ def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
try:
if not sstat:
- sstat=os.lstat(src)
+ sstat = os.lstat(src)
except SystemExit as e:
raise
except Exception as e:
writemsg("!!! %s\n" % _("Stating source file failed... movefile()"),
noiselevel=-1)
- writemsg(_unicode_decode("!!! %s\n") % (e,), noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
return None
- destexists=1
+ destexists = 1
try:
- dstat=os.lstat(dest)
+ dstat = os.lstat(dest)
except (OSError, IOError):
- dstat=os.lstat(os.path.dirname(dest))
- destexists=0
+ dstat = os.lstat(os.path.dirname(dest))
+ destexists = 0
if bsd_chflags:
if destexists and dstat.st_flags != 0:
@@ -132,7 +207,7 @@ def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
if stat.S_ISLNK(dstat[stat.ST_MODE]):
try:
os.unlink(dest)
- destexists=0
+ destexists = 0
except SystemExit as e:
raise
except Exception as e:
@@ -140,7 +215,7 @@ def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
if stat.S_ISLNK(sstat[stat.ST_MODE]):
try:
- target=os.readlink(src)
+ target = os.readlink(src)
if mysettings and "D" in mysettings and \
target.startswith(mysettings["D"]):
target = target[len(mysettings["D"])-1:]
@@ -159,17 +234,32 @@ def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
if e.errno not in (errno.ENOENT, errno.EEXIST) or \
target != os.readlink(dest):
raise
- lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
- # utime() only works on the target of a symlink, so it's not
- # possible to perserve mtime on symlinks.
- return os.lstat(dest)[stat.ST_MTIME]
+ lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID])
+
+ try:
+ _os.unlink(src_bytes)
+ except OSError:
+ pass
+
+ if sys.hexversion >= 0x3030000:
+ try:
+ os.utime(dest, ns=(sstat.st_mtime_ns, sstat.st_mtime_ns), follow_symlinks=False)
+ except NotImplementedError:
+ # utimensat() and lutimes() missing in libc.
+ return os.stat(dest, follow_symlinks=False).st_mtime_ns
+ else:
+ return sstat.st_mtime_ns
+ else:
+ # utime() in Python <3.3 only works on the target of a symlink, so it's not
+ # possible to preserve mtime on symlinks.
+ return os.lstat(dest)[stat.ST_MTIME]
except SystemExit as e:
raise
except Exception as e:
writemsg("!!! %s\n" % _("failed to properly create symlink:"),
noiselevel=-1)
writemsg("!!! %s -> %s\n" % (dest, target), noiselevel=-1)
- writemsg(_unicode_decode("!!! %s\n") % (e,), noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
return None
hardlinked = False
@@ -204,9 +294,13 @@ def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
writemsg("!!! %s\n" % (e,), noiselevel=-1)
return None
hardlinked = True
+ try:
+ _os.unlink(src_bytes)
+ except OSError:
+ pass
break
- renamefailed=1
+ renamefailed = 1
if hardlinked:
renamefailed = False
if not hardlinked and (selinux_enabled or sstat.st_dev == dstat.st_dev):
@@ -214,14 +308,14 @@ def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
if selinux_enabled:
selinux.rename(src, dest)
else:
- os.rename(src,dest)
- renamefailed=0
+ os.rename(src, dest)
+ renamefailed = 0
except OSError as e:
if e.errno != errno.EXDEV:
# Some random error.
writemsg("!!! %s\n" % _("Failed to move %(src)s to %(dest)s") %
{"src": src, "dest": dest}, noiselevel=-1)
- writemsg(_unicode_decode("!!! %s\n") % (e,), noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
return None
# Invalid cross-device-link 'bind' mounted or actually Cross-Device
if renamefailed:
@@ -233,7 +327,8 @@ def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
_copyfile(src_bytes, dest_tmp_bytes)
if xattr_enabled:
try:
- _copyxattr(src_bytes, dest_tmp_bytes)
+ _copyxattr(src_bytes, dest_tmp_bytes,
+ exclude=mysettings.get("PORTAGE_XATTR_EXCLUDE", "security.* system.nfs4_acl"))
except SystemExit:
raise
except:
@@ -252,7 +347,7 @@ def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
except Exception as e:
writemsg("!!! %s\n" % _('copy %(src)s -> %(dest)s failed.') %
{"src": src, "dest": dest}, noiselevel=-1)
- writemsg(_unicode_decode("!!! %s\n") % (e,), noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
return None
else:
#we don't yet handle special, so we need to fall back to /bin/mv
@@ -265,35 +360,54 @@ def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
writemsg("!!! %s\n" % a, noiselevel=-1)
return None # failure
- # Always use stat_obj[stat.ST_MTIME] for the integral timestamp which
- # is returned, since the stat_obj.st_mtime float attribute rounds *up*
+ # In Python <3.3 always use stat_obj[stat.ST_MTIME] for the integral timestamp
+ # which is returned, since the stat_obj.st_mtime float attribute rounds *up*
# if the nanosecond part of the timestamp is 999999881 ns or greater.
try:
if hardlinked:
- newmtime = os.stat(dest)[stat.ST_MTIME]
+ if sys.hexversion >= 0x3030000:
+ newmtime = os.stat(dest).st_mtime_ns
+ else:
+ newmtime = os.stat(dest)[stat.ST_MTIME]
else:
# Note: It is not possible to preserve nanosecond precision
# (supported in POSIX.1-2008 via utimensat) with the IEEE 754
# double precision float which only has a 53 bit significand.
if newmtime is not None:
- os.utime(dest, (newmtime, newmtime))
+ if sys.hexversion >= 0x3030000:
+ os.utime(dest, ns=(newmtime, newmtime))
+ else:
+ os.utime(dest, (newmtime, newmtime))
else:
- newmtime = sstat[stat.ST_MTIME]
+ if sys.hexversion >= 0x3030000:
+ newmtime = sstat.st_mtime_ns
+ else:
+ newmtime = sstat[stat.ST_MTIME]
if renamefailed:
- # If rename succeeded then timestamps are automatically
- # preserved with complete precision because the source
- # and destination inode are the same. Otherwise, round
- # down to the nearest whole second since python's float
- # st_mtime cannot be used to preserve the st_mtim.tv_nsec
- # field with complete precision. Note that we have to use
- # stat_obj[stat.ST_MTIME] here because the float
- # stat_obj.st_mtime rounds *up* sometimes.
- os.utime(dest, (newmtime, newmtime))
+ if sys.hexversion >= 0x3030000:
+ # If rename succeeded then timestamps are automatically
+ # preserved with complete precision because the source
+ # and destination inodes are the same. Otherwise, manually
+ # update timestamps with nanosecond precision.
+ os.utime(dest, ns=(newmtime, newmtime))
+ else:
+ # If rename succeeded then timestamps are automatically
+ # preserved with complete precision because the source
+ # and destination inodes are the same. Otherwise, round
+ # down to the nearest whole second since python's float
+ # st_mtime cannot be used to preserve the st_mtim.tv_nsec
+ # field with complete precision. Note that we have to use
+ # stat_obj[stat.ST_MTIME] here because the float
+ # stat_obj.st_mtime rounds *up* sometimes.
+ os.utime(dest, (newmtime, newmtime))
except OSError:
# The utime can fail here with EPERM even though the move succeeded.
# Instead of failing, use stat to return the mtime if possible.
try:
- newmtime = os.stat(dest)[stat.ST_MTIME]
+ if sys.hexversion >= 0x3030000:
+ newmtime = os.stat(dest).st_mtime_ns
+ else:
+ newmtime = os.stat(dest)[stat.ST_MTIME]
except OSError as e:
writemsg(_("!!! Failed to stat in movefile()\n"), noiselevel=-1)
writemsg("!!! %s\n" % dest, noiselevel=-1)
diff --git a/portage_with_autodep/pym/portage/util/movefile.pyo b/portage_with_autodep/pym/portage/util/movefile.pyo
index 1228ee7..690b9b6 100644
--- a/portage_with_autodep/pym/portage/util/movefile.pyo
+++ b/portage_with_autodep/pym/portage/util/movefile.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/portage/util/mtimedb.pyo b/portage_with_autodep/pym/portage/util/mtimedb.pyo
index fda479a..6fe9b37 100644
--- a/portage_with_autodep/pym/portage/util/mtimedb.pyo
+++ b/portage_with_autodep/pym/portage/util/mtimedb.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/portage/util/whirlpool.py b/portage_with_autodep/pym/portage/util/whirlpool.py
index c696f6f..170ae73 100644
--- a/portage_with_autodep/pym/portage/util/whirlpool.py
+++ b/portage_with_autodep/pym/portage/util/whirlpool.py
@@ -639,6 +639,8 @@ def WhirlpoolInit(ctx):
return
def WhirlpoolAdd(source, sourceBits, ctx):
+ if not isinstance(source, bytes):
+ raise TypeError("Expected %s, got %s" % (bytes, type(source)))
if sys.hexversion < 0x3000000:
source = [ord(s)&0xff for s in source]
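
A hedged illustration of the new bytes-only guard in isolation (the Whirlpool context object itself is omitted here):

    def _require_bytes(source):
        # Same check the patch adds to WhirlpoolAdd(): reject text input early
        # instead of producing wrong digests or obscure errors later.
        if not isinstance(source, bytes):
            raise TypeError("Expected %s, got %s" % (bytes, type(source)))
        return source

    _require_bytes(b"portage")            # accepted
    _require_bytes("portage".encode())    # accepted: encode text before hashing
    # _require_bytes(u"portage")          # would raise TypeError
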
diff --git a/portage_with_autodep/pym/portage/util/whirlpool.pyo b/portage_with_autodep/pym/portage/util/whirlpool.pyo
index 4bcf49a..98e2815 100644
--- a/portage_with_autodep/pym/portage/util/whirlpool.pyo
+++ b/portage_with_autodep/pym/portage/util/whirlpool.pyo
Binary files differ