author      Marius Mauch <genone@gentoo.org>  2007-01-25 15:49:26 +0000
committer   Marius Mauch <genone@gentoo.org>  2007-01-25 15:49:26 +0000
commit      3b08c21101b0801d7c5d6c145a27bef5cd42078c (patch)
tree        2eea73b311d67b567410670630335796bf0a272c  /pym/portage
parent      demote KEYWORDS.missing to a warning to make KEYWORDS='' valid (diff)
download    portage-multirepo-3b08c21101b0801d7c5d6c145a27bef5cd42078c.tar.gz
            portage-multirepo-3b08c21101b0801d7c5d6c145a27bef5cd42078c.tar.bz2
            portage-multirepo-3b08c21101b0801d7c5d6c145a27bef5cd42078c.zip
Namespace sanitizing, step 1
svn path=/main/trunk/; revision=5778
Diffstat (limited to 'pym/portage')
-rw-r--r--  pym/portage/__init__.py  8281
-rw-r--r--  pym/portage/cache/__init__.py  5
-rw-r--r--  pym/portage/cache/anydbm.py  72
-rw-r--r--  pym/portage/cache/cache_errors.py  41
-rw-r--r--  pym/portage/cache/flat_hash.py  120
-rw-r--r--  pym/portage/cache/flat_list.py  106
-rw-r--r--  pym/portage/cache/fs_template.py  74
-rw-r--r--  pym/portage/cache/mappings.py  103
-rw-r--r--  pym/portage/cache/metadata.py  87
-rw-r--r--  pym/portage/cache/metadata_overlay.py  105
-rw-r--r--  pym/portage/cache/sql_template.py  275
-rw-r--r--  pym/portage/cache/sqlite.py  236
-rw-r--r--  pym/portage/cache/template.py  200
-rw-r--r--  pym/portage/cache/util.py  129
-rw-r--r--  pym/portage/cache/volatile.py  27
-rw-r--r--  pym/portage/checksum.py  219
-rw-r--r--  pym/portage/const.py  65
-rw-r--r--  pym/portage/cvstree.py  295
-rw-r--r--  pym/portage/data.py  126
-rw-r--r--  pym/portage/debug.py  115
-rw-r--r--  pym/portage/dep.py  646
-rw-r--r--  pym/portage/dispatch_conf.py  161
-rw-r--r--  pym/portage/eclass_cache.py  83
-rw-r--r--  pym/portage/elog_modules/__init__.py  0
-rw-r--r--  pym/portage/elog_modules/mod_custom.py  16
-rw-r--r--  pym/portage/elog_modules/mod_mail.py  22
-rw-r--r--  pym/portage/elog_modules/mod_mail_summary.py  40
-rw-r--r--  pym/portage/elog_modules/mod_save.py  21
-rw-r--r--  pym/portage/elog_modules/mod_save_summary.py  23
-rw-r--r--  pym/portage/elog_modules/mod_syslog.py  17
-rw-r--r--  pym/portage/emergehelp.py  420
-rw-r--r--  pym/portage/exception.py  100
-rw-r--r--  pym/portage/exec.py  336
-rw-r--r--  pym/portage/getbinpkg.py  572
-rw-r--r--  pym/portage/gpg.py  149
-rw-r--r--  pym/portage/localization.py  21
-rw-r--r--  pym/portage/locks.py  312
-rw-r--r--  pym/portage/mail.py  89
-rw-r--r--  pym/portage/manifest.py  618
-rw-r--r--  pym/portage/news.py  268
-rw-r--r--  pym/portage/output.py  393
-rw-r--r--  pym/portage/selinux.py  8
-rw-r--r--  pym/portage/update.py  224
-rw-r--r--  pym/portage/util.py  1037
-rw-r--r--  pym/portage/versions.py  314
-rw-r--r--  pym/portage/xpak.py  421
46 files changed, 16992 insertions, 0 deletions
diff --git a/pym/portage/__init__.py b/pym/portage/__init__.py
new file mode 100644
index 00000000..98303857
--- /dev/null
+++ b/pym/portage/__init__.py
@@ -0,0 +1,8281 @@
+# portage.py -- core Portage functionality
+# Copyright 1998-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+
+VERSION="$Rev$"[6:-2] + "-svn"
+
+# ===========================================================================
+# START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
+# ===========================================================================
+
+try:
+ import sys
+except ImportError:
+ print "Failed to import sys! Something is _VERY_ wrong with python."
+ raise
+
+try:
+ import copy, errno, os, re, shutil, time, types
+ try:
+ import cPickle
+ except ImportError:
+ import pickle as cPickle
+
+ import stat
+ import commands
+ from time import sleep
+ from random import shuffle
+ import UserDict
+ if getattr(__builtins__, "set", None) is None:
+ from sets import Set as set
+ from itertools import chain, izip
+except ImportError, e:
+ sys.stderr.write("\n\n")
+ sys.stderr.write("!!! Failed to complete python imports. These are internal modules for\n")
+ sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
+ sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
+
+ sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
+ sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
+ sys.stderr.write(" "+str(e)+"\n\n");
+ raise
+
+try:
+ # XXX: This should get renamed to bsd_chflags, I think.
+ import chflags
+ bsd_chflags = chflags
+except ImportError:
+ bsd_chflags = None
+
+try:
+ from cache.cache_errors import CacheError
+ import cvstree
+ import xpak
+ import getbinpkg
+ import portage_dep
+ from portage_dep import dep_getcpv, dep_getkey, get_operator, \
+ isjustname, isspecific, isvalidatom, \
+ match_from_list, match_to_list, best_match_to_list
+
+ # XXX: This needs to get cleaned up.
+ import output
+ from output import bold, colorize, green, red, yellow
+
+ import portage_const
+ from portage_const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
+ USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
+ PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
+ EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \
+ MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
+ DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
+ INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE,\
+ INCREMENTALS, EAPI, MISC_SH_BINARY, REPO_NAME_LOC, REPO_NAME_FILE
+
+ from portage_data import ostype, lchown, userland, secpass, uid, wheelgid, \
+ portage_uid, portage_gid, userpriv_groups
+ from portage_manifest import Manifest
+
+ import portage_util
+ from portage_util import atomic_ofstream, apply_secpass_permissions, apply_recursive_permissions, \
+ dump_traceback, getconfig, grabdict, grabdict_package, grabfile, grabfile_package, \
+ map_dictlist_vals, new_protect_filename, normalize_path, \
+ pickle_read, pickle_write, stack_dictlist, stack_dicts, stack_lists, \
+ unique_array, varexpand, writedict, writemsg, writemsg_stdout, write_atomic
+ import portage_exception
+ import portage_gpg
+ import portage_locks
+ import portage_exec
+ from portage_exec import atexit_register, run_exitfuncs
+ from portage_locks import unlockfile,unlockdir,lockfile,lockdir
+ import portage_checksum
+ from portage_checksum import perform_md5,perform_checksum,prelink_capable
+ import eclass_cache
+ from portage_localization import _
+ from portage_update import dep_transform, fixdbentries, grab_updates, \
+ parse_updates, update_config_files, update_dbentries
+
+ # Need these functions directly in portage namespace to not break every external tool in existence
+ from portage_versions import best, catpkgsplit, catsplit, pkgcmp, \
+ pkgsplit, vercmp, ververify
+
+ # endversion and endversion_keys are for backward compatibility only.
+ from portage_versions import endversion_keys
+ from portage_versions import suffix_value as endversion
+
+except ImportError, e:
+ sys.stderr.write("\n\n")
+ sys.stderr.write("!!! Failed to complete portage imports. There are internal modules for\n")
+ sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n")
+ sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the\n")
+ sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n")
+ sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n")
+ sys.stderr.write("!!! a recovery of portage.\n")
+ sys.stderr.write(" "+str(e)+"\n\n")
+ raise
+
+
+try:
+ import portage_selinux as selinux
+except OSError, e:
+ writemsg("!!! SELinux not loaded: %s\n" % str(e), noiselevel=-1)
+ del e
+except ImportError:
+ pass
+
+# ===========================================================================
+# END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END
+# ===========================================================================
+
+
+def load_mod(name):
+ modname = ".".join(name.split(".")[:-1])
+ mod = __import__(modname)
+ components = name.split('.')
+ for comp in components[1:]:
+ mod = getattr(mod, comp)
+ return mod
+
+def best_from_dict(key, top_dict, key_order, EmptyOnError=1, FullCopy=1, AllowEmpty=1):
+ for x in key_order:
+ if top_dict.has_key(x) and top_dict[x].has_key(key):
+ if FullCopy:
+ return copy.deepcopy(top_dict[x][key])
+ else:
+ return top_dict[x][key]
+ if EmptyOnError:
+ return ""
+ else:
+ raise KeyError, "Key not found in list; '%s'" % key
+
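+# Illustrative sketch (not part of the module API): these two helpers are
+# composed by config.load_best_module() further below. With the default
+# modules mapping built in config.__init__ ("modules" is a stand-in name
+# for that dict here):
+#
+#     >>> best_from_dict("portdbapi.auxdbmodule", modules, ["user", "default"])
+#     'cache.flat_hash.database'
+#     >>> load_mod("cache.flat_hash.database")
+#     <class 'cache.flat_hash.database'>
+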
+def getcwd():
+ "this fixes situations where the current directory doesn't exist"
+ try:
+ return os.getcwd()
+ except OSError: #dir doesn't exist
+ os.chdir("/")
+ return "/"
+getcwd()
+
+def abssymlink(symlink):
+	"This reads a symlink, resolves it if relative, and returns the absolute path."
+ mylink=os.readlink(symlink)
+ if mylink[0] != '/':
+ mydir=os.path.dirname(symlink)
+ mylink=mydir+"/"+mylink
+ return os.path.normpath(mylink)
+
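+# Example (hypothetical filesystem layout): given a relative symlink
+# /usr/src/linux -> linux-2.6.19, the target is joined onto the symlink's
+# own directory:
+#
+#     >>> abssymlink("/usr/src/linux")
+#     '/usr/src/linux-2.6.19'
+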
+dircache = {}
+cacheHit=0
+cacheMiss=0
+cacheStale=0
+def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
+ global cacheHit,cacheMiss,cacheStale
+ mypath = normalize_path(my_original_path)
+ if dircache.has_key(mypath):
+ cacheHit += 1
+ cached_mtime, list, ftype = dircache[mypath]
+ else:
+ cacheMiss += 1
+ cached_mtime, list, ftype = -1, [], []
+ try:
+ pathstat = os.stat(mypath)
+ if stat.S_ISDIR(pathstat[stat.ST_MODE]):
+ mtime = pathstat[stat.ST_MTIME]
+ else:
+ raise portage_exception.DirectoryNotFound(mypath)
+ except (IOError,OSError,portage_exception.PortageException):
+ if EmptyOnError:
+ return [], []
+ return None, None
+	# Python returns mtime in seconds, so if it was changed in the last few seconds, it could be invalid
+ if mtime != cached_mtime or time.time() - mtime < 4:
+ if dircache.has_key(mypath):
+ cacheStale += 1
+ list = os.listdir(mypath)
+ ftype = []
+ for x in list:
+ try:
+ if followSymlinks:
+ pathstat = os.stat(mypath+"/"+x)
+ else:
+ pathstat = os.lstat(mypath+"/"+x)
+
+ if stat.S_ISREG(pathstat[stat.ST_MODE]):
+ ftype.append(0)
+ elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
+ ftype.append(1)
+ elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
+ ftype.append(2)
+ else:
+ ftype.append(3)
+ except (IOError, OSError):
+ ftype.append(3)
+ dircache[mypath] = mtime, list, ftype
+
+ ret_list = []
+ ret_ftype = []
+ for x in range(0, len(list)):
+ if(ignorecvs and (len(list[x]) > 2) and (list[x][:2]!=".#")):
+ ret_list.append(list[x])
+ ret_ftype.append(ftype[x])
+ elif (list[x] not in ignorelist):
+ ret_list.append(list[x])
+ ret_ftype.append(ftype[x])
+
+ writemsg("cacheddirStats: H:%d/M:%d/S:%d\n" % (cacheHit, cacheMiss, cacheStale),10)
+ return ret_list, ret_ftype
+
+def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelist=[], followSymlinks=True,
+ EmptyOnError=False, dirsonly=False):
+ """
+ Portage-specific implementation of os.listdir
+
+ @param mypath: Path whose contents you wish to list
+ @type mypath: String
+ @param recursive: Recursively scan directories contained within mypath
+ @type recursive: Boolean
+	@param filesonly: Only return files, not directories
+ @type filesonly: Boolean
+ @param ignorecvs: Ignore CVS directories ('CVS','.svn','SCCS')
+ @type ignorecvs: Boolean
+ @param ignorelist: List of filenames/directories to exclude
+ @type ignorelist: List
+ @param followSymlinks: Follow Symlink'd files and directories
+ @type followSymlinks: Boolean
+ @param EmptyOnError: Return [] if an error occurs.
+ @type EmptyOnError: Boolean
+ @param dirsonly: Only return directories.
+ @type dirsonly: Boolean
+ @rtype: List
+ @returns: A list of files and directories (or just files or just directories) or an empty list.
+ """
+
+ list, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)
+
+ if list is None:
+ list=[]
+ if ftype is None:
+ ftype=[]
+
+ if not (filesonly or dirsonly or recursive):
+ return list
+
+ if recursive:
+ x=0
+ while x<len(ftype):
+ if ftype[x]==1 and not (ignorecvs and os.path.basename(list[x]) in ('CVS','.svn','SCCS')):
+ l,f = cacheddir(mypath+"/"+list[x], ignorecvs, ignorelist, EmptyOnError,
+ followSymlinks)
+
+ l=l[:]
+ for y in range(0,len(l)):
+ l[y]=list[x]+"/"+l[y]
+ list=list+l
+ ftype=ftype+f
+ x+=1
+ if filesonly:
+ rlist=[]
+ for x in range(0,len(ftype)):
+ if ftype[x]==0:
+ rlist=rlist+[list[x]]
+ elif dirsonly:
+ rlist = []
+ for x in range(0, len(ftype)):
+ if ftype[x] == 1:
+ rlist = rlist + [list[x]]
+ else:
+ rlist=list
+
+ return rlist
+
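+# Example (hypothetical directory contents): collect only the regular files
+# directly under an env.d-style directory, returning [] if it is missing:
+#
+#     >>> listdir("/etc/env.d", filesonly=True, EmptyOnError=True)
+#     ['00basic', '05gcc']
+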
+def flatten(mytokens):
+	"""this function turns a [1,[2,3]] list into
+ a [1,2,3] list and returns it."""
+ newlist=[]
+ for x in mytokens:
+ if type(x)==types.ListType:
+ newlist.extend(flatten(x))
+ else:
+ newlist.append(x)
+ return newlist
+
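+# Example: flatten() recurses into arbitrarily nested lists:
+#
+#     >>> flatten([1, [2, [3, 4]], 5])
+#     [1, 2, 3, 4, 5]
+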
+#beautiful directed graph object
+
+class digraph:
+ def __init__(self):
+ """Create an empty digraph"""
+
+ # { node : ( { child : priority } , { parent : priority } ) }
+ self.nodes = {}
+ self.order = []
+
+ def add(self, node, parent, priority=0):
+ """Adds the specified node with the specified parent.
+
+ If the dep is a soft-dep and the node already has a hard
+ relationship to the parent, the relationship is left as hard."""
+
+ if node not in self.nodes:
+ self.nodes[node] = ({}, {})
+ self.order.append(node)
+
+ if not parent:
+ return
+
+ if parent not in self.nodes:
+ self.nodes[parent] = ({}, {})
+ self.order.append(parent)
+
+ if parent in self.nodes[node][1]:
+ if priority > self.nodes[node][1][parent]:
+ self.nodes[node][1][parent] = priority
+ else:
+ self.nodes[node][1][parent] = priority
+
+ if node in self.nodes[parent][0]:
+ if priority > self.nodes[parent][0][node]:
+ self.nodes[parent][0][node] = priority
+ else:
+ self.nodes[parent][0][node] = priority
+
+ def remove(self, node):
+ """Removes the specified node from the digraph, also removing
+		any ties to other nodes in the digraph. Raises KeyError if the
+ node doesn't exist."""
+
+ if node not in self.nodes:
+ raise KeyError(node)
+
+ for parent in self.nodes[node][1]:
+ del self.nodes[parent][0][node]
+ for child in self.nodes[node][0]:
+ del self.nodes[child][1][node]
+
+ del self.nodes[node]
+ self.order.remove(node)
+
+ def contains(self, node):
+		"""Checks if the digraph contains the given node"""
+ return node in self.nodes
+
+ def all_nodes(self):
+ """Return a list of all nodes in the graph"""
+ return self.order[:]
+
+ def child_nodes(self, node, ignore_priority=None):
+ """Return all children of the specified node"""
+ if ignore_priority is None:
+ return self.nodes[node][0].keys()
+ children = []
+ for child, priority in self.nodes[node][0].iteritems():
+ if priority > ignore_priority:
+ children.append(child)
+ return children
+
+ def parent_nodes(self, node):
+ """Return all parents of the specified node"""
+ return self.nodes[node][1].keys()
+
+ def leaf_nodes(self, ignore_priority=None):
+ """Return all nodes that have no children
+
+		If ignore_priority is given, children whose priority is at or
+		below it are not counted."""
+
+ leaf_nodes = []
+ for node in self.order:
+ is_leaf_node = True
+ for child in self.nodes[node][0]:
+ if self.nodes[node][0][child] > ignore_priority:
+ is_leaf_node = False
+ break
+ if is_leaf_node:
+ leaf_nodes.append(node)
+ return leaf_nodes
+
+ def root_nodes(self, ignore_priority=None):
+ """Return all nodes that have no parents.
+
+		If ignore_priority is given, parents whose priority is at or
+		below it are not counted."""
+
+ root_nodes = []
+ for node in self.order:
+ is_root_node = True
+ for parent in self.nodes[node][1]:
+ if self.nodes[node][1][parent] > ignore_priority:
+ is_root_node = False
+ break
+ if is_root_node:
+ root_nodes.append(node)
+ return root_nodes
+
+ def is_empty(self):
+ """Checks if the digraph is empty"""
+ return len(self.nodes) == 0
+
+ def clone(self):
+ clone = digraph()
+ clone.nodes = copy.deepcopy(self.nodes)
+ clone.order = self.order[:]
+ return clone
+
+ # Backward compatibility
+ addnode = add
+ allnodes = all_nodes
+ allzeros = leaf_nodes
+ hasnode = contains
+ empty = is_empty
+ copy = clone
+
+ def delnode(self, node):
+ try:
+ self.remove(node)
+ except KeyError:
+ pass
+
+ def firstzero(self):
+ leaf_nodes = self.leaf_nodes()
+ if leaf_nodes:
+ return leaf_nodes[0]
+ return None
+
+ def hasallzeros(self, ignore_priority=None):
+ return len(self.leaf_nodes(ignore_priority=ignore_priority)) == \
+ len(self.order)
+
+ def debug_print(self):
+ for node in self.nodes:
+ print node,
+ if self.nodes[node][0]:
+ print "depends on"
+ else:
+ print "(no children)"
+ for child in self.nodes[node][0]:
+ print " ",child,
+ print "(%s)" % self.nodes[node][0][child]
+
+
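+# Illustrative digraph usage (hypothetical package names): add() records a
+# node together with its parent, and leaf_nodes() peels off nodes with no
+# remaining children, which is how a merge order can be extracted:
+#
+#     >>> g = digraph()
+#     >>> g.add("sys-libs/zlib", "app-arch/gzip")  # gzip depends on zlib
+#     >>> g.leaf_nodes()
+#     ['sys-libs/zlib']
+#     >>> g.remove("sys-libs/zlib")
+#     >>> g.leaf_nodes()
+#     ['app-arch/gzip']
+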
+_elog_atexit_handlers = []
+def elog_process(cpv, mysettings):
+ mylogfiles = listdir(mysettings["T"]+"/logging/")
+ # shortcut for packages without any messages
+ if len(mylogfiles) == 0:
+ return
+ # exploit listdir() file order so we process log entries in chronological order
+ mylogfiles.reverse()
+ all_logentries = {}
+ for f in mylogfiles:
+ msgfunction, msgtype = f.split(".")
+ if msgfunction not in portage_const.EBUILD_PHASES:
+ writemsg("!!! can't process invalid log file: %s\n" % f,
+ noiselevel=-1)
+ continue
+ if not msgfunction in all_logentries:
+ all_logentries[msgfunction] = []
+ msgcontent = open(mysettings["T"]+"/logging/"+f, "r").readlines()
+ all_logentries[msgfunction].append((msgtype, msgcontent))
+
+ def filter_loglevels(logentries, loglevels):
+ # remove unwanted entries from all logentries
+ rValue = {}
+ loglevels = map(str.upper, loglevels)
+ for phase in logentries.keys():
+ for msgtype, msgcontent in logentries[phase]:
+ if msgtype.upper() in loglevels or "*" in loglevels:
+ if not rValue.has_key(phase):
+ rValue[phase] = []
+ rValue[phase].append((msgtype, msgcontent))
+ return rValue
+
+ my_elog_classes = set(mysettings.get("PORTAGE_ELOG_CLASSES", "").split())
+ default_logentries = filter_loglevels(all_logentries, my_elog_classes)
+
+ # in case the filters matched all messages and no module overrides exist
+ if len(default_logentries) == 0 and (not ":" in mysettings.get("PORTAGE_ELOG_SYSTEM", "")):
+ return
+
+ def combine_logentries(logentries):
+ # generate a single string with all log messages
+ rValue = ""
+ for phase in portage_const.EBUILD_PHASES:
+ if not phase in logentries:
+ continue
+ for msgtype,msgcontent in logentries[phase]:
+ rValue += "%s: %s\n" % (msgtype, phase)
+ for line in msgcontent:
+ rValue += line
+ rValue += "\n"
+ return rValue
+
+ default_fulllog = combine_logentries(default_logentries)
+
+ # pass the processing to the individual modules
+ logsystems = mysettings["PORTAGE_ELOG_SYSTEM"].split()
+ for s in logsystems:
+ # allow per module overrides of PORTAGE_ELOG_CLASSES
+ if ":" in s:
+ s, levels = s.split(":", 1)
+ levels = levels.split(",")
+ mod_logentries = filter_loglevels(all_logentries, levels)
+ mod_fulllog = combine_logentries(mod_logentries)
+ else:
+ mod_logentries = default_logentries
+ mod_fulllog = default_fulllog
+ if len(mod_logentries) == 0:
+ continue
+ # - is nicer than _ for module names, so allow people to use it.
+ s = s.replace("-", "_")
+ try:
+			# FIXME: ugly ad-hoc import code
+ # TODO: implement a common portage module loader
+ logmodule = __import__("elog_modules.mod_"+s)
+ m = getattr(logmodule, "mod_"+s)
+ def timeout_handler(signum, frame):
+ raise portage_exception.PortageException(
+ "Timeout in elog_process for system '%s'" % s)
+ import signal
+ signal.signal(signal.SIGALRM, timeout_handler)
+ # Timeout after one minute (in case something like the mail
+ # module gets hung).
+ signal.alarm(60)
+ try:
+ m.process(mysettings, cpv, mod_logentries, mod_fulllog)
+ finally:
+ signal.alarm(0)
+ if hasattr(m, "finalize") and not m.finalize in _elog_atexit_handlers:
+ _elog_atexit_handlers.append(m.finalize)
+ atexit_register(m.finalize, mysettings)
+ except (ImportError, AttributeError), e:
+ writemsg("!!! Error while importing logging modules " + \
+ "while loading \"mod_%s\":\n" % str(s))
+ writemsg("%s\n" % str(e), noiselevel=-1)
+ except portage_exception.PortageException, e:
+ writemsg("%s\n" % str(e), noiselevel=-1)
+
+ # clean logfiles to avoid repetitions
+ for f in mylogfiles:
+ try:
+ os.unlink(os.path.join(mysettings["T"], "logging", f))
+ except OSError:
+ pass
+
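+# Illustrative make.conf settings consumed by elog_process() above; the
+# "module:level,level" syntax gives the mail module its own class filter
+# while the save module keeps the global PORTAGE_ELOG_CLASSES:
+#
+#     PORTAGE_ELOG_CLASSES="warn error"
+#     PORTAGE_ELOG_SYSTEM="save mail:error"
+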
+#parse /etc/env.d and generate /etc/profile.env
+
+def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None):
+ if target_root is None:
+ global root
+ target_root = root
+ if prev_mtimes is None:
+ global mtimedb
+ prev_mtimes = mtimedb["ldpath"]
+ envd_dir = os.path.join(target_root, "etc", "env.d")
+ portage_util.ensure_dirs(envd_dir, mode=0755)
+ fns = listdir(envd_dir, EmptyOnError=1)
+ fns.sort()
+ templist = []
+ for x in fns:
+ if len(x) < 3:
+ continue
+ if not x[0].isdigit() or not x[1].isdigit():
+ continue
+ if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
+ continue
+ templist.append(x)
+ fns = templist
+ del templist
+
+ space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
+ colon_separated = set(["ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
+ "CLASSPATH", "INFODIR", "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH",
+ "PATH", "PKG_CONFIG_PATH", "PRELINK_PATH", "PRELINK_PATH_MASK",
+ "PYTHONPATH", "ROOTPATH"])
+
+ config_list = []
+
+ for x in fns:
+ file_path = os.path.join(envd_dir, x)
+ try:
+ myconfig = getconfig(file_path, expand=False)
+ except portage_exception.ParseError, e:
+ writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
+ del e
+ continue
+ if myconfig is None:
+ # broken symlink or file removed by a concurrent process
+ writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
+ continue
+ config_list.append(myconfig)
+ if "SPACE_SEPARATED" in myconfig:
+ space_separated.update(myconfig["SPACE_SEPARATED"].split())
+ del myconfig["SPACE_SEPARATED"]
+ if "COLON_SEPARATED" in myconfig:
+ colon_separated.update(myconfig["COLON_SEPARATED"].split())
+ del myconfig["COLON_SEPARATED"]
+
+ env = {}
+ specials = {}
+ for var in space_separated:
+ mylist = []
+ for myconfig in config_list:
+ if var in myconfig:
+ mylist.extend(filter(None, myconfig[var].split()))
+ del myconfig[var] # prepare for env.update(myconfig)
+ if mylist:
+ env[var] = " ".join(mylist)
+ specials[var] = mylist
+
+ for var in colon_separated:
+ mylist = []
+ for myconfig in config_list:
+ if var in myconfig:
+ mylist.extend(filter(None, myconfig[var].split(":")))
+ del myconfig[var] # prepare for env.update(myconfig)
+ if mylist:
+ env[var] = ":".join(mylist)
+ specials[var] = mylist
+
+ for myconfig in config_list:
+ """Cumulative variables have already been deleted from myconfig so that
+ they won't be overwritten by this dict.update call."""
+ env.update(myconfig)
+
+ ldsoconf_path = os.path.join(target_root, "etc", "ld.so.conf")
+ try:
+ myld = open(ldsoconf_path)
+ myldlines=myld.readlines()
+ myld.close()
+ oldld=[]
+ for x in myldlines:
+ #each line has at least one char (a newline)
+ if x[0]=="#":
+ continue
+ oldld.append(x[:-1])
+ except (IOError, OSError), e:
+ if e.errno != errno.ENOENT:
+ raise
+ oldld = None
+
+ ld_cache_update=False
+
+ newld = specials["LDPATH"]
+ if (oldld!=newld):
+ #ld.so.conf needs updating and ldconfig needs to be run
+ myfd = atomic_ofstream(ldsoconf_path)
+ myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
+ myfd.write("# contents of /etc/env.d directory\n")
+ for x in specials["LDPATH"]:
+ myfd.write(x+"\n")
+ myfd.close()
+ ld_cache_update=True
+
+ # Update prelink.conf if we are prelink-enabled
+ if prelink_capable:
+ newprelink = atomic_ofstream(
+ os.path.join(target_root, "etc", "prelink.conf"))
+ newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
+ newprelink.write("# contents of /etc/env.d directory\n")
+
+ for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
+ newprelink.write("-l "+x+"\n");
+ for x in specials["LDPATH"]+specials["PATH"]+specials["PRELINK_PATH"]:
+ if not x:
+ continue
+ if x[-1]!='/':
+ x=x+"/"
+ plmasked=0
+ for y in specials["PRELINK_PATH_MASK"]:
+ if not y:
+ continue
+ if y[-1]!='/':
+ y=y+"/"
+ if y==x[0:len(y)]:
+ plmasked=1
+ break
+ if not plmasked:
+ newprelink.write("-h "+x+"\n")
+ for x in specials["PRELINK_PATH_MASK"]:
+ newprelink.write("-b "+x+"\n")
+ newprelink.close()
+
+ mtime_changed = False
+ lib_dirs = set()
+ for lib_dir in portage_util.unique_array(specials["LDPATH"]+['usr/lib','usr/lib64','usr/lib32','lib','lib64','lib32']):
+ x = os.path.join(target_root, lib_dir.lstrip(os.sep))
+ try:
+ newldpathtime = os.stat(x)[stat.ST_MTIME]
+ lib_dirs.add(normalize_path(x))
+ except OSError, oe:
+ if oe.errno == errno.ENOENT:
+ try:
+ del prev_mtimes[x]
+ except KeyError:
+ pass
+ # ignore this path because it doesn't exist
+ continue
+ raise
+ if x in prev_mtimes:
+ if prev_mtimes[x] == newldpathtime:
+ pass
+ else:
+ prev_mtimes[x] = newldpathtime
+ mtime_changed = True
+ else:
+ prev_mtimes[x] = newldpathtime
+ mtime_changed = True
+
+ if mtime_changed:
+ ld_cache_update = True
+
+ if makelinks and \
+ not ld_cache_update and \
+ contents is not None:
+ libdir_contents_changed = False
+ for mypath, mydata in contents.iteritems():
+ if mydata[0] not in ("obj","sym"):
+ continue
+ head, tail = os.path.split(mypath)
+ if head in lib_dirs:
+ libdir_contents_changed = True
+ break
+ if not libdir_contents_changed:
+ makelinks = False
+
+ # Only run ldconfig as needed
+ if (ld_cache_update or makelinks):
+ # ldconfig has very different behaviour between FreeBSD and Linux
+ if ostype=="Linux" or ostype.lower().endswith("gnu"):
+ # We can't update links if we haven't cleaned other versions first, as
+ # an older package installed ON TOP of a newer version will cause ldconfig
+ # to overwrite the symlinks we just made. -X means no links. After 'clean'
+ # we can safely create links.
+ writemsg(">>> Regenerating %setc/ld.so.cache...\n" % target_root)
+ if makelinks:
+ commands.getstatusoutput("cd / ; /sbin/ldconfig -r '%s'" % target_root)
+ else:
+ commands.getstatusoutput("cd / ; /sbin/ldconfig -X -r '%s'" % target_root)
+ elif ostype in ("FreeBSD","DragonFly"):
+ writemsg(">>> Regenerating %svar/run/ld-elf.so.hints...\n" % target_root)
+ commands.getstatusoutput(
+ "cd / ; /sbin/ldconfig -elf -i -f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'" % \
+ (target_root, target_root))
+
+ del specials["LDPATH"]
+
+ penvnotice = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
+ penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
+ cenvnotice = penvnotice[:]
+ penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
+ cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"
+
+ #create /etc/profile.env for bash support
+ outfile = atomic_ofstream(os.path.join(target_root, "etc", "profile.env"))
+ outfile.write(penvnotice)
+
+ env_keys = [ x for x in env if x != "LDPATH" ]
+ env_keys.sort()
+ for x in env_keys:
+ outfile.write("export %s='%s'\n" % (x, env[x]))
+ outfile.close()
+
+ #create /etc/csh.env for (t)csh support
+ outfile = atomic_ofstream(os.path.join(target_root, "etc", "csh.env"))
+ outfile.write(cenvnotice)
+ for x in env_keys:
+ outfile.write("setenv %s '%s'\n" % (x, env[x]))
+ outfile.close()
+
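+# Example (hypothetical /etc/env.d/99local file): env_update() stacks
+# entries like these from all env.d files; PATH is colon-joined into
+# /etc/profile.env, while LDPATH is diverted into ld.so.conf (and dropped
+# from profile.env) before ldconfig runs:
+#
+#     PATH="/usr/local/bin"
+#     LDPATH="/usr/local/lib"
+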
+def ExtractKernelVersion(base_dir):
+ """
+ Try to figure out what kernel version we are running
+ @param base_dir: Path to sources (usually /usr/src/linux)
+ @type base_dir: string
+ @rtype: tuple( version[string], error[string])
+ @returns:
+ 1. tuple( version[string], error[string])
+ Either version or error is populated (but never both)
+
+ """
+ lines = []
+ pathname = os.path.join(base_dir, 'Makefile')
+ try:
+ f = open(pathname, 'r')
+ except OSError, details:
+ return (None, str(details))
+ except IOError, details:
+ return (None, str(details))
+
+ try:
+ for i in range(4):
+ lines.append(f.readline())
+ except OSError, details:
+ return (None, str(details))
+ except IOError, details:
+ return (None, str(details))
+
+ lines = [l.strip() for l in lines]
+
+ version = ''
+
+ #XXX: The following code relies on the ordering of vars within the Makefile
+ for line in lines:
+ # split on the '=' then remove annoying whitespace
+ items = line.split("=")
+ items = [i.strip() for i in items]
+ if items[0] == 'VERSION' or \
+ items[0] == 'PATCHLEVEL':
+ version += items[1]
+ version += "."
+ elif items[0] == 'SUBLEVEL':
+ version += items[1]
+ elif items[0] == 'EXTRAVERSION' and \
+ items[-1] != items[0]:
+ version += items[1]
+
+ # Grab a list of files named localversion* and sort them
+ localversions = os.listdir(base_dir)
+ for x in range(len(localversions)-1,-1,-1):
+ if localversions[x][:12] != "localversion":
+ del localversions[x]
+ localversions.sort()
+
+ # Append the contents of each to the version string, stripping ALL whitespace
+ for lv in localversions:
+ version += "".join( " ".join( grabfile( base_dir+ "/" + lv ) ).split() )
+
+ # Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
+ kernelconfig = getconfig(base_dir+"/.config")
+ if kernelconfig and kernelconfig.has_key("CONFIG_LOCALVERSION"):
+ version += "".join(kernelconfig["CONFIG_LOCALVERSION"].split())
+
+ return (version,None)
+
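+# Example (hypothetical /usr/src/linux/Makefile header):
+#
+#     VERSION = 2
+#     PATCHLEVEL = 6
+#     SUBLEVEL = 19
+#     EXTRAVERSION = -gentoo-r5
+#
+# would yield ("2.6.19-gentoo-r5", None), with any localversion* file
+# contents and CONFIG_LOCALVERSION appended after that.
+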
+def autouse(myvartree, use_cache=1, mysettings=None):
+ """
+	autouse returns a list of USE variables auto-enabled for packages being installed
+
+ @param myvartree: Instance of the vartree class (from /var/db/pkg...)
+ @type myvartree: vartree
+ @param use_cache: read values from cache
+ @type use_cache: Boolean
+ @param mysettings: Instance of config
+ @type mysettings: config
+ @rtype: string
+ @returns: A string containing a list of USE variables that are enabled via use.defaults
+ """
+ if mysettings is None:
+ global settings
+ mysettings = settings
+ if mysettings.profile_path is None:
+ return ""
+ myusevars=""
+ usedefaults = mysettings.use_defs
+ for myuse in usedefaults:
+ dep_met = True
+ for mydep in usedefaults[myuse]:
+ if not myvartree.dep_match(mydep,use_cache=True):
+ dep_met = False
+ break
+ if dep_met:
+ myusevars += " "+myuse
+ return myusevars
+
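+# Illustrative use.defaults entry (hypothetical): a profile line of the form
+#
+#     gif media-libs/giflib
+#
+# makes autouse() append " gif" once media-libs/giflib is installed, i.e.
+# once myvartree.dep_match() succeeds for that atom.
+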
+def check_config_instance(test):
+ if not test or (str(test.__class__) != 'portage.config'):
+ raise TypeError, "Invalid type for config object: %s" % test.__class__
+
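+# Illustrative construction (mirroring the repoman case described in the
+# class docstring below): build a config that ignores /etc/portage
+# overrides, then verify its type:
+#
+#     >>> cfg = config(config_root="/", target_root="/", local_config=False)
+#     >>> check_config_instance(cfg)
+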
+class config:
+ """
+ This class encompasses the main portage configuration. Data is pulled from
+ ROOT/PORTDIR/profiles/, from ROOT/etc/make.profile incrementally through all
+ parent profiles as well as from ROOT/PORTAGE_CONFIGROOT/* for user specified
+ overrides.
+
+	Generally, if you need data like USE flags, FEATURES, environment variables,
+	virtuals etc., you look in here.
+ """
+
+ def __init__(self, clone=None, mycpv=None, config_profile_path=None,
+ config_incrementals=None, config_root=None, target_root=None,
+ local_config=True):
+ """
+ @param clone: If provided, init will use deepcopy to copy by value the instance.
+ @type clone: Instance of config class.
+ @param mycpv: CPV to load up (see setcpv), this is the same as calling init with mycpv=None
+ and then calling instance.setcpv(mycpv).
+ @type mycpv: String
+ @param config_profile_path: Configurable path to the profile (usually PROFILE_PATH from portage_const)
+ @type config_profile_path: String
+ @param config_incrementals: List of incremental variables (usually portage_const.INCREMENTALS)
+ @type config_incrementals: List
+ @param config_root: path to read local config from (defaults to "/", see PORTAGE_CONFIGROOT)
+ @type config_root: String
+ @param target_root: __init__ override of $ROOT env variable.
+ @type target_root: String
+ @param local_config: Enables loading of local config (/etc/portage); used most by repoman to
+ ignore local config (keywording and unmasking)
+ @type local_config: Boolean
+ """
+
+ debug = os.environ.get("PORTAGE_DEBUG") == "1"
+
+ self.already_in_regenerate = 0
+
+ self.locked = 0
+ self.mycpv = None
+ self.puse = []
+ self.modifiedkeys = []
+ self.uvlist = []
+
+ self.virtuals = {}
+ self.virts_p = {}
+ self.dirVirtuals = None
+ self.v_count = 0
+
+ # Virtuals obtained from the vartree
+ self.treeVirtuals = {}
+ # Virtuals by user specification. Includes negatives.
+ self.userVirtuals = {}
+ # Virtual negatives from user specifications.
+ self.negVirtuals = {}
+
+ self.user_profile_dir = None
+ self.local_config = local_config
+
+ if clone:
+ self.incrementals = copy.deepcopy(clone.incrementals)
+ self.profile_path = copy.deepcopy(clone.profile_path)
+ self.user_profile_dir = copy.deepcopy(clone.user_profile_dir)
+ self.local_config = copy.deepcopy(clone.local_config)
+
+ self.module_priority = copy.deepcopy(clone.module_priority)
+ self.modules = copy.deepcopy(clone.modules)
+
+ self.depcachedir = copy.deepcopy(clone.depcachedir)
+
+ self.packages = copy.deepcopy(clone.packages)
+ self.virtuals = copy.deepcopy(clone.virtuals)
+
+ self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
+ self.userVirtuals = copy.deepcopy(clone.userVirtuals)
+ self.negVirtuals = copy.deepcopy(clone.negVirtuals)
+
+ self.use_defs = copy.deepcopy(clone.use_defs)
+ self.usemask = copy.deepcopy(clone.usemask)
+ self.usemask_list = copy.deepcopy(clone.usemask_list)
+ self.pusemask_list = copy.deepcopy(clone.pusemask_list)
+ self.useforce = copy.deepcopy(clone.useforce)
+ self.useforce_list = copy.deepcopy(clone.useforce_list)
+ self.puseforce_list = copy.deepcopy(clone.puseforce_list)
+ self.puse = copy.deepcopy(clone.puse)
+ self.make_defaults_use = copy.deepcopy(clone.make_defaults_use)
+ self.pkgprofileuse = copy.deepcopy(clone.pkgprofileuse)
+ self.mycpv = copy.deepcopy(clone.mycpv)
+
+ self.configlist = copy.deepcopy(clone.configlist)
+ self.lookuplist = self.configlist[:]
+ self.lookuplist.reverse()
+ self.configdict = {
+ "env.d": self.configlist[0],
+ "pkginternal": self.configlist[1],
+ "globals": self.configlist[2],
+ "defaults": self.configlist[3],
+ "conf": self.configlist[4],
+ "pkg": self.configlist[5],
+ "auto": self.configlist[6],
+ "backupenv": self.configlist[7],
+ "env": self.configlist[8] }
+ self.profiles = copy.deepcopy(clone.profiles)
+ self.backupenv = self.configdict["backupenv"]
+ self.pusedict = copy.deepcopy(clone.pusedict)
+ self.categories = copy.deepcopy(clone.categories)
+ self.pkeywordsdict = copy.deepcopy(clone.pkeywordsdict)
+ self.pmaskdict = copy.deepcopy(clone.pmaskdict)
+ self.punmaskdict = copy.deepcopy(clone.punmaskdict)
+ self.prevmaskdict = copy.deepcopy(clone.prevmaskdict)
+ self.pprovideddict = copy.deepcopy(clone.pprovideddict)
+ self.dirVirtuals = copy.deepcopy(clone.dirVirtuals)
+ self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
+ self.features = copy.deepcopy(clone.features)
+ else:
+
+ # backupenv is for calculated incremental variables.
+ self.backupenv = os.environ.copy()
+
+ def check_var_directory(varname, var):
+ if not os.path.isdir(var):
+ writemsg(("!!! Error: %s='%s' is not a directory. " + \
+ "Please correct this.\n") % (varname, var),
+ noiselevel=-1)
+ raise portage_exception.DirectoryNotFound(var)
+
+ if config_root is None:
+ config_root = "/"
+
+ config_root = \
+ normalize_path(config_root).rstrip(os.path.sep) + os.path.sep
+
+ check_var_directory("PORTAGE_CONFIGROOT", config_root)
+
+ self.depcachedir = DEPCACHE_PATH
+
+ if not config_profile_path:
+ config_profile_path = \
+ os.path.join(config_root, PROFILE_PATH.lstrip(os.path.sep))
+ if os.path.isdir(config_profile_path):
+ self.profile_path = config_profile_path
+ else:
+ self.profile_path = None
+ else:
+ self.profile_path = config_profile_path[:]
+
+ if not config_incrementals:
+ writemsg("incrementals not specified to class config\n")
+ self.incrementals = copy.deepcopy(portage_const.INCREMENTALS)
+ else:
+ self.incrementals = copy.deepcopy(config_incrementals)
+
+ self.module_priority = ["user","default"]
+ self.modules = {}
+ self.modules["user"] = getconfig(
+ os.path.join(config_root, MODULES_FILE_PATH.lstrip(os.path.sep)))
+ if self.modules["user"] is None:
+ self.modules["user"] = {}
+ self.modules["default"] = {
+ "portdbapi.metadbmodule": "cache.metadata.database",
+ "portdbapi.auxdbmodule": "cache.flat_hash.database",
+ }
+
+ self.usemask=[]
+ self.configlist=[]
+
+ # back up our incremental variables:
+ self.configdict={}
+			# configlist will contain: [ env.d, pkginternal, globals, defaults, conf, pkg, auto, backupenv, env ]
+ self.configlist.append({})
+ self.configdict["env.d"] = self.configlist[-1]
+
+ self.configlist.append({})
+ self.configdict["pkginternal"] = self.configlist[-1]
+
+ # The symlink might not exist or might not be a symlink.
+ if self.profile_path is None:
+ self.profiles = []
+ else:
+ self.profiles = []
+ def addProfile(currentPath):
+ parentsFile = os.path.join(currentPath, "parent")
+ if os.path.exists(parentsFile):
+ parents = grabfile(parentsFile)
+ if not parents:
+ raise portage_exception.ParseError(
+							"Empty parent file: '%s'" % parentsFile)
+ for parentPath in parents:
+ parentPath = normalize_path(os.path.join(
+ currentPath, parentPath))
+ if os.path.exists(parentPath):
+ addProfile(parentPath)
+ else:
+ raise portage_exception.ParseError(
+ "Parent '%s' not found: '%s'" % \
+ (parentPath, parentsFile))
+ self.profiles.append(currentPath)
+ addProfile(os.path.realpath(self.profile_path))
+ if local_config:
+ custom_prof = os.path.join(
+ config_root, CUSTOM_PROFILE_PATH.lstrip(os.path.sep))
+ if os.path.exists(custom_prof):
+ self.user_profile_dir = custom_prof
+ self.profiles.append(custom_prof)
+ del custom_prof
+
+ self.packages_list = [grabfile_package(os.path.join(x, "packages")) for x in self.profiles]
+ self.packages = stack_lists(self.packages_list, incremental=1)
+ del self.packages_list
+ #self.packages = grab_stacked("packages", self.profiles, grabfile, incremental_lines=1)
+
+			# prevmaskdict
+ self.prevmaskdict={}
+ for x in self.packages:
+ mycatpkg=dep_getkey(x)
+ if not self.prevmaskdict.has_key(mycatpkg):
+ self.prevmaskdict[mycatpkg]=[x]
+ else:
+ self.prevmaskdict[mycatpkg].append(x)
+
+ # get profile-masked use flags -- INCREMENTAL Child over parent
+ self.usemask_list = [grabfile(os.path.join(x, "use.mask")) \
+ for x in self.profiles]
+ self.usemask = set(stack_lists(
+ self.usemask_list, incremental=True))
+ use_defs_lists = [grabdict(os.path.join(x, "use.defaults")) for x in self.profiles]
+ self.use_defs = stack_dictlist(use_defs_lists, incremental=True)
+ del use_defs_lists
+
+ self.pusemask_list = []
+ rawpusemask = [grabdict_package(
+ os.path.join(x, "package.use.mask")) \
+ for x in self.profiles]
+ for i in xrange(len(self.profiles)):
+ cpdict = {}
+ for k, v in rawpusemask[i].iteritems():
+ cpdict.setdefault(dep_getkey(k), {})[k] = v
+ self.pusemask_list.append(cpdict)
+ del rawpusemask
+
+ self.pkgprofileuse = []
+ rawprofileuse = [grabdict_package(
+ os.path.join(x, "package.use"), juststrings=True) \
+ for x in self.profiles]
+ for i in xrange(len(self.profiles)):
+ cpdict = {}
+ for k, v in rawprofileuse[i].iteritems():
+ cpdict.setdefault(dep_getkey(k), {})[k] = v
+ self.pkgprofileuse.append(cpdict)
+ del rawprofileuse
+
+ self.useforce_list = [grabfile(os.path.join(x, "use.force")) \
+ for x in self.profiles]
+ self.useforce = set(stack_lists(
+ self.useforce_list, incremental=True))
+
+ self.puseforce_list = []
+ rawpuseforce = [grabdict_package(
+ os.path.join(x, "package.use.force")) \
+ for x in self.profiles]
+ for i in xrange(len(self.profiles)):
+ cpdict = {}
+ for k, v in rawpuseforce[i].iteritems():
+ cpdict.setdefault(dep_getkey(k), {})[k] = v
+ self.puseforce_list.append(cpdict)
+ del rawpuseforce
+
+ try:
+ self.mygcfg = getconfig(os.path.join(config_root, "etc", "make.globals"))
+
+ if self.mygcfg is None:
+ self.mygcfg = {}
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ if debug:
+ raise
+ writemsg("!!! %s\n" % (e), noiselevel=-1)
+ if not isinstance(e, EnvironmentError):
+ writemsg("!!! Incorrect multiline literals can cause " + \
+ "this. Do not use them.\n", noiselevel=-1)
+ sys.exit(1)
+ self.configlist.append(self.mygcfg)
+ self.configdict["globals"]=self.configlist[-1]
+
+ self.make_defaults_use = []
+ self.mygcfg = {}
+ if self.profiles:
+ try:
+ mygcfg_dlists = [getconfig(os.path.join(x, "make.defaults")) for x in self.profiles]
+ for cfg in mygcfg_dlists:
+ if cfg:
+ self.make_defaults_use.append(cfg.get("USE", ""))
+ else:
+ self.make_defaults_use.append("")
+ self.mygcfg = stack_dicts(mygcfg_dlists, incrementals=portage_const.INCREMENTALS, ignore_none=1)
+ #self.mygcfg = grab_stacked("make.defaults", self.profiles, getconfig)
+ if self.mygcfg is None:
+ self.mygcfg = {}
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ if debug:
+ raise
+ writemsg("!!! %s\n" % (e), noiselevel=-1)
+ if not isinstance(e, EnvironmentError):
+ writemsg("!!! 'rm -Rf /usr/portage/profiles; " + \
+ "emerge sync' may fix this. If it does\n",
+ noiselevel=-1)
+ writemsg("!!! not then please report this to " + \
+ "bugs.gentoo.org and, if possible, a dev\n",
+ noiselevel=-1)
+ writemsg("!!! on #gentoo (irc.freenode.org)\n",
+ noiselevel=-1)
+ sys.exit(1)
+ self.configlist.append(self.mygcfg)
+ self.configdict["defaults"]=self.configlist[-1]
+
+ try:
+ self.mygcfg = getconfig(
+ os.path.join(config_root, MAKE_CONF_FILE.lstrip(os.path.sep)),
+ allow_sourcing=True)
+ if self.mygcfg is None:
+ self.mygcfg = {}
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ if debug:
+ raise
+ writemsg("!!! %s\n" % (e), noiselevel=-1)
+ if not isinstance(e, EnvironmentError):
+ writemsg("!!! Incorrect multiline literals can cause " + \
+ "this. Do not use them.\n", noiselevel=-1)
+ sys.exit(1)
+
+ # Allow ROOT setting to come from make.conf if it's not overridden
+ # by the constructor argument (from the calling environment). As a
+ # special exception for a very common use case, config_root == "/"
+ # implies that ROOT in make.conf should be ignored. That way, the
+ # user can chroot into $ROOT and the ROOT setting in make.conf will
+ # be automatically ignored (unless config_root is other than "/").
+ if config_root != "/" and \
+ target_root is None and "ROOT" in self.mygcfg:
+ target_root = self.mygcfg["ROOT"]
+
+ self.configlist.append(self.mygcfg)
+ self.configdict["conf"]=self.configlist[-1]
+
+ self.configlist.append({})
+ self.configdict["pkg"]=self.configlist[-1]
+
+ #auto-use:
+ self.configlist.append({})
+ self.configdict["auto"]=self.configlist[-1]
+
+ self.configlist.append(self.backupenv) # XXX Why though?
+ self.configdict["backupenv"]=self.configlist[-1]
+
+ self.configlist.append(os.environ.copy())
+ self.configdict["env"]=self.configlist[-1]
+
+
+ # make lookuplist for loading package.*
+ self.lookuplist=self.configlist[:]
+ self.lookuplist.reverse()
+
+ # Blacklist vars that could interfere with portage internals.
+ for blacklisted in ["PKGUSE", "PORTAGE_CONFIGROOT", "ROOT"]:
+ for cfg in self.lookuplist:
+ try:
+ del cfg[blacklisted]
+ except KeyError:
+ pass
+ del blacklisted, cfg
+
+ if target_root is None:
+ target_root = "/"
+
+ target_root = \
+ normalize_path(target_root).rstrip(os.path.sep) + os.path.sep
+
+ check_var_directory("ROOT", target_root)
+
+ env_d = getconfig(
+ os.path.join(target_root, "etc", "profile.env"), expand=False)
+ # env_d will be None if profile.env doesn't exist.
+ if env_d:
+ self.configdict["env.d"].update(env_d)
+ # Remove duplicate values so they don't override updated
+ # profile.env values later (profile.env is reloaded in each
+ # call to self.regenerate).
+ for cfg in (self.configdict["backupenv"],
+ self.configdict["env"]):
+ for k, v in env_d.iteritems():
+ try:
+ if cfg[k] == v:
+ del cfg[k]
+ except KeyError:
+ pass
+ del cfg, k, v
+
+ self["PORTAGE_CONFIGROOT"] = config_root
+ self.backup_changes("PORTAGE_CONFIGROOT")
+ self["ROOT"] = target_root
+ self.backup_changes("ROOT")
+
+ self.pusedict = {}
+ self.pkeywordsdict = {}
+ self.punmaskdict = {}
+ abs_user_config = os.path.join(config_root,
+ USER_CONFIG_PATH.lstrip(os.path.sep))
+
+ # locations for "categories" and "arch.list" files
+ locations = [os.path.join(self["PORTDIR"], "profiles")]
+ pmask_locations = [os.path.join(self["PORTDIR"], "profiles")]
+ pmask_locations.extend(self.profiles)
+
+ """ repoman controls PORTDIR_OVERLAY via the environment, so no
+ special cases are needed here."""
+ overlay_profiles = []
+ for ov in self["PORTDIR_OVERLAY"].split():
+ ov = normalize_path(ov)
+ profiles_dir = os.path.join(ov, "profiles")
+ if os.path.isdir(profiles_dir):
+ overlay_profiles.append(profiles_dir)
+ locations += overlay_profiles
+
+ pmask_locations.extend(overlay_profiles)
+
+ if local_config:
+ locations.append(abs_user_config)
+ pmask_locations.append(abs_user_config)
+ pusedict = grabdict_package(
+ os.path.join(abs_user_config, "package.use"), recursive=1)
+ for key in pusedict.keys():
+ cp = dep_getkey(key)
+ if not self.pusedict.has_key(cp):
+ self.pusedict[cp] = {}
+ self.pusedict[cp][key] = pusedict[key]
+
+ #package.keywords
+ pkgdict = grabdict_package(
+ os.path.join(abs_user_config, "package.keywords"),
+ recursive=1)
+ for key in pkgdict.keys():
+ # default to ~arch if no specific keyword is given
+ if not pkgdict[key]:
+ mykeywordlist = []
+ if self.configdict["defaults"] and self.configdict["defaults"].has_key("ACCEPT_KEYWORDS"):
+ groups = self.configdict["defaults"]["ACCEPT_KEYWORDS"].split()
+ else:
+ groups = []
+ for keyword in groups:
+ if not keyword[0] in "~-":
+ mykeywordlist.append("~"+keyword)
+ pkgdict[key] = mykeywordlist
+ cp = dep_getkey(key)
+ if not self.pkeywordsdict.has_key(cp):
+ self.pkeywordsdict[cp] = {}
+ self.pkeywordsdict[cp][key] = pkgdict[key]
+
+ #package.unmask
+ pkgunmasklines = grabfile_package(
+ os.path.join(abs_user_config, "package.unmask"),
+ recursive=1)
+ for x in pkgunmasklines:
+ mycatpkg=dep_getkey(x)
+ if self.punmaskdict.has_key(mycatpkg):
+ self.punmaskdict[mycatpkg].append(x)
+ else:
+ self.punmaskdict[mycatpkg]=[x]
+
+ #getting categories from an external file now
+ categories = [grabfile(os.path.join(x, "categories")) for x in locations]
+ self.categories = stack_lists(categories, incremental=1)
+ del categories
+
+ archlist = [grabfile(os.path.join(x, "arch.list")) for x in locations]
+ archlist = stack_lists(archlist, incremental=1)
+ self.configdict["conf"]["PORTAGE_ARCHLIST"] = " ".join(archlist)
+
+ #package.mask
+ pkgmasklines = []
+ for x in pmask_locations:
+ pkgmasklines.append(grabfile_package(
+ os.path.join(x, "package.mask"), recursive=1))
+ pkgmasklines = stack_lists(pkgmasklines, incremental=1)
+
+ self.pmaskdict = {}
+ for x in pkgmasklines:
+ mycatpkg=dep_getkey(x)
+ if self.pmaskdict.has_key(mycatpkg):
+ self.pmaskdict[mycatpkg].append(x)
+ else:
+ self.pmaskdict[mycatpkg]=[x]
+
+ pkgprovidedlines = [grabfile(os.path.join(x, "package.provided")) for x in self.profiles]
+ pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1)
+ has_invalid_data = False
+ for x in range(len(pkgprovidedlines)-1, -1, -1):
+ myline = pkgprovidedlines[x]
+ if not isvalidatom("=" + myline):
+ writemsg("Invalid package name in package.provided:" + \
+ " %s\n" % myline, noiselevel=-1)
+ has_invalid_data = True
+ del pkgprovidedlines[x]
+ continue
+ cpvr = catpkgsplit(pkgprovidedlines[x])
+ if not cpvr or cpvr[0] == "null":
+ writemsg("Invalid package name in package.provided: "+pkgprovidedlines[x]+"\n",
+ noiselevel=-1)
+ has_invalid_data = True
+ del pkgprovidedlines[x]
+ continue
+ if cpvr[0] == "virtual":
+ writemsg("Virtual package in package.provided: %s\n" % \
+ myline, noiselevel=-1)
+ has_invalid_data = True
+ del pkgprovidedlines[x]
+ continue
+ if has_invalid_data:
+ writemsg("See portage(5) for correct package.provided usage.\n",
+ noiselevel=-1)
+ self.pprovideddict = {}
+ for x in pkgprovidedlines:
+ cpv=catpkgsplit(x)
+ if not x:
+ continue
+ mycatpkg=dep_getkey(x)
+ if self.pprovideddict.has_key(mycatpkg):
+ self.pprovideddict[mycatpkg].append(x)
+ else:
+ self.pprovideddict[mycatpkg]=[x]
+
+ # reasonable defaults; this is important as without USE_ORDER,
+ # USE will always be "" (nothing set)!
+ if "USE_ORDER" not in self:
+ self.backupenv["USE_ORDER"] = "env:pkg:conf:defaults:pkginternal"
+
+ self["PORTAGE_GID"] = str(portage_gid)
+ self.backup_changes("PORTAGE_GID")
+
+ if self.get("PORTAGE_DEPCACHEDIR", None):
+ self.depcachedir = self["PORTAGE_DEPCACHEDIR"]
+ self["PORTAGE_DEPCACHEDIR"] = self.depcachedir
+ self.backup_changes("PORTAGE_DEPCACHEDIR")
+
+ overlays = self.get("PORTDIR_OVERLAY","").split()
+ if overlays:
+ new_ov = []
+ for ov in overlays:
+ ov = normalize_path(ov)
+ if os.path.isdir(ov):
+ new_ov.append(ov)
+ else:
+ writemsg("!!! Invalid PORTDIR_OVERLAY" + \
+ " (not a dir): '%s'\n" % ov, noiselevel=-1)
+ self["PORTDIR_OVERLAY"] = " ".join(new_ov)
+ self.backup_changes("PORTDIR_OVERLAY")
+
+ if "CBUILD" not in self and "CHOST" in self:
+ self["CBUILD"] = self["CHOST"]
+ self.backup_changes("CBUILD")
+
+ self["PORTAGE_BIN_PATH"] = PORTAGE_BIN_PATH
+ self.backup_changes("PORTAGE_BIN_PATH")
+ self["PORTAGE_PYM_PATH"] = PORTAGE_PYM_PATH
+ self.backup_changes("PORTAGE_PYM_PATH")
+
+ for var in ("PORTAGE_INST_UID", "PORTAGE_INST_GID"):
+ try:
+ self[var] = str(int(self.get(var, "0")))
+ except ValueError:
+ writemsg(("!!! %s='%s' is not a valid integer. " + \
+ "Falling back to '0'.\n") % (var, self[var]),
+ noiselevel=-1)
+ self[var] = "0"
+ self.backup_changes(var)
+
+ self.regenerate()
+ self.features = portage_util.unique_array(self["FEATURES"].split())
+
+ if "gpg" in self.features:
+ if not os.path.exists(self["PORTAGE_GPG_DIR"]) or \
+ not os.path.isdir(self["PORTAGE_GPG_DIR"]):
+ writemsg(colorize("BAD", "PORTAGE_GPG_DIR is invalid." + \
+ " Removing gpg from FEATURES.\n"), noiselevel=-1)
+ self.features.remove("gpg")
+
+ if not portage_exec.sandbox_capable and \
+ ("sandbox" in self.features or "usersandbox" in self.features):
+ if self.profile_path is not None and \
+ os.path.realpath(self.profile_path) == \
+ os.path.realpath(PROFILE_PATH):
+ """ Don't show this warning when running repoman and the
+ sandbox feature came from a profile that doesn't belong to
+ the user."""
+ writemsg(colorize("BAD", "!!! Problem with sandbox" + \
+ " binary. Disabling...\n\n"), noiselevel=-1)
+ if "sandbox" in self.features:
+ self.features.remove("sandbox")
+ if "usersandbox" in self.features:
+ self.features.remove("usersandbox")
+
+ self.features.sort()
+ self["FEATURES"] = " ".join(self.features)
+ self.backup_changes("FEATURES")
+
+ self._init_dirs()
+
+ if mycpv:
+ self.setcpv(mycpv)
+
+ def _init_dirs(self):
+ """
+ Create a few directories that are critical to portage operation
+ """
+ if not os.access(self["ROOT"], os.W_OK):
+ return
+
+ dir_mode_map = {
+ "tmp" :(-1, 01777, 0),
+ "var/tmp" :(-1, 01777, 0),
+ "var/lib/portage" :(portage_gid, 02750, 02),
+ "var/cache/edb" :(portage_gid, 0755, 02)
+ }
+
+ for mypath, (gid, mode, modemask) in dir_mode_map.iteritems():
+ try:
+ mydir = os.path.join(self["ROOT"], mypath)
+ portage_util.ensure_dirs(mydir, gid=gid, mode=mode, mask=modemask)
+ except portage_exception.PortageException, e:
+ writemsg("!!! Directory initialization failed: '%s'\n" % mydir,
+ noiselevel=-1)
+ writemsg("!!! %s\n" % str(e),
+ noiselevel=-1)
+
+ def validate(self):
+ """Validate miscellaneous settings and display warnings if necessary.
+ (This code was previously in the global scope of portage.py)"""
+
+ groups = self["ACCEPT_KEYWORDS"].split()
+ archlist = self.archlist()
+ if not archlist:
+ writemsg("--- 'profiles/arch.list' is empty or not available. Empty portage tree?\n")
+ else:
+ for group in groups:
+ if group not in archlist and group[0] != '-':
+ writemsg("!!! INVALID ACCEPT_KEYWORDS: %s\n" % str(group),
+ noiselevel=-1)
+
+ abs_profile_path = os.path.join(self["PORTAGE_CONFIGROOT"],
+ PROFILE_PATH.lstrip(os.path.sep))
+ if not os.path.islink(abs_profile_path) and \
+ not os.path.exists(os.path.join(abs_profile_path, "parent")) and \
+ os.path.exists(os.path.join(self["PORTDIR"], "profiles")):
+ writemsg("\a\n\n!!! %s is not a symlink and will probably prevent most merges.\n" % abs_profile_path,
+ noiselevel=-1)
+ writemsg("!!! It should point into a profile within %s/profiles/\n" % self["PORTDIR"])
+ writemsg("!!! (You can safely ignore this message when syncing. It's harmless.)\n\n\n")
+
+ abs_user_virtuals = os.path.join(self["PORTAGE_CONFIGROOT"],
+ USER_VIRTUALS_FILE.lstrip(os.path.sep))
+ if os.path.exists(abs_user_virtuals):
+ writemsg("\n!!! /etc/portage/virtuals is deprecated in favor of\n")
+ writemsg("!!! /etc/portage/profile/virtuals. Please move it to\n")
+ writemsg("!!! this new location.\n\n")
+
+ def loadVirtuals(self,root):
+ """Not currently used by portage."""
+ writemsg("DEPRECATED: portage.config.loadVirtuals\n")
+ self.getvirtuals(root)
+
+ def load_best_module(self,property_string):
+ best_mod = best_from_dict(property_string,self.modules,self.module_priority)
+ try:
+ mod = load_mod(best_mod)
+ except ImportError:
+ dump_traceback(red("Error: Failed to import module '%s'") % best_mod, noiselevel=0)
+ sys.exit(1)
+ return mod
+
+ def lock(self):
+ self.locked = 1
+
+ def unlock(self):
+ self.locked = 0
+
+ def modifying(self):
+ if self.locked:
+ raise Exception, "Configuration is locked."
+
+ def backup_changes(self,key=None):
+ self.modifying()
+ if key and self.configdict["env"].has_key(key):
+ self.backupenv[key] = copy.deepcopy(self.configdict["env"][key])
+ else:
+ raise KeyError, "No such key defined in environment: %s" % key
+
+ def reset(self,keeping_pkg=0,use_cache=1):
+ """
+ Restore environment from self.backupenv, call self.regenerate()
+		@param keeping_pkg: Should we keep the setcpv() data or delete it.
+ @type keeping_pkg: Boolean
+ @param use_cache: Should self.regenerate use the cache or not
+ @type use_cache: Boolean
+		@rtype: None
+ """
+ self.modifying()
+ self.configdict["env"].clear()
+ self.configdict["env"].update(self.backupenv)
+
+ self.modifiedkeys = []
+ if not keeping_pkg:
+ self.mycpv = None
+ self.puse = ""
+ self.configdict["pkg"].clear()
+ self.configdict["pkginternal"].clear()
+ self.configdict["defaults"]["USE"] = \
+ " ".join(self.make_defaults_use)
+ self.usemask = set(stack_lists(
+ self.usemask_list, incremental=True))
+ self.useforce = set(stack_lists(
+ self.useforce_list, incremental=True))
+ self.regenerate(use_cache=use_cache)
+
+ def load_infodir(self,infodir):
+ self.modifying()
+ if self.configdict.has_key("pkg"):
+ for x in self.configdict["pkg"].keys():
+ del self.configdict["pkg"][x]
+ else:
+ writemsg("No pkg setup for settings instance?\n",
+ noiselevel=-1)
+ sys.exit(17)
+
+ if os.path.exists(infodir):
+ if os.path.exists(infodir+"/environment"):
+ self.configdict["pkg"]["PORT_ENV_FILE"] = infodir+"/environment"
+
+ myre = re.compile('^[A-Z]+$')
+ null_byte = "\0"
+ for filename in listdir(infodir,filesonly=1,EmptyOnError=1):
+ if myre.match(filename):
+ try:
+ file_path = os.path.join(infodir, filename)
+ mydata = open(file_path).read().strip()
+ if len(mydata) < 2048 or filename == "USE":
+ if null_byte in mydata:
+ writemsg("!!! Null byte found in metadata " + \
+ "file: '%s'\n" % file_path, noiselevel=-1)
+ continue
+ if filename == "USE":
+ binpkg_flags = "-* " + mydata
+ self.configdict["pkg"][filename] = binpkg_flags
+ self.configdict["env"][filename] = mydata
+ else:
+ self.configdict["pkg"][filename] = mydata
+ self.configdict["env"][filename] = mydata
+ # CATEGORY is important because it's used in doebuild
+ # to infer the cpv. If it's corrupted, it leads to
+ # strange errors later on, so we'll validate it and
+ # print a warning if necessary.
+ if filename == "CATEGORY":
+ matchobj = re.match("[-a-zA-Z0-9_.+]+", mydata)
+ if not matchobj or matchobj.start() != 0 or \
+ matchobj.end() != len(mydata):
+ writemsg("!!! CATEGORY file is corrupt: %s\n" % \
+ os.path.join(infodir, filename), noiselevel=-1)
+ except (OSError, IOError):
+ writemsg("!!! Unable to read file: %s\n" % infodir+"/"+filename,
+ noiselevel=-1)
+ pass
+ return 1
+ return 0
+
+ def setcpv(self, mycpv, use_cache=1, mydb=None):
+ """
+ Load a particular CPV into the config, this lets us see the
+ Default USE flags for a particular ebuild as well as the USE
+ flags from package.use.
+
+ @param mycpv: A cpv to load
+ @type mycpv: string
+ @param use_cache: Enables caching
+ @type use_cache: Boolean
+ @param mydb: a dbapi instance that supports aux_get with the IUSE key.
+ @type mydb: dbapi or derivative.
+ @rtype: None
+ """
+
+ self.modifying()
+ if self.mycpv == mycpv:
+ return
+ has_changed = False
+ self.mycpv = mycpv
+ cp = dep_getkey(mycpv)
+ pkginternaluse = ""
+ if mydb:
+ pkginternaluse = " ".join([x[1:] \
+ for x in mydb.aux_get(mycpv, ["IUSE"])[0].split() \
+ if x.startswith("+")])
+ if pkginternaluse != self.configdict["pkginternal"].get("USE", ""):
+ self.configdict["pkginternal"]["USE"] = pkginternaluse
+ has_changed = True
+ defaults = []
+ for i in xrange(len(self.profiles)):
+ defaults.append(self.make_defaults_use[i])
+ cpdict = self.pkgprofileuse[i].get(cp, None)
+ if cpdict:
+ best_match = best_match_to_list(self.mycpv, cpdict.keys())
+ if best_match:
+ defaults.append(cpdict[best_match])
+ defaults = " ".join(defaults)
+ if defaults != self.configdict["defaults"].get("USE",""):
+ self.configdict["defaults"]["USE"] = defaults
+ has_changed = True
+ useforce = []
+ for i in xrange(len(self.profiles)):
+ useforce.append(self.useforce_list[i])
+ cpdict = self.puseforce_list[i].get(cp, None)
+ if cpdict:
+ best_match = best_match_to_list(self.mycpv, cpdict.keys())
+ if best_match:
+ useforce.append(cpdict[best_match])
+ useforce = set(stack_lists(useforce, incremental=True))
+ if useforce != self.useforce:
+ self.useforce = useforce
+ has_changed = True
+ usemask = []
+ for i in xrange(len(self.profiles)):
+ usemask.append(self.usemask_list[i])
+ cpdict = self.pusemask_list[i].get(cp, None)
+ if cpdict:
+ best_match = best_match_to_list(self.mycpv, cpdict.keys())
+ if best_match:
+ usemask.append(cpdict[best_match])
+ usemask = set(stack_lists(usemask, incremental=True))
+ if usemask != self.usemask:
+ self.usemask = usemask
+ has_changed = True
+ oldpuse = self.puse
+ self.puse = ""
+ if self.pusedict.has_key(cp):
+ self.pusekey = best_match_to_list(self.mycpv, self.pusedict[cp].keys())
+ if self.pusekey:
+ self.puse = " ".join(self.pusedict[cp][self.pusekey])
+ if oldpuse != self.puse:
+ has_changed = True
+ self.configdict["pkg"]["PKGUSE"] = self.puse[:] # For saving to PUSE file
+ self.configdict["pkg"]["USE"] = self.puse[:] # this gets appended to USE
+ # CATEGORY is essential for doebuild calls
+ self.configdict["pkg"]["CATEGORY"] = mycpv.split("/")[0]
+ if has_changed:
+ self.reset(keeping_pkg=1,use_cache=use_cache)
+
+ def setinst(self,mycpv,mydbapi):
+ self.modifying()
+ if len(self.virtuals) == 0:
+ self.getvirtuals()
+ # Grab the virtuals this package provides and add them into the tree virtuals.
+ provides = mydbapi.aux_get(mycpv, ["PROVIDE"])[0]
+ if isinstance(mydbapi, portdbapi):
+ myuse = self["USE"]
+ else:
+ myuse = mydbapi.aux_get(mycpv, ["USE"])[0]
+ virts = flatten(portage_dep.use_reduce(portage_dep.paren_reduce(provides), uselist=myuse.split()))
+
+ cp = dep_getkey(mycpv)
+ for virt in virts:
+ virt = dep_getkey(virt)
+ if not self.treeVirtuals.has_key(virt):
+ self.treeVirtuals[virt] = []
+ # XXX: Is this bad? -- It's a permanent modification
+ if cp not in self.treeVirtuals[virt]:
+ self.treeVirtuals[virt].append(cp)
+
+ self.virtuals = self.__getvirtuals_compile()
+
+
+ def regenerate(self,useonly=0,use_cache=1):
+ """
+ Regenerate settings
+ This involves regenerating valid USE flags, re-expanding USE_EXPAND flags
+ re-stacking USE flags (-flag and -*), as well as any other INCREMENTAL
+ variables. This also updates the env.d configdict; useful in case an ebuild
+ changes the environment.
+
+ If FEATURES has already been stacked, it is not stacked twice.
+
+ @param useonly: Only regenerate USE flags (not any other incrementals)
+ @type useonly: Boolean
+ @param use_cache: Enable Caching (only for autouse)
+ @type use_cache: Boolean
+ @rtype: None
+ """
+
+ self.modifying()
+ if self.already_in_regenerate:
+ # XXX: THIS REALLY NEEDS TO GET FIXED. autouse() loops.
+ writemsg("!!! Looping in regenerate.\n",1)
+ return
+ else:
+ self.already_in_regenerate = 1
+
+ # We grab the latest profile.env here since it changes frequently.
+ self.configdict["env.d"].clear()
+ env_d = getconfig(
+ os.path.join(self["ROOT"], "etc", "profile.env"), expand=False)
+ if env_d:
+ # env_d will be None if profile.env doesn't exist.
+ self.configdict["env.d"].update(env_d)
+
+ if useonly:
+ myincrementals=["USE"]
+ else:
+ myincrementals = self.incrementals
+ myincrementals = set(myincrementals)
+ # If self.features exists, it has already been stacked and may have
+ # been mutated, so don't stack it again or else any mutations will be
+ # reverted.
+ if "FEATURES" in myincrementals and hasattr(self, "features"):
+ myincrementals.remove("FEATURES")
+
+ if "USE" in myincrementals:
+ # Process USE last because it depends on USE_EXPAND which is also
+ # an incremental!
+ myincrementals.remove("USE")
+
+ for mykey in myincrementals:
+
+ mydbs=self.configlist[:-1]
+
+ myflags=[]
+ for curdb in mydbs:
+ if mykey not in curdb:
+ continue
+ #variables are already expanded
+ mysplit = curdb[mykey].split()
+
+ for x in mysplit:
+ if x=="-*":
+ # "-*" is a special "minus" var that means "unset all settings".
+ # so USE="-* gnome" will have *just* gnome enabled.
+ myflags = []
+ continue
+
+ if x[0]=="+":
+ # Not legal. People assume too much. Complain.
+ writemsg(red("USE flags should not start with a '+': %s\n" % x),
+ noiselevel=-1)
+ x=x[1:]
+ if not x:
+ continue
+
+ if (x[0]=="-"):
+ if (x[1:] in myflags):
+ # Unset/Remove it.
+ del myflags[myflags.index(x[1:])]
+ continue
+
+ # We got here, so add it now.
+ if x not in myflags:
+ myflags.append(x)
+
+ myflags.sort()
+ #store setting in last element of configlist, the original environment:
+ if myflags or mykey in self:
+ self.configlist[-1][mykey] = " ".join(myflags)
+ del myflags
+
+ # Do the USE calculation last because it depends on USE_EXPAND.
+ if "auto" in self["USE_ORDER"].split(":"):
+ self.configdict["auto"]["USE"] = autouse(
+ vartree(root=self["ROOT"], categories=self.categories,
+ settings=self),
+ use_cache=use_cache, mysettings=self)
+ else:
+ self.configdict["auto"]["USE"] = ""
+
+ use_expand_protected = []
+ use_expand = self.get("USE_EXPAND", "").split()
+ for var in use_expand:
+ var_lower = var.lower()
+ for x in self.get(var, "").split():
+ # Any incremental USE_EXPAND variables have already been
+ # processed, so leading +/- operators are invalid here.
+ if x[0] == "+":
+ writemsg(colorize("BAD", "Invalid '+' operator in " + \
+ "non-incremental variable '%s': '%s'\n" % (var, x)),
+ noiselevel=-1)
+ x = x[1:]
+ if x[0] == "-":
+ writemsg(colorize("BAD", "Invalid '-' operator in " + \
+ "non-incremental variable '%s': '%s'\n" % (var, x)),
+ noiselevel=-1)
+ continue
+ mystr = var_lower + "_" + x
+ if mystr not in use_expand_protected:
+ use_expand_protected.append(mystr)
+
+ if not self.uvlist:
+ for x in self["USE_ORDER"].split(":"):
+ if x in self.configdict:
+ self.uvlist.append(self.configdict[x])
+ self.uvlist.reverse()
+
+ myflags = use_expand_protected[:]
+ for curdb in self.uvlist:
+ if "USE" not in curdb:
+ continue
+ mysplit = curdb["USE"].split()
+ for x in mysplit:
+ if x == "-*":
+ myflags = use_expand_protected[:]
+ continue
+
+ if x[0] == "+":
+ writemsg(colorize("BAD", "USE flags should not start " + \
+ "with a '+': %s\n" % x), noiselevel=-1)
+ x = x[1:]
+ if not x:
+ continue
+
+ if x[0] == "-":
+ try:
+ myflags.remove(x[1:])
+ except ValueError:
+ pass
+ continue
+
+ if x not in myflags:
+ myflags.append(x)
+
+ myflags = set(myflags)
+ myflags.update(self.useforce)
+
+ # FEATURES=test should imply USE=test
+ if "test" in self.configlist[-1].get("FEATURES","").split():
+ myflags.add("test")
+
+ usesplit = [ x for x in myflags if \
+ x not in self.usemask]
+
+ usesplit.sort()
+
+ # Use the calculated USE flags to regenerate the USE_EXPAND flags so
+ # that they are consistent.
+ for var in use_expand:
+ prefix = var.lower() + "_"
+ prefix_len = len(prefix)
+ expand_flags = set([ x[prefix_len:] for x in usesplit \
+ if x.startswith(prefix) ])
+ var_split = self.get(var, "").split()
+ # Preserve the order of var_split because it can matter for things
+ # like LINGUAS.
+ var_split = [ x for x in var_split if x in expand_flags ]
+ var_split.extend(expand_flags.difference(var_split))
+ if var_split or var in self:
+ # Don't export empty USE_EXPAND vars unless the user config
+ # exports them as empty. This is required for vars such as
+ # LINGUAS, where unset and empty have different meanings.
+ self[var] = " ".join(var_split)
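+ # e.g. (hypothetical) with USE_EXPAND="LINGUAS" and usesplit containing
+ # "linguas_de linguas_en", LINGUAS is regenerated here as "de en".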
+
+ # Prepend the ARCH variable to USE settings so '-*' in env doesn't kill arch.
+ if self.configdict["defaults"].has_key("ARCH"):
+ if self.configdict["defaults"]["ARCH"]:
+ if self.configdict["defaults"]["ARCH"] not in usesplit:
+ usesplit.insert(0,self.configdict["defaults"]["ARCH"])
+
+ self.configlist[-1]["USE"]= " ".join(usesplit)
+
+ self.already_in_regenerate = 0
+
+ def get_virts_p(self, myroot):
+ if self.virts_p:
+ return self.virts_p
+ virts = self.getvirtuals(myroot)
+ if virts:
+ myvkeys = virts.keys()
+ for x in myvkeys:
+ vkeysplit = x.split("/")
+ if not self.virts_p.has_key(vkeysplit[1]):
+ self.virts_p[vkeysplit[1]] = virts[x]
+ return self.virts_p
+
+ def getvirtuals(self, myroot=None):
+ """myroot is now ignored because, due to caching, it has always been
+ broken for all but the first call."""
+ myroot = self["ROOT"]
+ if self.virtuals:
+ return self.virtuals
+
+ virtuals_list = []
+ for x in self.profiles:
+ virtuals_file = os.path.join(x, "virtuals")
+ virtuals_dict = grabdict(virtuals_file)
+ for k in virtuals_dict.keys():
+ if not isvalidatom(k) or dep_getkey(k) != k:
+ writemsg("--- Invalid virtuals atom in %s: %s\n" % \
+ (virtuals_file, k), noiselevel=-1)
+ del virtuals_dict[k]
+ continue
+ myvalues = virtuals_dict[k]
+ for x in myvalues:
+ myatom = x
+ if x.startswith("-"):
+ # allow incrementals
+ myatom = x[1:]
+ if not isvalidatom(myatom):
+ writemsg("--- Invalid atom in %s: %s\n" % \
+ (virtuals_file, x), noiselevel=-1)
+ myvalues.remove(x)
+ if not myvalues:
+ del virtuals_dict[k]
+ if virtuals_dict:
+ virtuals_list.append(virtuals_dict)
+
+ self.dirVirtuals = stack_dictlist(virtuals_list, incremental=True)
+ del virtuals_list
+
+ for virt in self.dirVirtuals:
+ # Preference for virtuals decreases from left to right.
+ self.dirVirtuals[virt].reverse()
+
+ # Repoman does not use user or tree virtuals.
+ if self.local_config and not self.treeVirtuals:
+ temp_vartree = vartree(myroot, None,
+ categories=self.categories, settings=self)
+ # Reduce the provides into a list by CP.
+ self.treeVirtuals = map_dictlist_vals(getCPFromCPV,temp_vartree.get_all_provides())
+
+ self.virtuals = self.__getvirtuals_compile()
+ return self.virtuals
+
+ def __getvirtuals_compile(self):
+ """Stack installed and profile virtuals. Preference for virtuals
+ decreases from left to right.
+ Order of preference:
+ 1. installed and in profile
+ 2. installed only
+ 3. profile only
+ """
+
+ # Virtuals by profile+tree preferences.
+ ptVirtuals = {}
+
+ for virt, installed_list in self.treeVirtuals.iteritems():
+ profile_list = self.dirVirtuals.get(virt, None)
+ if not profile_list:
+ continue
+ for cp in installed_list:
+ if cp in profile_list:
+ ptVirtuals.setdefault(virt, [])
+ ptVirtuals[virt].append(cp)
+
+ virtuals = stack_dictlist([ptVirtuals, self.treeVirtuals,
+ self.dirVirtuals])
+ return virtuals
+
+ def __delitem__(self,mykey):
+ self.modifying()
+ for x in self.lookuplist:
+ if x != None:
+ if mykey in x:
+ del x[mykey]
+
+ def __getitem__(self,mykey):
+ match = ''
+ for x in self.lookuplist:
+ if x is None:
+ writemsg("!!! lookuplist is null.\n")
+ elif x.has_key(mykey):
+ match = x[mykey]
+ break
+ return match
+
+ def has_key(self,mykey):
+ for x in self.lookuplist:
+ if x.has_key(mykey):
+ return 1
+ return 0
+
+ def __contains__(self, mykey):
+ """Called to implement membership test operators (in and not in)."""
+ return bool(self.has_key(mykey))
+
+ def setdefault(self, k, x=None):
+ if k in self:
+ return self[k]
+ else:
+ self[k] = x
+ return x
+
+ def get(self, k, x=None):
+ if k in self:
+ return self[k]
+ else:
+ return x
+
+ def keys(self):
+ return unique_array(flatten([x.keys() for x in self.lookuplist]))
+
+ def __setitem__(self,mykey,myvalue):
+ "set a value; will be thrown away at reset() time"
+ if type(myvalue) != types.StringType:
+ raise ValueError("Invalid type being used as a value: '%s': '%s'" % (str(mykey),str(myvalue)))
+ self.modifying()
+ self.modifiedkeys += [mykey]
+ self.configdict["env"][mykey]=myvalue
+
+ def environ(self):
+ "return our locally-maintained environment"
+ mydict={}
+ for x in self.keys():
+ myvalue = self[x]
+ if not isinstance(myvalue, basestring):
+ writemsg("!!! Non-string value in config: %s=%s\n" % \
+ (x, myvalue), noiselevel=-1)
+ continue
+ mydict[x] = myvalue
+ if not mydict.has_key("HOME") and mydict.has_key("BUILD_PREFIX"):
+ writemsg("*** HOME not set. Setting to "+mydict["BUILD_PREFIX"]+"\n")
+ mydict["HOME"]=mydict["BUILD_PREFIX"][:]
+
+ return mydict
+
+ def thirdpartymirrors(self):
+ if getattr(self, "_thirdpartymirrors", None) is None:
+ profileroots = [os.path.join(self["PORTDIR"], "profiles")]
+ for x in self["PORTDIR_OVERLAY"].split():
+ profileroots.insert(0, os.path.join(x, "profiles"))
+ thirdparty_lists = [grabdict(os.path.join(x, "thirdpartymirrors")) for x in profileroots]
+ self._thirdpartymirrors = stack_dictlist(thirdparty_lists, incremental=True)
+ return self._thirdpartymirrors
+
+ def archlist(self):
+ return flatten([[myarch, "~" + myarch] \
+ for myarch in self["PORTAGE_ARCHLIST"].split()])
+
+ def selinux_enabled(self):
+ if getattr(self, "_selinux_enabled", None) is None:
+ self._selinux_enabled = 0
+ if "selinux" in self["USE"].split():
+ if "selinux" in globals():
+ if selinux.is_selinux_enabled() == 1:
+ self._selinux_enabled = 1
+ else:
+ self._selinux_enabled = 0
+ else:
+ writemsg("!!! SELinux module not found. Please verify that it was installed.\n",
+ noiselevel=-1)
+ self._selinux_enabled = 0
+ if self._selinux_enabled == 0:
+ try:
+ del sys.modules["selinux"]
+ except KeyError:
+ pass
+ return self._selinux_enabled
+
+# XXX This would be to replace getstatusoutput completely.
+# XXX Issue: cannot block execution. Deadlock condition.
+def spawn(mystring, mysettings, debug=0, free=0, droppriv=0, sesandbox=0, **keywords):
+ """
+ Spawn a subprocess with extra portage-specific options.
+ Options include:
+
+ Sandbox: Sandbox means the spawned process will be limited in its ability to
+ read and write files (normally this means it is restricted to ${IMAGE}/)
+ SELinux Sandbox: Enables sandboxing on SELinux
+ Reduced Privileges: Drops privileges such that the process runs as portage:portage
+ instead of as root.
+
+ Notes: os.system cannot be used because it messes with signal handling. Instead we
+ use the portage_exec spawn* family of functions.
+
+ This function waits for the process to terminate.
+
+ @param mystring: Command to run
+ @type mystring: String
+ @param mysettings: Either a Dict of Key,Value pairs or an instance of portage.config
+ @type mysettings: Dictionary or config instance
+ @param debug: Ignored
+ @type debug: Boolean
+ @param free: Run this process free of the sandbox (disables sandboxing)
+ @type free: Boolean
+ @param droppriv: Drop to portage:portage when running this command
+ @type droppriv: Boolean
+ @param sesandbox: Enable SELinux Sandboxing (toggles a context switch)
+ @type sesandbox: Boolean
+ @param keywords: Extra options encoded as a dict, to be passed to spawn
+ @type keywords: Dictionary
+ @rtype: Integer
+ @returns:
+ 1. The return code of the spawned process.
+ """
+
+ if type(mysettings) == types.DictType:
+ env=mysettings
+ keywords["opt_name"]="[ %s ]" % "portage"
+ else:
+ check_config_instance(mysettings)
+ env=mysettings.environ()
+ keywords["opt_name"]="[%s]" % mysettings["PF"]
+
+ # The default policy for the sesandbox domain only allows entry (via exec)
+ # from shells and from binaries that belong to portage (the number of entry
+ # points is minimized). The "tee" binary is not among the allowed entry
+ # points, so it is spawned outside of the sesandbox domain and reads from a
+ # pipe between two domains.
+ logfile = keywords.get("logfile")
+ mypids = []
+ pw = None
+ if logfile:
+ del keywords["logfile"]
+ fd_pipes = keywords.get("fd_pipes")
+ if fd_pipes is None:
+ fd_pipes = {0:0, 1:1, 2:2}
+ elif 1 not in fd_pipes or 2 not in fd_pipes:
+ raise ValueError(fd_pipes)
+ pr, pw = os.pipe()
+ mypids.extend(portage_exec.spawn(('tee', '-i', '-a', logfile),
+ returnpid=True, fd_pipes={0:pr, 1:fd_pipes[1], 2:fd_pipes[2]}))
+ os.close(pr)
+ fd_pipes[1] = pw
+ fd_pipes[2] = pw
+ keywords["fd_pipes"] = fd_pipes
+
+ features = mysettings.features
+ # XXX: Negative RESTRICT word
+ droppriv=(droppriv and ("userpriv" in features) and not \
+ (("nouserpriv" in mysettings["RESTRICT"].split()) or \
+ ("userpriv" in mysettings["RESTRICT"].split())))
+
+ if droppriv and not uid and portage_gid and portage_uid:
+ keywords.update({"uid":portage_uid,"gid":portage_gid,"groups":userpriv_groups,"umask":002})
+
+ if not free:
+ free=((droppriv and "usersandbox" not in features) or \
+ (not droppriv and "sandbox" not in features and "usersandbox" not in features))
+
+ if free:
+ keywords["opt_name"] += " bash"
+ spawn_func = portage_exec.spawn_bash
+ else:
+ keywords["opt_name"] += " sandbox"
+ spawn_func = portage_exec.spawn_sandbox
+
+ if sesandbox:
+ con = selinux.getcontext()
+ con = con.replace(mysettings["PORTAGE_T"], mysettings["PORTAGE_SANDBOX_T"])
+ selinux.setexec(con)
+
+ returnpid = keywords.get("returnpid")
+ keywords["returnpid"] = True
+ try:
+ mypids.extend(spawn_func(mystring, env=env, **keywords))
+ finally:
+ if pw:
+ os.close(pw)
+ if sesandbox:
+ selinux.setexec(None)
+
+ if returnpid:
+ return mypids
+
+ while mypids:
+ pid = mypids.pop(0)
+ retval = os.waitpid(pid, 0)[1]
+ portage_exec.spawned_pids.remove(pid)
+ if retval != os.EX_OK:
+ for pid in mypids:
+ if os.waitpid(pid, os.WNOHANG) == (0,0):
+ os.kill(pid, signal.SIGTERM)
+ os.waitpid(pid, 0)
+ portage_exec.spawned_pids.remove(pid)
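+ # os.waitpid() status encoding: the low byte holds the terminating
+ # signal (if any) and the high byte holds the exit code, so a signal
+ # death (e.g. SIGTERM=15) is reported as 15 << 8 to keep it distinct
+ # from ordinary exit codes.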
+ if retval & 0xff:
+ return (retval & 0xff) << 8
+ return retval >> 8
+ return os.EX_OK
+
+def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks",use_locks=1, try_mirrors=1):
+ "fetch files. Will use digest file if available."
+
+ features = mysettings.features
+ # 'nomirror' is bad/negative logic. You RESTRICT mirroring, not no-mirroring.
+ if ("mirror" in mysettings["RESTRICT"].split()) or \
+ ("nomirror" in mysettings["RESTRICT"].split()):
+ if ("mirror" in features) and ("lmirror" not in features):
+ # lmirror should allow you to bypass mirror restrictions.
+ # XXX: This is not a good thing, and is temporary at best.
+ print ">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."
+ return 1
+
+ thirdpartymirrors = mysettings.thirdpartymirrors()
+
+ check_config_instance(mysettings)
+
+ custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
+ CUSTOM_MIRRORS_FILE.lstrip(os.path.sep)), recursive=1)
+
+ mymirrors=[]
+
+ if listonly or ("distlocks" not in features):
+ use_locks = 0
+
+ fetch_to_ro = 0
+ if "skiprocheck" in features:
+ fetch_to_ro = 1
+
+ if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
+ if use_locks:
+ writemsg(red("!!! For fetching to a read-only filesystem, " + \
+ "locking should be turned off.\n"), noiselevel=-1)
+ writemsg("!!! This can be done by adding -distlocks to " + \
+ "FEATURES in /etc/make.conf\n", noiselevel=-1)
+# use_locks = 0
+
+ # local mirrors are always added
+ if custommirrors.has_key("local"):
+ mymirrors += custommirrors["local"]
+
+ if ("nomirror" in mysettings["RESTRICT"].split()) or \
+ ("mirror" in mysettings["RESTRICT"].split()):
+ # We don't add any mirrors.
+ pass
+ else:
+ if try_mirrors:
+ mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]
+
+ mydigests = Manifest(
+ mysettings["O"], mysettings["DISTDIR"]).getTypeDigests("DIST")
+
+ fsmirrors = []
+ for x in range(len(mymirrors)-1,-1,-1):
+ if mymirrors[x] and mymirrors[x][0]=='/':
+ fsmirrors += [mymirrors[x]]
+ del mymirrors[x]
+
+ restrict_fetch = "fetch" in mysettings["RESTRICT"].split()
+ custom_local_mirrors = custommirrors.get("local", [])
+ if restrict_fetch:
+ # With fetch restriction, a normal uri may only be fetched from
+ # custom local mirrors (if available). A mirror:// uri may also
+ # be fetched from specific mirrors (effectively overriding fetch
+ # restriction, but only for specific mirrors).
+ locations = custom_local_mirrors
+ else:
+ locations = mymirrors
+
+ filedict={}
+ primaryuri_indexes={}
+ for myuri in myuris:
+ myfile=os.path.basename(myuri)
+ if not filedict.has_key(myfile):
+ filedict[myfile]=[]
+ for y in range(0,len(locations)):
+ filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
+ if myuri[:9]=="mirror://":
+ eidx = myuri.find("/", 9)
+ if eidx != -1:
+ mirrorname = myuri[9:eidx]
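+ # e.g. a (hypothetical) "mirror://gentoo/foo-1.0.tar.gz" yields
+ # mirrorname "gentoo" and the remainder "foo-1.0.tar.gz".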
+
+ # Try user-defined mirrors first
+ if custommirrors.has_key(mirrorname):
+ for cmirr in custommirrors[mirrorname]:
+ filedict[myfile].append(cmirr+"/"+myuri[eidx+1:])
+ # remove the mirrors we tried from the list of official mirrors
+ if cmirr.strip() in thirdpartymirrors[mirrorname]:
+ thirdpartymirrors[mirrorname].remove(cmirr)
+ # now try the official mirrors
+ if thirdpartymirrors.has_key(mirrorname):
+ shuffle(thirdpartymirrors[mirrorname])
+
+ for locmirr in thirdpartymirrors[mirrorname]:
+ filedict[myfile].append(locmirr+"/"+myuri[eidx+1:])
+
+ if not filedict[myfile]:
+ writemsg("No known mirror by the name: %s\n" % (mirrorname))
+ else:
+ writemsg("Invalid mirror definition in SRC_URI:\n", noiselevel=-1)
+ writemsg(" %s\n" % (myuri), noiselevel=-1)
+ else:
+ if restrict_fetch:
+ # Only fetching from specific mirrors is allowed.
+ continue
+ if "primaryuri" in mysettings["RESTRICT"].split():
+ # Use the source site first.
+ if primaryuri_indexes.has_key(myfile):
+ primaryuri_indexes[myfile] += 1
+ else:
+ primaryuri_indexes[myfile] = 0
+ filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
+ else:
+ filedict[myfile].append(myuri)
+
+ can_fetch=True
+
+ if listonly:
+ can_fetch = False
+
+ for var_name in ("FETCHCOMMAND", "RESUMECOMMAND"):
+ if not mysettings.get(var_name, None):
+ can_fetch = False
+
+ if can_fetch:
+ dirmode = 02070
+ filemode = 060
+ modemask = 02
+ distdir_dirs = [""]
+ if "distlocks" in features:
+ distdir_dirs.append(".locks")
+ try:
+
+ for x in distdir_dirs:
+ mydir = os.path.join(mysettings["DISTDIR"], x)
+ if portage_util.ensure_dirs(mydir, gid=portage_gid, mode=dirmode, mask=modemask):
+ writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
+ noiselevel=-1)
+ def onerror(e):
+ raise # bail out on the first error that occurs during recursion
+ if not apply_recursive_permissions(mydir,
+ gid=portage_gid, dirmode=dirmode, dirmask=modemask,
+ filemode=filemode, filemask=modemask, onerror=onerror):
+ raise portage_exception.OperationNotPermitted(
+ "Failed to apply recursive permissions for the portage group.")
+ except portage_exception.PortageException, e:
+ if not os.path.isdir(mysettings["DISTDIR"]):
+ writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ writemsg("!!! Directory Not Found: DISTDIR='%s'\n" % mysettings["DISTDIR"], noiselevel=-1)
+ writemsg("!!! Fetching will fail!\n", noiselevel=-1)
+
+ if can_fetch and \
+ not fetch_to_ro and \
+ not os.access(mysettings["DISTDIR"], os.W_OK):
+ writemsg("!!! No write access to '%s'\n" % mysettings["DISTDIR"],
+ noiselevel=-1)
+ can_fetch = False
+
+ if can_fetch and use_locks and locks_in_subdir:
+ distlocks_subdir = os.path.join(mysettings["DISTDIR"], locks_in_subdir)
+ if not os.access(distlocks_subdir, os.W_OK):
+ writemsg("!!! No write access to write to %s. Aborting.\n" % distlocks_subdir,
+ noiselevel=-1)
+ return 0
+ del distlocks_subdir
+ for myfile in filedict.keys():
+ """
+ fetched status
+ 0 nonexistent
+ 1 partially downloaded
+ 2 completely downloaded
+ """
+ myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
+ fetched=0
+ file_lock = None
+ if listonly:
+ writemsg_stdout("\n", noiselevel=-1)
+ else:
+ if use_locks and can_fetch:
+ if locks_in_subdir:
+ file_lock = portage_locks.lockfile(mysettings["DISTDIR"]+"/"+locks_in_subdir+"/"+myfile,wantnewlockfile=1)
+ else:
+ file_lock = portage_locks.lockfile(mysettings["DISTDIR"]+"/"+myfile,wantnewlockfile=1)
+ try:
+ if not listonly:
+ if fsmirrors and not os.path.exists(myfile_path):
+ for mydir in fsmirrors:
+ mirror_file = os.path.join(mydir, myfile)
+ try:
+ shutil.copyfile(mirror_file, myfile_path)
+ writemsg(_("Local mirror has file:" + \
+ " %(file)s\n" % {"file":myfile}))
+ break
+ except (IOError, OSError), e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+
+ try:
+ mystat = os.stat(myfile_path)
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ else:
+ try:
+ apply_secpass_permissions(
+ myfile_path, gid=portage_gid, mode=0664, mask=02,
+ stat_cached=mystat)
+ except portage_exception.PortageException, e:
+ if not os.access(myfile_path, os.R_OK):
+ writemsg("!!! Failed to adjust permissions:" + \
+ " %s\n" % str(e), noiselevel=-1)
+ if myfile not in mydigests:
+ # We don't have a digest, but the file exists. We must
+ # assume that it is fully downloaded.
+ continue
+ else:
+ if mystat.st_size < mydigests[myfile]["size"] and \
+ not restrict_fetch:
+ fetched = 1 # Try to resume this download.
+ else:
+ verified_ok, reason = portage_checksum.verify_all(
+ myfile_path, mydigests[myfile])
+ if not verified_ok:
+ writemsg("!!! Previously fetched" + \
+ " file: '%s'\n" % myfile, noiselevel=-1)
+ writemsg("!!! Reason: %s\n" % reason[0],
+ noiselevel=-1)
+ writemsg(("!!! Got: %s\n" + \
+ "!!! Expected: %s\n") % \
+ (reason[1], reason[2]), noiselevel=-1)
+ if can_fetch and not restrict_fetch:
+ writemsg("Refetching...\n\n",
+ noiselevel=-1)
+ os.unlink(myfile_path)
+ else:
+ eout = output.EOutput()
+ eout.quiet = \
+ mysettings.get("PORTAGE_QUIET", None) == "1"
+ for digest_name in mydigests[myfile]:
+ eout.ebegin(
+ "%s %s ;-)" % (myfile, digest_name))
+ eout.eend(0)
+ continue # fetch any remaining files
+
+ for loc in filedict[myfile]:
+ if listonly:
+ writemsg_stdout(loc+" ", noiselevel=-1)
+ continue
+ # allow different fetchcommands per protocol
+ protocol = loc[0:loc.find("://")]
+ if mysettings.has_key("FETCHCOMMAND_"+protocol.upper()):
+ fetchcommand=mysettings["FETCHCOMMAND_"+protocol.upper()]
+ else:
+ fetchcommand=mysettings["FETCHCOMMAND"]
+ if mysettings.has_key("RESUMECOMMAND_"+protocol.upper()):
+ resumecommand=mysettings["RESUMECOMMAND_"+protocol.upper()]
+ else:
+ resumecommand=mysettings["RESUMECOMMAND"]
+
+ fetchcommand=fetchcommand.replace("${DISTDIR}",mysettings["DISTDIR"])
+ resumecommand=resumecommand.replace("${DISTDIR}",mysettings["DISTDIR"])
+
+ if not can_fetch:
+ if fetched != 2:
+ if fetched == 0:
+ writemsg("!!! File %s isn't fetched but unable to get it.\n" % myfile,
+ noiselevel=-1)
+ else:
+ writemsg("!!! File %s isn't fully fetched, but unable to complete it\n" % myfile,
+ noiselevel=-1)
+ for var_name in ("FETCHCOMMAND", "RESUMECOMMAND"):
+ if not mysettings.get(var_name, None):
+ writemsg(("!!! %s is unset. It should " + \
+ "have been defined in /etc/make.globals.\n") \
+ % var_name, noiselevel=-1)
+ return 0
+ else:
+ continue
+
+ if fetched != 2:
+ #we either need to resume or start the download
+ #you can't use "continue" when you're inside a "try" block
+ if fetched==1:
+ #resume mode:
+ writemsg(">>> Resuming download...\n")
+ locfetch=resumecommand
+ else:
+ #normal mode:
+ locfetch=fetchcommand
+ writemsg_stdout(">>> Downloading '%s'\n" % \
+ re.sub(r'//(.+):.+@(.+)/',r'//\1:*password*@\2/', loc))
+ myfetch=locfetch.replace("${URI}",loc)
+ myfetch=myfetch.replace("${FILE}",myfile)
+
+ spawn_keywords = {}
+ if "userfetch" in mysettings.features and \
+ os.getuid() == 0 and portage_gid and portage_uid:
+ spawn_keywords.update({
+ "uid" : portage_uid,
+ "gid" : portage_gid,
+ "groups" : userpriv_groups,
+ "umask" : 002})
+
+ try:
+
+ if mysettings.selinux_enabled():
+ con = selinux.getcontext()
+ con = con.replace(mysettings["PORTAGE_T"], mysettings["PORTAGE_FETCH_T"])
+ selinux.setexec(con)
+
+ myret = portage_exec.spawn_bash(myfetch,
+ env=mysettings.environ(), **spawn_keywords)
+
+ if mysettings.selinux_enabled():
+ selinux.setexec(None)
+
+ finally:
+ try:
+ apply_secpass_permissions(myfile_path,
+ gid=portage_gid, mode=0664, mask=02)
+ except portage_exception.FileNotFound, e:
+ pass
+ except portage_exception.PortageException, e:
+ if not os.access(myfile_path, os.R_OK):
+ writemsg("!!! Failed to adjust permissions:" + \
+ " %s\n" % str(e), noiselevel=-1)
+
+ if mydigests!=None and mydigests.has_key(myfile):
+ try:
+ mystat = os.stat(myfile_path)
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ fetched = 0
+ else:
+ # No exception means the file exists. Let digestcheck() report
+ # size or checksum errors appropriately.
+ if (mystat[stat.ST_SIZE]<mydigests[myfile]["size"]):
+ # Fetch failed... Try the next one... Kill 404 files though.
+ if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
+ html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
+ if html404.search(open(mysettings["DISTDIR"]+"/"+myfile).read()):
+ try:
+ os.unlink(mysettings["DISTDIR"]+"/"+myfile)
+ writemsg(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n")
+ fetched = 0
+ continue
+ except (IOError, OSError):
+ pass
+ fetched = 1
+ continue
+ if not fetchonly:
+ fetched=2
+ break
+ else:
+ # File is the correct size--check the checksums for the fetched
+ # file NOW, for those users who don't have a stable/continuous
+ # net connection. This way we have a chance to try to download
+ # from another mirror...
+ verified_ok,reason = portage_checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
+ if not verified_ok:
+ print reason
+ writemsg("!!! Fetched file: "+str(myfile)+" VERIFY FAILED!\n",
+ noiselevel=-1)
+ writemsg("!!! Reason: "+reason[0]+"\n",
+ noiselevel=-1)
+ writemsg("!!! Got: %s\n!!! Expected: %s\n" % \
+ (reason[1], reason[2]), noiselevel=-1)
+ writemsg("Removing corrupt distfile...\n", noiselevel=-1)
+ os.unlink(mysettings["DISTDIR"]+"/"+myfile)
+ fetched=0
+ else:
+ eout = output.EOutput()
+ eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
+ for x_key in mydigests[myfile].keys():
+ eout.ebegin("%s %s ;-)" % (myfile, x_key))
+ eout.eend(0)
+ fetched=2
+ break
+ else:
+ if not myret:
+ fetched=2
+ break
+ elif mydigests!=None:
+ writemsg("No digest file available and download failed.\n\n",
+ noiselevel=-1)
+ finally:
+ if use_locks and file_lock:
+ portage_locks.unlockfile(file_lock)
+
+ if listonly:
+ writemsg_stdout("\n", noiselevel=-1)
+ if fetched != 2:
+ if restrict_fetch:
+ print "\n!!!", mysettings["CATEGORY"] + "/" + \
+ mysettings["PF"], "has fetch restriction turned on."
+ print "!!! This probably means that this " + \
+ "ebuild's files must be downloaded"
+ print "!!! manually. See the comments in" + \
+ " the ebuild for more information.\n"
+ spawn(EBUILD_SH_BINARY + " nofetch", mysettings)
+ elif listonly:
+ continue
+ elif not filedict[myfile]:
+ writemsg("Warning: No mirrors available for file" + \
+ " '%s'\n" % (myfile), noiselevel=-1)
+ else:
+ writemsg("!!! Couldn't download '%s'. Aborting.\n" % myfile,
+ noiselevel=-1)
+ return 0
+ return 1
+
+def digestgen(myarchives, mysettings, overwrite=1, manifestonly=0, myportdb=None):
+ """
+ Generates a digest file if missing. Assumes all files are available.
+ DEPRECATED: this is now only a compatibility wrapper for
+ portage_manifest.Manifest()
+ NOTE: manifestonly and overwrite are useless with manifest2 and
+ are therefore ignored."""
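+ # Call sketch (hypothetical arguments; myportdb should normally be
+ # supplied by the caller rather than falling back to the global portdb):
+ #   digestgen(myarchives, mysettings, myportdb=portdb)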
+ if myportdb is None:
+ writemsg("Warning: myportdb not specified to digestgen\n")
+ global portdb
+ myportdb = portdb
+ global _doebuild_manifest_exempt_depend
+ try:
+ _doebuild_manifest_exempt_depend += 1
+ distfiles_map = {}
+ fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
+ for cpv, fetchlist in fetchlist_dict.iteritems():
+ for myfile in fetchlist:
+ distfiles_map.setdefault(myfile, []).append(cpv)
+ mf = Manifest(mysettings["O"], mysettings["DISTDIR"],
+ fetchlist_dict=fetchlist_dict)
+ # Don't require all hashes since that can trigger excessive
+ # fetches when sufficient digests already exist. To ease transition
+ # while Manifest 1 is being removed, only require hashes that will
+ # exist before and after the transition.
+ required_hash_types = set(portage_const.MANIFEST1_HASH_FUNCTIONS
+ ).intersection(portage_const.MANIFEST2_HASH_FUNCTIONS)
+ required_hash_types.add("size")
+ dist_hashes = mf.fhashdict.get("DIST", {})
+ missing_hashes = set()
+ for myfile in distfiles_map:
+ myhashes = dist_hashes.get(myfile)
+ if not myhashes:
+ missing_hashes.add(myfile)
+ continue
+ if required_hash_types.difference(myhashes):
+ missing_hashes.add(myfile)
+ if missing_hashes:
+ missing_files = []
+ for myfile in missing_hashes:
+ try:
+ os.stat(os.path.join(mysettings["DISTDIR"], myfile))
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ missing_files.append(myfile)
+ if missing_files:
+ mytree = os.path.realpath(os.path.dirname(
+ os.path.dirname(mysettings["O"])))
+ myuris = []
+ for myfile in missing_files:
+ for cpv in distfiles_map[myfile]:
+ alluris, aalist = myportdb.getfetchlist(
+ cpv, mytree=mytree, all=True,
+ mysettings=mysettings)
+ for uri in alluris:
+ if os.path.basename(uri) == myfile:
+ myuris.append(uri)
+ if not fetch(myuris, mysettings):
+ writemsg(("!!! File %s doesn't exist, can't update " + \
+ "Manifest\n") % myfile, noiselevel=-1)
+ return 0
+ writemsg_stdout(">>> Creating Manifest for %s\n" % mysettings["O"])
+ try:
+ mf.create(requiredDistfiles=myarchives,
+ assumeDistHashesSometimes=True,
+ assumeDistHashesAlways=(
+ "assume-digests" in mysettings.features))
+ except portage_exception.FileNotFound, e:
+ writemsg(("!!! File %s doesn't exist, can't update " + \
+ "Manifest\n") % e, noiselevel=-1)
+ return 0
+ mf.write(sign=False)
+ if "assume-digests" not in mysettings.features:
+ distlist = mf.fhashdict.get("DIST", {}).keys()
+ distlist.sort()
+ auto_assumed = []
+ for filename in distlist:
+ if not os.path.exists(
+ os.path.join(mysettings["DISTDIR"], filename)):
+ auto_assumed.append(filename)
+ if auto_assumed:
+ mytree = os.path.realpath(
+ os.path.dirname(os.path.dirname(mysettings["O"])))
+ cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
+ pkgs = myportdb.cp_list(cp, mytree=mytree)
+ pkgs.sort()
+ writemsg_stdout(" digest.assumed" + output.colorize("WARN",
+ str(len(auto_assumed)).rjust(18)) + "\n")
+ for pkg_key in pkgs:
+ fetchlist = myportdb.getfetchlist(pkg_key,
+ mysettings=mysettings, all=True, mytree=mytree)[1]
+ pv = pkg_key.split("/")[1]
+ for filename in auto_assumed:
+ if filename in fetchlist:
+ writemsg_stdout(
+ " digest-%s::%s\n" % (pv, filename))
+ return 1
+ finally:
+ _doebuild_manifest_exempt_depend -= 1
+
+def digestParseFile(myfilename, mysettings=None):
+ """(filename) -- Parses a given file for entries matching:
+ <checksumkey> <checksum_hex_string> <filename> <filesize>
+ Ignores lines that don't start with a valid checksum identifier
+ and returns a dict with the filenames as keys and {checksumkey:checksum}
+ as the values.
+ DEPRECATED: this function is now only a compatibility wrapper for
+ portage_manifest.Manifest()."""
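+ # Example of an entry in the described format (hypothetical values):
+ #   MD5 5d41402abc4b2a76b9719d911017c592 foo-1.0.tar.gz 10240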
+
+ mysplit = myfilename.split(os.sep)
+ if mysplit[-2] == "files" and mysplit[-1].startswith("digest-"):
+ pkgdir = os.sep + os.sep.join(mysplit[:-2]).strip(os.sep)
+ elif mysplit[-1] == "Manifest":
+ pkgdir = os.sep + os.sep.join(mysplit[:-1]).strip(os.sep)
+
+ if mysettings is None:
+ global settings
+ mysettings = config(clone=settings)
+
+ return Manifest(pkgdir, mysettings["DISTDIR"]).getDigests()
+
+def digestcheck(myfiles, mysettings, strict=0, justmanifest=0):
+ """Verifies checksums. Assumes all files have been downloaded.
+ DEPRECATED: this is now only a compatibility wrapper for
+ portage_manifest.Manifest()."""
+ if not strict:
+ return 1
+ pkgdir = mysettings["O"]
+ manifest_path = os.path.join(pkgdir, "Manifest")
+ if not os.path.exists(manifest_path):
+ writemsg("!!! Manifest file not found: '%s'\n" % manifest_path,
+ noiselevel=-1)
+ if strict:
+ return 0
+ mf = Manifest(pkgdir, mysettings["DISTDIR"])
+ eout = output.EOutput()
+ eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
+ try:
+ eout.ebegin("checking ebuild checksums ;-)")
+ mf.checkTypeHashes("EBUILD")
+ eout.eend(0)
+ eout.ebegin("checking auxfile checksums ;-)")
+ mf.checkTypeHashes("AUX")
+ eout.eend(0)
+ eout.ebegin("checking miscfile checksums ;-)")
+ mf.checkTypeHashes("MISC", ignoreMissingFiles=True)
+ eout.eend(0)
+ for f in myfiles:
+ eout.ebegin("checking %s ;-)" % f)
+ mf.checkFileHashes(mf.findFile(f), f)
+ eout.eend(0)
+ except KeyError, e:
+ eout.eend(1)
+ writemsg("\n!!! Missing digest for %s\n" % str(e), noiselevel=-1)
+ return 0
+ except portage_exception.FileNotFound, e:
+ eout.eend(1)
+ writemsg("\n!!! A file listed in the Manifest could not be found: %s\n" % str(e),
+ noiselevel=-1)
+ return 0
+ except portage_exception.DigestException, e:
+ eout.eend(1)
+ writemsg("\n!!! Digest verification failed:\n", noiselevel=-1)
+ writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
+ writemsg("!!! Reason: %s\n" % e.value[1], noiselevel=-1)
+ writemsg("!!! Got: %s\n" % e.value[2], noiselevel=-1)
+ writemsg("!!! Expected: %s\n" % e.value[3], noiselevel=-1)
+ return 0
+ # Make sure that all of the ebuilds are actually listed in the Manifest.
+ for f in os.listdir(pkgdir):
+ if f.endswith(".ebuild") and not mf.hasFile("EBUILD", f):
+ writemsg("!!! A file is not listed in the Manifest: '%s'\n" % \
+ os.path.join(pkgdir, f), noiselevel=-1)
+ return 0
+ """ epatch will just grab all the patches out of a directory, so we have to
+ make sure there aren't any foreign files that it might grab."""
+ filesdir = os.path.join(pkgdir, "files")
+ for parent, dirs, files in os.walk(filesdir):
+ # Iterate over a copy, since removing from the list being iterated
+ # would skip the entry that follows each removed one.
+ for d in list(dirs):
+ if d.startswith(".") or d == "CVS":
+ dirs.remove(d)
+ for f in files:
+ if f.startswith("."):
+ continue
+ f = os.path.join(parent, f)[len(filesdir) + 1:]
+ file_type = mf.findFile(f)
+ if file_type != "AUX" and not f.startswith("digest-"):
+ writemsg("!!! A file is not listed in the Manifest: '%s'\n" % \
+ os.path.join(filesdir, f), noiselevel=-1)
+ return 0
+ return 1
+
+# parse actionmap to spawn ebuild with the appropriate args
+def spawnebuild(mydo,actionmap,mysettings,debug,alwaysdep=0,logfile=None):
+ if alwaysdep or "noauto" not in mysettings.features:
+ # process dependency first
+ if "dep" in actionmap[mydo].keys():
+ retval=spawnebuild(actionmap[mydo]["dep"],actionmap,mysettings,debug,alwaysdep=alwaysdep,logfile=logfile)
+ if retval:
+ return retval
+ kwargs = actionmap[mydo]["args"]
+ mysettings["EBUILD_PHASE"] = mydo
+ phase_retval = spawn(actionmap[mydo]["cmd"] % mydo, mysettings, debug=debug, logfile=logfile, **kwargs)
+ mysettings["EBUILD_PHASE"] = ""
+
+ if not kwargs["droppriv"] and secpass >= 2:
+ """ Privileged phases may have left files that need to be made
+ writable to a less privileged user."""
+ apply_recursive_permissions(mysettings["T"],
+ uid=portage_uid, gid=portage_gid, dirmode=070, dirmask=0,
+ filemode=060, filemask=0)
+
+ if phase_retval == os.EX_OK:
+ if mydo == "install":
+ # User and group bits that match the "portage" user or group are
+ # automatically mapped to PORTAGE_INST_UID and PORTAGE_INST_GID if
+ # necessary. The chown system call may clear S_ISUID and S_ISGID
+ # bits, so those bits are restored if necessary.
+ inst_uid = int(mysettings["PORTAGE_INST_UID"])
+ inst_gid = int(mysettings["PORTAGE_INST_GID"])
+ for parent, dirs, files in os.walk(mysettings["D"]):
+ for fname in chain(dirs, files):
+ fpath = os.path.join(parent, fname)
+ mystat = os.lstat(fpath)
+ if mystat.st_uid != portage_uid and \
+ mystat.st_gid != portage_gid:
+ continue
+ myuid = -1
+ mygid = -1
+ if mystat.st_uid == portage_uid:
+ myuid = inst_uid
+ if mystat.st_gid == portage_gid:
+ mygid = inst_gid
+ apply_secpass_permissions(fpath, uid=myuid, gid=mygid,
+ mode=mystat.st_mode, stat_cached=mystat,
+ follow_links=False)
+ mycommand = " ".join([MISC_SH_BINARY, "install_qa_check", "install_symlink_html_docs"])
+ qa_retval = spawn(mycommand, mysettings, debug=debug, logfile=logfile, **kwargs)
+ if qa_retval:
+ writemsg("!!! install_qa_check failed; exiting.\n",
+ noiselevel=-1)
+ return qa_retval
+ return phase_retval
+
+
+def eapi_is_supported(eapi):
+ return str(eapi).strip() == str(portage_const.EAPI).strip()
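+# Note: only the exact EAPI value from portage_const is accepted; anything
+# else (older or newer) is treated as unsupported.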
+
+def doebuild_environment(myebuild, mydo, myroot, mysettings, debug, use_cache, mydbapi):
+
+ ebuild_path = os.path.abspath(myebuild)
+ pkg_dir = os.path.dirname(ebuild_path)
+
+ if mysettings.configdict["pkg"].has_key("CATEGORY"):
+ cat = mysettings.configdict["pkg"]["CATEGORY"]
+ else:
+ cat = os.path.basename(normalize_path(os.path.join(pkg_dir, "..")))
+ mypv = os.path.basename(ebuild_path)[:-7]
+ mycpv = cat+"/"+mypv
+ mysplit=pkgsplit(mypv,silent=0)
+ if mysplit is None:
+ raise portage_exception.IncorrectParameter(
+ "Invalid ebuild path: '%s'" % myebuild)
+
+ if mydo != "depend":
+ """For performance reasons, setcpv only triggers reset when it
+ detects a package-specific change in config. For the ebuild
+ environment, a reset call is forced in order to ensure that the
+ latest env.d variables are used."""
+ mysettings.reset(use_cache=use_cache)
+ mysettings.setcpv(mycpv, use_cache=use_cache, mydb=mydbapi)
+
+ mysettings["EBUILD_PHASE"] = mydo
+
+ mysettings["PORTAGE_MASTER_PID"] = str(os.getpid())
+
+ # We are disabling user-specific bashrc files.
+ mysettings["BASH_ENV"] = INVALID_ENV_FILE
+
+ if debug: # Otherwise it overrides emerge's settings.
+ # We have no other way to set debug... debug can't be passed in
+ # due to how it's coded... Don't overwrite this so we can use it.
+ mysettings["PORTAGE_DEBUG"] = "1"
+
+ mysettings["ROOT"] = myroot
+ mysettings["STARTDIR"] = getcwd()
+
+ mysettings["EBUILD"] = ebuild_path
+ mysettings["O"] = pkg_dir
+ mysettings.configdict["pkg"]["CATEGORY"] = cat
+ mysettings["FILESDIR"] = pkg_dir+"/files"
+ mysettings["PF"] = mypv
+
+ mysettings["ECLASSDIR"] = mysettings["PORTDIR"]+"/eclass"
+ mysettings["SANDBOX_LOG"] = mycpv.replace("/", "_-_")
+
+ mysettings["PROFILE_PATHS"] = "\n".join(mysettings.profiles)+"\n"+CUSTOM_PROFILE_PATH
+ mysettings["P"] = mysplit[0]+"-"+mysplit[1]
+ mysettings["PN"] = mysplit[0]
+ mysettings["PV"] = mysplit[1]
+ mysettings["PR"] = mysplit[2]
+
+ if portage_util.noiselimit < 0:
+ mysettings["PORTAGE_QUIET"] = "1"
+
+ if mydo != "depend":
+ eapi, mysettings["INHERITED"], mysettings["SLOT"], mysettings["RESTRICT"] = \
+ mydbapi.aux_get(mycpv, ["EAPI", "INHERITED", "SLOT", "RESTRICT"])
+ if not eapi_is_supported(eapi):
+ # can't do anything with this.
+ raise portage_exception.UnsupportedAPIException(mycpv, eapi)
+ mysettings["PORTAGE_RESTRICT"] = " ".join(flatten(
+ portage_dep.use_reduce(portage_dep.paren_reduce(
+ mysettings["RESTRICT"]), uselist=mysettings["USE"].split())))
+
+ if mysplit[2] == "r0":
+ mysettings["PVR"]=mysplit[1]
+ else:
+ mysettings["PVR"]=mysplit[1]+"-"+mysplit[2]
+
+ if mysettings.has_key("PATH"):
+ mysplit=mysettings["PATH"].split(":")
+ else:
+ mysplit=[]
+ if PORTAGE_BIN_PATH not in mysplit:
+ mysettings["PATH"]=PORTAGE_BIN_PATH+":"+mysettings["PATH"]
+
+ # Sandbox needs canonical paths.
+ mysettings["PORTAGE_TMPDIR"] = os.path.realpath(
+ mysettings["PORTAGE_TMPDIR"])
+ mysettings["BUILD_PREFIX"] = mysettings["PORTAGE_TMPDIR"]+"/portage"
+ mysettings["PKG_TMPDIR"] = mysettings["PORTAGE_TMPDIR"]+"/binpkgs"
+
+ # Package {pre,post}inst and {pre,post}rm may overlap, so they must have separate
+ # locations in order to prevent interference.
+ if mydo in ("unmerge", "prerm", "postrm", "cleanrm"):
+ mysettings["PORTAGE_BUILDDIR"] = os.path.join(
+ mysettings["PKG_TMPDIR"],
+ mysettings["CATEGORY"], mysettings["PF"])
+ else:
+ mysettings["PORTAGE_BUILDDIR"] = os.path.join(
+ mysettings["BUILD_PREFIX"],
+ mysettings["CATEGORY"], mysettings["PF"])
+
+ mysettings["HOME"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "homedir")
+ mysettings["WORKDIR"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "work")
+ mysettings["D"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "image") + os.sep
+ mysettings["T"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "temp")
+
+ mysettings["PORTAGE_BASHRC"] = os.path.join(
+ mysettings["PORTAGE_CONFIGROOT"], EBUILD_SH_ENV_FILE.lstrip(os.path.sep))
+
+ #set up KV variable -- DEP SPEEDUP :: Don't waste time. Keep var persistent.
+ if (mydo!="depend") or not mysettings.has_key("KV"):
+ mykv,err1=ExtractKernelVersion(os.path.join(myroot, "usr/src/linux"))
+ if mykv:
+ # Regular source tree
+ mysettings["KV"]=mykv
+ else:
+ mysettings["KV"]=""
+
+ if (mydo!="depend") or not mysettings.has_key("KVERS"):
+ # os.uname()[2] is the kernel release string itself.
+ myso=os.uname()[2]
+ mysettings["KVERS"]=myso
+
+ # Allow color.map to control colors associated with einfo, ewarn, etc...
+ mycolors = []
+ for c in ("GOOD", "WARN", "BAD", "HILITE", "BRACKET"):
+ mycolors.append("%s=$'%s'" % (c, output.codes[c]))
+ mysettings["PORTAGE_COLORMAP"] = "\n".join(mycolors)
+
+def prepare_build_dirs(myroot, mysettings, cleanup):
+
+ clean_dirs = [mysettings["HOME"]]
+
+ # We enable cleanup when we want to make sure old cruft (such as the old
+ # environment) doesn't interfere with the current phase.
+ if cleanup:
+ clean_dirs.append(mysettings["T"])
+
+ for clean_dir in clean_dirs:
+ try:
+ shutil.rmtree(clean_dir)
+ except OSError, oe:
+ if errno.ENOENT == oe.errno:
+ pass
+ elif errno.EPERM == oe.errno:
+ writemsg("%s\n" % oe, noiselevel=-1)
+ writemsg("Operation Not Permitted: rmtree('%s')\n" % \
+ clean_dir, noiselevel=-1)
+ return 1
+ else:
+ raise
+
+ def makedirs(dir_path):
+ try:
+ os.makedirs(dir_path)
+ except OSError, oe:
+ if errno.EEXIST == oe.errno:
+ pass
+ elif errno.EPERM == oe.errno:
+ writemsg("%s\n" % oe, noiselevel=-1)
+ writemsg("Operation Not Permitted: makedirs('%s')\n" % \
+ dir_path, noiselevel=-1)
+ return False
+ else:
+ raise
+ return True
+
+ mysettings["PKG_LOGDIR"] = os.path.join(mysettings["T"], "logging")
+
+ mydirs = [os.path.dirname(mysettings["PORTAGE_BUILDDIR"])]
+ mydirs.append(os.path.dirname(mydirs[-1]))
+
+ try:
+ for mydir in mydirs:
+ portage_util.ensure_dirs(mydir)
+ portage_util.apply_secpass_permissions(mydir,
+ gid=portage_gid, uid=portage_uid, mode=070, mask=0)
+ for dir_key in ("PORTAGE_BUILDDIR", "HOME", "PKG_LOGDIR", "T"):
+ """These directories don't necessarily need to be group writable.
+ However, the setup phase is commonly run as a privileged user prior
+ to the other phases being run by an unprivileged user. Currently,
+ we use the portage group to ensure that the unprivileged user still
+ has write access to these directories in any case."""
+ portage_util.ensure_dirs(mysettings[dir_key], mode=0775)
+ portage_util.apply_secpass_permissions(mysettings[dir_key],
+ uid=portage_uid, gid=portage_gid)
+ except portage_exception.PermissionDenied, e:
+ writemsg("Permission Denied: %s\n" % str(e), noiselevel=-1)
+ return 1
+ except portage_exception.OperationNotPermitted, e:
+ writemsg("Operation Not Permitted: %s\n" % str(e), noiselevel=-1)
+ return 1
+ except portage_exception.FileNotFound, e:
+ writemsg("File Not Found: '%s'\n" % str(e), noiselevel=-1)
+ return 1
+
+ features_dirs = {
+ "ccache":{
+ "basedir_var":"CCACHE_DIR",
+ "default_dir":os.path.join(mysettings["PORTAGE_TMPDIR"], "ccache"),
+ "always_recurse":False},
+ "confcache":{
+ "basedir_var":"CONFCACHE_DIR",
+ "default_dir":os.path.join(mysettings["PORTAGE_TMPDIR"], "confcache"),
+ "always_recurse":True},
+ "distcc":{
+ "basedir_var":"DISTCC_DIR",
+ "default_dir":os.path.join(mysettings["BUILD_PREFIX"], ".distcc"),
+ "subdirs":("lock", "state"),
+ "always_recurse":True}
+ }
+ dirmode = 02070
+ filemode = 060
+ modemask = 02
+ for myfeature, kwargs in features_dirs.iteritems():
+ if myfeature in mysettings.features:
+ basedir = mysettings[kwargs["basedir_var"]]
+ if basedir == "":
+ basedir = kwargs["default_dir"]
+ mysettings[kwargs["basedir_var"]] = basedir
+ try:
+ mydirs = [mysettings[kwargs["basedir_var"]]]
+ if "subdirs" in kwargs:
+ for subdir in kwargs["subdirs"]:
+ mydirs.append(os.path.join(basedir, subdir))
+ for mydir in mydirs:
+ modified = portage_util.ensure_dirs(mydir,
+ gid=portage_gid, mode=dirmode, mask=modemask)
+ # To avoid excessive recursive stat calls, we trigger
+ # recursion when the top level directory does not initially
+ # match our permission requirements.
+ if modified or kwargs["always_recurse"]:
+ if modified:
+ writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
+ noiselevel=-1)
+ def onerror(e):
+ raise # The feature is disabled if a single error
+ # occurs during permissions adjustment.
+ if not apply_recursive_permissions(mydir,
+ gid=portage_gid, dirmode=dirmode, dirmask=modemask,
+ filemode=filemode, filemask=modemask, onerror=onerror):
+ raise portage_exception.OperationNotPermitted(
+ "Failed to apply recursive permissions for the portage group.")
+ except portage_exception.PortageException, e:
+ mysettings.features.remove(myfeature)
+ mysettings["FEATURES"] = " ".join(mysettings.features)
+ writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ writemsg("!!! Failed resetting perms on %s='%s'\n" % \
+ (kwargs["basedir_var"], basedir), noiselevel=-1)
+ writemsg("!!! Disabled FEATURES='%s'\n" % myfeature,
+ noiselevel=-1)
+ time.sleep(5)
+
+ workdir_mode = 0700
+ try:
+ mode = mysettings["PORTAGE_WORKDIR_MODE"]
+ if mode.isdigit():
+ parsed_mode = int(mode, 8)
+ elif mode == "":
+ raise KeyError()
+ else:
+ raise ValueError()
+ if parsed_mode & 07777 != parsed_mode:
+ raise ValueError("Invalid file mode: %s" % mode)
+ else:
+ workdir_mode = parsed_mode
+ except KeyError, e:
+ writemsg("!!! PORTAGE_WORKDIR_MODE is unset, using %s.\n" % oct(workdir_mode))
+ except ValueError, e:
+ if len(str(e)) > 0:
+ writemsg("%s\n" % e)
+ writemsg("!!! Unable to parse PORTAGE_WORKDIR_MODE='%s', using %s.\n" % \
+ (mysettings["PORTAGE_WORKDIR_MODE"], oct(workdir_mode)))
+ mysettings["PORTAGE_WORKDIR_MODE"] = oct(workdir_mode)
+ try:
+ apply_secpass_permissions(mysettings["WORKDIR"],
+ uid=portage_uid, gid=portage_gid, mode=workdir_mode)
+ except portage_exception.FileNotFound:
+ pass # ebuild.sh will create it
+
+ if mysettings.get("PORT_LOGDIR", "") == "":
+ while "PORT_LOGDIR" in mysettings:
+ del mysettings["PORT_LOGDIR"]
+ if "PORT_LOGDIR" in mysettings:
+ try:
+ portage_util.ensure_dirs(mysettings["PORT_LOGDIR"],
+ uid=portage_uid, gid=portage_gid, mode=02770)
+ except portage_exception.PortageException, e:
+ writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ writemsg("!!! Permission issues with PORT_LOGDIR='%s'\n" % \
+ mysettings["PORT_LOGDIR"], noiselevel=-1)
+ writemsg("!!! Disabling logging.\n", noiselevel=-1)
+ while "PORT_LOGDIR" in mysettings:
+ del mysettings["PORT_LOGDIR"]
+ if "PORT_LOGDIR" in mysettings:
+ logid_path = os.path.join(mysettings["PORTAGE_BUILDDIR"], ".logid")
+ if not os.path.exists(logid_path):
+ f = open(logid_path, "w")
+ f.close()
+ del f
+ logid_time = time.strftime("%Y%m%d-%H%M%S",
+ time.gmtime(os.stat(logid_path).st_mtime))
+ mysettings["PORTAGE_LOG_FILE"] = os.path.join(
+ mysettings["PORT_LOGDIR"], "%s:%s:%s.log" % \
+ (mysettings["CATEGORY"], mysettings["PF"], logid_time))
+ del logid_path, logid_time
+ else:
+ # When sesandbox is enabled, only log if PORT_LOGDIR is explicitly
+ # enabled since it is possible that local SELinux security policies
+ # do not allow output to be piped out of the sesandbox domain.
+ if not (mysettings.selinux_enabled() and \
+ "sesandbox" in mysettings.features):
+ mysettings["PORTAGE_LOG_FILE"] = os.path.join(
+ mysettings["T"], "build.log")
+
+_doebuild_manifest_exempt_depend = 0
+_doebuild_manifest_checked = None
+
+def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
+ fetchonly=0, cleanup=0, dbkey=None, use_cache=1, fetchall=0, tree=None,
+ mydbapi=None, vartree=None, prev_mtimes=None):
+
+ """
+ Wrapper function that invokes specific ebuild phases through the spawning
+ of ebuild.sh
+
+ @param myebuild: name of the ebuild to invoke the phase on (CPV)
+ @type myebuild: String
+ @param mydo: Phase to run
+ @type mydo: String
+ @param myroot: $ROOT (usually '/', see man make.conf)
+ @type myroot: String
+ @param mysettings: Portage Configuration
+ @type mysettings: instance of portage.config
+ @param debug: Turns on various debug information (eg, debug for spawn)
+ @type debug: Boolean
+ @param listonly: Used to wrap fetch(); passed such that fetch only lists files required.
+ @type listonly: Boolean
+ @param fetchonly: Used to wrap fetch(); passed such that files are only fetched (no other actions)
+ @type fetchonly: Boolean
+ @param cleanup: Passed to prepare_build_dirs; when true, old build state (such as ${T}) is cleaned out first
+ @type cleanup: Boolean
+ @param dbkey: A dict (usually keys and values from the depend phase, such as KEYWORDS, USE, etc..)
+ @type dbkey: Dict or String
+ @param use_cache: Enables the cache
+ @type use_cache: Boolean
+ @param fetchall: Used to wrap fetch(), fetches all URI's (even ones invalid due to USE conditionals)
+ @type fetchall: Boolean
+ @param tree: Which tree to use ('vartree','porttree','bintree', etc..), defaults to 'porttree'
+ @type tree: String
+ @param mydbapi: a dbapi instance to pass to various functions; this should be a portdbapi instance.
+ @type mydbapi: portdbapi instance
+ @param vartree: An instance of vartree; used for aux_get calls, defaults to db[myroot]['vartree']
+ @type vartree: vartree instance
+ @param prev_mtimes: A dict of { filename:mtime } keys used by merge() to do config_protection
+ @type prev_mtimes: dictionary
+ @rtype: Boolean
+ @returns:
+ 1. 0 for success
+ 2. 1 for error
+
+ Most errors have an accompanying error message.
+
+ listonly and fetchonly are only really necessary for operations involving 'fetch'
+ prev_mtimes are only necessary for merge operations.
+ Other variables may not be strictly required, many have defaults that are set inside of doebuild.
+
+ """
+
+ if not tree:
+ writemsg("Warning: tree not specified to doebuild\n")
+ tree = "porttree"
+ global db
+
+ # chunked out deps for each phase, so that ebuild binary can use it
+ # to collapse targets down.
+ actionmap_deps={
+ "depend": [],
+ "setup": [],
+ "unpack": ["setup"],
+ "compile":["unpack"],
+ "test": ["compile"],
+ "install":["test"],
+ "rpm": ["install"],
+ "package":["install"],
+ }
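+ # e.g. requesting "test" walks the chain above and (unless "noauto" is
+ # in FEATURES) runs setup -> unpack -> compile first.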
+
+ if mydbapi is None:
+ mydbapi = db[myroot][tree].dbapi
+
+ if vartree is None and mydo in ("merge", "qmerge", "unmerge"):
+ vartree = db[myroot]["vartree"]
+
+ features = mysettings.features
+
+ validcommands = ["help","clean","prerm","postrm","cleanrm","preinst","postinst",
+ "config","setup","depend","fetch","digest",
+ "unpack","compile","test","install","rpm","qmerge","merge",
+ "package","unmerge", "manifest"]
+
+ if mydo not in validcommands:
+ validcommands.sort()
+ writemsg("!!! doebuild: '%s' is not one of the following valid commands:" % mydo,
+ noiselevel=-1)
+ for vcount in range(len(validcommands)):
+ if vcount%6 == 0:
+ writemsg("\n!!! ", noiselevel=-1)
+ writemsg(validcommands[vcount].ljust(11), noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+ return 1
+
+ if not os.path.exists(myebuild):
+ writemsg("!!! doebuild: %s not found for %s\n" % (myebuild, mydo),
+ noiselevel=-1)
+ return 1
+
+ global _doebuild_manifest_exempt_depend
+
+ if "strict" in features and \
+ "digest" not in features and \
+ tree == "porttree" and \
+ mydo not in ("digest", "manifest", "help") and \
+ not _doebuild_manifest_exempt_depend:
+ # Always verify the ebuild checksums before executing it.
+ pkgdir = os.path.dirname(myebuild)
+ manifest_path = os.path.join(pkgdir, "Manifest")
+ global _doebuild_manifest_checked
+ # Avoid checking the same Manifest several times in a row during a
+ # regen with an empty cache.
+ if _doebuild_manifest_checked != manifest_path:
+ if not os.path.exists(manifest_path):
+ writemsg("!!! Manifest file not found: '%s'\n" % manifest_path,
+ noiselevel=-1)
+ return 1
+ mf = Manifest(pkgdir, mysettings["DISTDIR"])
+ try:
+ mf.checkTypeHashes("EBUILD")
+ except portage_exception.FileNotFound, e:
+ writemsg("!!! A file listed in the Manifest " + \
+ "could not be found: %s\n" % str(e), noiselevel=-1)
+ return 1
+ except portage_exception.DigestException, e:
+ writemsg("!!! Digest verification failed:\n", noiselevel=-1)
+ writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
+ writemsg("!!! Reason: %s\n" % e.value[1], noiselevel=-1)
+ writemsg("!!! Got: %s\n" % e.value[2], noiselevel=-1)
+ writemsg("!!! Expected: %s\n" % e.value[3], noiselevel=-1)
+ return 1
+ # Make sure that all of the ebuilds are actually listed in the
+ # Manifest.
+ for f in os.listdir(pkgdir):
+ if f.endswith(".ebuild") and not mf.hasFile("EBUILD", f):
+ writemsg("!!! A file is not listed in the " + \
+ "Manifest: '%s'\n" % os.path.join(pkgdir, f),
+ noiselevel=-1)
+ return 1
+ _doebuild_manifest_checked = manifest_path
+
+ logfile=None
+ builddir_lock = None
+ try:
+ if mydo in ("digest", "manifest", "help"):
+ # Temporarily exempt the depend phase from manifest checks, in case
+ # aux_get calls trigger cache generation.
+ _doebuild_manifest_exempt_depend += 1
+
+ doebuild_environment(myebuild, mydo, myroot, mysettings, debug,
+ use_cache, mydbapi)
+
+ # get possible slot information from the deps file
+ if mydo == "depend":
+ writemsg("!!! DEBUG: dbkey: %s\n" % str(dbkey), 2)
+ if isinstance(dbkey, dict):
+ mysettings["dbkey"] = ""
+ pr, pw = os.pipe()
+ fd_pipes = {0:0, 1:1, 2:2, 9:pw}
+ mypids = spawn(EBUILD_SH_BINARY + " depend", mysettings,
+ fd_pipes=fd_pipes, returnpid=True)
+ os.close(pw) # belongs exclusively to the child process now
+ maxbytes = 1024
+ mybytes = []
+ while True:
+ mybytes.append(os.read(pr, maxbytes))
+ if not mybytes[-1]:
+ break
+ os.close(pr)
+ mybytes = "".join(mybytes)
+ global auxdbkeys
+ for k, v in izip(auxdbkeys, mybytes.splitlines()):
+ dbkey[k] = v
+ retval = os.waitpid(mypids[0], 0)[1]
+ portage_exec.spawned_pids.remove(mypids[0])
+ # If it got a signal, return the signal that was sent, but
+ # shift in order to distinguish it from a return value. (just
+ # like portage_exec.spawn() would do).
+ if retval & 0xff:
+ return (retval & 0xff) << 8
+ # Otherwise, return its exit code.
+ return retval >> 8
+ elif dbkey:
+ mysettings["dbkey"] = dbkey
+ else:
+ mysettings["dbkey"] = \
+ os.path.join(mysettings.depcachedir, "aux_db_key_temp")
+
+ return spawn(EBUILD_SH_BINARY + " depend", mysettings)
+
+ # Validate dependency metadata here to ensure that ebuilds with invalid
+ # data are never installed (even via the ebuild command).
+ invalid_dep_exempt_phases = \
+ set(["clean", "cleanrm", "help", "prerm", "postrm"])
+ mycpv = mysettings["CATEGORY"] + "/" + mysettings["PF"]
+ dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
+ metadata = dict(izip(dep_keys, mydbapi.aux_get(mycpv, dep_keys)))
+ class FakeTree(object):
+ def __init__(self, mydb):
+ self.dbapi = mydb
+ dep_check_trees = {myroot:{}}
+ dep_check_trees[myroot]["porttree"] = \
+ FakeTree(fakedbapi(settings=mysettings))
+ for dep_type in dep_keys:
+ mycheck = dep_check(metadata[dep_type], None, mysettings,
+ myuse="all", myroot=myroot, trees=dep_check_trees)
+ if not mycheck[0]:
+ writemsg("%s: %s\n%s\n" % (
+ dep_type, metadata[dep_type], mycheck[1]), noiselevel=-1)
+ if mydo not in invalid_dep_exempt_phases:
+ return 1
+ del dep_type, mycheck
+ del mycpv, dep_keys, metadata, FakeTree, dep_check_trees
+
+ if "PORTAGE_TMPDIR" not in mysettings or \
+ not os.path.isdir(mysettings["PORTAGE_TMPDIR"]):
+ writemsg("The directory specified in your " + \
+ "PORTAGE_TMPDIR variable, '%s',\n" % \
+ mysettings.get("PORTAGE_TMPDIR", ""), noiselevel=-1)
+ writemsg("does not exist. Please create this directory or " + \
+ "correct your PORTAGE_TMPDIR setting.\n", noiselevel=-1)
+ return 1
+
+ # Build directory creation isn't required for any of these.
+ if mydo not in ("digest", "fetch", "help", "manifest"):
+ mystatus = prepare_build_dirs(myroot, mysettings, cleanup)
+ if mystatus:
+ return mystatus
+ # PORTAGE_LOG_FILE is set above by the prepare_build_dirs() call.
+ logfile = mysettings.get("PORTAGE_LOG_FILE", None)
+ if mydo == "unmerge":
+ return unmerge(mysettings["CATEGORY"],
+ mysettings["PF"], myroot, mysettings, vartree=vartree)
+
+		# If any of these phases are requested, handle them -- running them
+		# outside of the sandbox -- and stop now.
+ if mydo in ["clean","cleanrm"]:
+ return spawn(EBUILD_SH_BINARY + " clean", mysettings,
+ debug=debug, free=1, logfile=None)
+ elif mydo == "help":
+ return spawn(EBUILD_SH_BINARY + " " + mydo, mysettings,
+ debug=debug, free=1, logfile=logfile)
+ elif mydo == "setup":
+ infodir = os.path.join(
+ mysettings["PORTAGE_BUILDDIR"], "build-info")
+ if os.path.isdir(infodir):
+ """Load USE flags for setup phase of a binary package.
+ Ideally, the environment.bz2 would be used instead."""
+ mysettings.load_infodir(infodir)
+ retval = spawn(EBUILD_SH_BINARY + " " + mydo, mysettings,
+ debug=debug, free=1, logfile=logfile)
+ if secpass >= 2:
+ """ Privileged phases may have left files that need to be made
+ writable to a less privileged user."""
+ apply_recursive_permissions(mysettings["T"],
+ uid=portage_uid, gid=portage_gid, dirmode=070, dirmask=0,
+ filemode=060, filemask=0)
+ return retval
+ elif mydo == "preinst":
+ mysettings["IMAGE"] = mysettings["D"]
+ phase_retval = spawn(" ".join((EBUILD_SH_BINARY, mydo)),
+ mysettings, debug=debug, free=1, logfile=logfile)
+ if phase_retval == os.EX_OK:
+ # Post phase logic and tasks that have been factored out of
+ # ebuild.sh.
+ myargs = [MISC_SH_BINARY, "preinst_bsdflags", "preinst_mask",
+ "preinst_sfperms", "preinst_selinux_labels",
+ "preinst_suid_scan"]
+ mysettings["EBUILD_PHASE"] = ""
+ phase_retval = spawn(" ".join(myargs),
+ mysettings, debug=debug, free=1, logfile=logfile)
+ if phase_retval != os.EX_OK:
+ writemsg("!!! post preinst failed; exiting.\n",
+ noiselevel=-1)
+ del mysettings["IMAGE"]
+ return phase_retval
+ elif mydo == "postinst":
+ mysettings.load_infodir(mysettings["O"])
+ phase_retval = spawn(" ".join((EBUILD_SH_BINARY, mydo)),
+ mysettings, debug=debug, free=1, logfile=logfile)
+ if phase_retval == os.EX_OK:
+ # Post phase logic and tasks that have been factored out of
+ # ebuild.sh.
+ myargs = [MISC_SH_BINARY, "postinst_bsdflags"]
+ mysettings["EBUILD_PHASE"] = ""
+ phase_retval = spawn(" ".join(myargs),
+ mysettings, debug=debug, free=1, logfile=logfile)
+ if phase_retval != os.EX_OK:
+ writemsg("!!! post postinst failed; exiting.\n",
+ noiselevel=-1)
+ return phase_retval
+ elif mydo in ["prerm","postrm","config"]:
+ mysettings.load_infodir(mysettings["O"])
+ return spawn(EBUILD_SH_BINARY + " " + mydo,
+ mysettings, debug=debug, free=1, logfile=logfile)
+
+ mycpv = "/".join((mysettings["CATEGORY"], mysettings["PF"]))
+
+ # Make sure we get the correct tree in case there are overlays.
+ mytree = os.path.realpath(
+ os.path.dirname(os.path.dirname(mysettings["O"])))
+ newuris, alist = mydbapi.getfetchlist(
+ mycpv, mytree=mytree, mysettings=mysettings)
+ alluris, aalist = mydbapi.getfetchlist(
+ mycpv, mytree=mytree, all=True, mysettings=mysettings)
+ mysettings["A"] = " ".join(alist)
+ mysettings["AA"] = " ".join(aalist)
+ if ("mirror" in features) or fetchall:
+ fetchme = alluris[:]
+ checkme = aalist[:]
+ elif mydo == "digest":
+ fetchme = alluris[:]
+ checkme = aalist[:]
+ # Skip files that we already have digests for.
+ mf = Manifest(mysettings["O"], mysettings["DISTDIR"])
+ mydigests = mf.getTypeDigests("DIST")
+ for filename, hashes in mydigests.iteritems():
+ if len(hashes) == len(mf.hashes):
+ checkme = [i for i in checkme if i != filename]
+ fetchme = [i for i in fetchme \
+ if os.path.basename(i) != filename]
+ del filename, hashes
+ else:
+ fetchme = newuris[:]
+ checkme = alist[:]
+
+		# Only try to fetch the files if we are going to need them ...
+		# otherwise, if user has FEATURES=noauto and they run `ebuild clean
+		# unpack compile install`, we will try to fetch 4 times :/
+ need_distfiles = (mydo in ("fetch", "unpack") or \
+ mydo not in ("digest", "manifest") and "noauto" not in features)
+ if need_distfiles and not fetch(
+ fetchme, mysettings, listonly=listonly, fetchonly=fetchonly):
+ return 1
+
+ if mydo == "fetch" and listonly:
+ return 0
+
+ try:
+ if mydo == "manifest":
+ return not digestgen(aalist, mysettings, overwrite=1,
+ manifestonly=1, myportdb=mydbapi)
+ elif mydo == "digest":
+ return not digestgen(aalist, mysettings, overwrite=1,
+ myportdb=mydbapi)
+ elif "digest" in mysettings.features:
+ digestgen(aalist, mysettings, overwrite=0, myportdb=mydbapi)
+ except portage_exception.PermissionDenied, e:
+ writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ if mydo in ("digest", "manifest"):
+ return 1
+
+ # See above comment about fetching only when needed
+ if not digestcheck(checkme, mysettings, ("strict" in features),
+ (mydo not in ["digest","fetch","unpack"] and \
+ mysettings.get("PORTAGE_CALLER", None) == "ebuild" and \
+ "noauto" in features)):
+ return 1
+
+ if mydo == "fetch":
+ return 0
+
+ # remove PORTAGE_ACTUAL_DISTDIR once cvs/svn is supported via SRC_URI
+ if (mydo != "setup" and "noauto" not in features) or mydo == "unpack":
+ orig_distdir = mysettings["DISTDIR"]
+ mysettings["PORTAGE_ACTUAL_DISTDIR"] = orig_distdir
+ edpath = mysettings["DISTDIR"] = \
+ os.path.join(mysettings["PORTAGE_BUILDDIR"], "distdir")
+ if os.path.exists(edpath):
+ try:
+ if os.path.isdir(edpath) and not os.path.islink(edpath):
+ shutil.rmtree(edpath)
+ else:
+ os.unlink(edpath)
+ except OSError:
+					print "!!! Failed resetting ebuild distdir path, " + edpath
+ raise
+ os.mkdir(edpath)
+ apply_secpass_permissions(edpath, uid=portage_uid, mode=0755)
+ try:
+ for file in aalist:
+ os.symlink(os.path.join(orig_distdir, file),
+ os.path.join(edpath, file))
+ except OSError:
+ print "!!! Failed symlinking in '%s' to ebuild distdir" % file
+ raise
+
+ #initial dep checks complete; time to process main commands
+
+ nosandbox = (("userpriv" in features) and \
+ ("usersandbox" not in features) and \
+ ("userpriv" not in mysettings["RESTRICT"]) and \
+ ("nouserpriv" not in mysettings["RESTRICT"]))
+ if nosandbox and ("userpriv" not in features or \
+ "userpriv" in mysettings["RESTRICT"] or \
+ "nouserpriv" in mysettings["RESTRICT"]):
+ nosandbox = ("sandbox" not in features and \
+ "usersandbox" not in features)
+
+ sesandbox = mysettings.selinux_enabled() and \
+ "sesandbox" in mysettings.features
+ ebuild_sh = EBUILD_SH_BINARY + " %s"
+ misc_sh = MISC_SH_BINARY + " dyn_%s"
+
+		# args are for the spawn function
+ actionmap = {
+"depend": {"cmd":ebuild_sh, "args":{"droppriv":1, "free":0, "sesandbox":0}},
+"setup": {"cmd":ebuild_sh, "args":{"droppriv":0, "free":1, "sesandbox":0}},
+"unpack": {"cmd":ebuild_sh, "args":{"droppriv":1, "free":0, "sesandbox":sesandbox}},
+"compile":{"cmd":ebuild_sh, "args":{"droppriv":1, "free":nosandbox, "sesandbox":sesandbox}},
+"test": {"cmd":ebuild_sh, "args":{"droppriv":1, "free":nosandbox, "sesandbox":sesandbox}},
+"install":{"cmd":ebuild_sh, "args":{"droppriv":0, "free":0, "sesandbox":sesandbox}},
+"rpm": {"cmd":misc_sh, "args":{"droppriv":0, "free":0, "sesandbox":0}},
+"package":{"cmd":misc_sh, "args":{"droppriv":0, "free":0, "sesandbox":0}},
+ }
+
+		# Merge the deps back in so that we again have a 'full' actionmap;
+		# be glad when this can die.
+ for x in actionmap.keys():
+ if len(actionmap_deps.get(x, [])):
+ actionmap[x]["dep"] = ' '.join(actionmap_deps[x])
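+		# E.g. actionmap["compile"] now resembles (illustrative):
+		#     {"cmd": ebuild_sh, "dep": "unpack",
+		#      "args": {"droppriv": 1, "free": nosandbox, "sesandbox": sesandbox}}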
+
+ if mydo in actionmap.keys():
+ if mydo=="package":
+ portage_util.ensure_dirs(
+ os.path.join(mysettings["PKGDIR"], mysettings["CATEGORY"]))
+ portage_util.ensure_dirs(
+ os.path.join(mysettings["PKGDIR"], "All"))
+ retval = spawnebuild(mydo,
+ actionmap, mysettings, debug, logfile=logfile)
+ elif mydo=="qmerge":
+ # check to ensure install was run. this *only* pops up when users
+ # forget it and are using ebuild
+ if not os.path.exists(
+ os.path.join(mysettings["PORTAGE_BUILDDIR"], ".installed")):
+				writemsg("!!! mydo=qmerge, but the install phase hasn't been run\n",
+ noiselevel=-1)
+ return 1
+ # qmerge is a special phase that implies noclean.
+ if "noclean" not in mysettings.features:
+ mysettings.features.append("noclean")
+ #qmerge is specifically not supposed to do a runtime dep check
+ retval = merge(
+ mysettings["CATEGORY"], mysettings["PF"], mysettings["D"],
+ os.path.join(mysettings["PORTAGE_BUILDDIR"], "build-info"),
+ myroot, mysettings, myebuild=mysettings["EBUILD"], mytree=tree,
+ mydbapi=mydbapi, vartree=vartree, prev_mtimes=prev_mtimes)
+ elif mydo=="merge":
+ retval = spawnebuild("install", actionmap, mysettings, debug,
+ alwaysdep=1, logfile=logfile)
+ if retval == os.EX_OK:
+ retval = merge(mysettings["CATEGORY"], mysettings["PF"],
+ mysettings["D"], os.path.join(mysettings["PORTAGE_BUILDDIR"],
+ "build-info"), myroot, mysettings,
+ myebuild=mysettings["EBUILD"], mytree=tree, mydbapi=mydbapi,
+ vartree=vartree, prev_mtimes=prev_mtimes)
+ else:
+ print "!!! Unknown mydo:",mydo
+ return 1
+
+ if retval != os.EX_OK and tree == "porttree":
+ for i in xrange(len(mydbapi.porttrees)-1):
+ t = mydbapi.porttrees[i+1]
+ if myebuild.startswith(t):
+					# Display the non-canonical path, in case it's different, to
+ # prevent confusion.
+ overlays = mysettings["PORTDIR_OVERLAY"].split()
+ try:
+ writemsg("!!! This ebuild is from an overlay: '%s'\n" % \
+ overlays[i], noiselevel=-1)
+ except IndexError:
+ pass
+ break
+ return retval
+
+ finally:
+ if builddir_lock:
+ portage_locks.unlockdir(builddir_lock)
+
+		# Make sure that DISTDIR is restored to its normal value before we return!
+ if "PORTAGE_ACTUAL_DISTDIR" in mysettings:
+ mysettings["DISTDIR"] = mysettings["PORTAGE_ACTUAL_DISTDIR"]
+ del mysettings["PORTAGE_ACTUAL_DISTDIR"]
+
+ if logfile:
+ try:
+ if os.stat(logfile).st_size == 0:
+ os.unlink(logfile)
+ except OSError:
+ pass
+
+ if mydo in ("digest", "manifest", "help"):
+ # If necessary, depend phase has been triggered by aux_get calls
+ # and the exemption is no longer needed.
+ _doebuild_manifest_exempt_depend -= 1
+
+expandcache={}
+
+def movefile(src,dest,newmtime=None,sstat=None,mysettings=None):
+	"""moves a file from src to dest, preserving all permissions and attributes; mtime will
+	be preserved even when moving across filesystems. Returns the new mtime on success
+	and None on failure. The move is atomic."""
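+	# Hypothetical usage (paths invented for illustration):
+	#     mymtime = movefile("/var/tmp/image/usr/bin/foo", "/usr/bin/foo")
+	#     # mymtime is the preserved/new mtime, or None if the move failed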
+ #print "movefile("+str(src)+","+str(dest)+","+str(newmtime)+","+str(sstat)+")"
+ global lchown
+ if mysettings is None:
+ global settings
+ mysettings = settings
+ selinux_enabled = mysettings.selinux_enabled()
+ try:
+ if not sstat:
+ sstat=os.lstat(src)
+
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ print "!!! Stating source file failed... movefile()"
+ print "!!!",e
+ return None
+
+ destexists=1
+ try:
+ dstat=os.lstat(dest)
+ except (OSError, IOError):
+ dstat=os.lstat(os.path.dirname(dest))
+ destexists=0
+
+ if bsd_chflags:
+ # Check that we can actually unset schg etc flags...
+ # Clear the flags on source and destination; we'll reinstate them after merging
+ if destexists and dstat.st_flags != 0:
+ if bsd_chflags.lchflags(dest, 0) < 0:
+ writemsg("!!! Couldn't clear flags on file being merged: \n ",
+ noiselevel=-1)
+ # We might have an immutable flag on the parent dir; save and clear.
+ pflags=bsd_chflags.lgetflags(os.path.dirname(dest))
+ if pflags != 0:
+ bsd_chflags.lchflags(os.path.dirname(dest), 0)
+
+ if (destexists and bsd_chflags.lhasproblems(dest) > 0) or \
+ bsd_chflags.lhasproblems(os.path.dirname(dest)) > 0:
+ # This is bad: we can't merge the file with these flags set.
+ writemsg("!!! Can't merge file "+dest+" because of flags set\n",
+ noiselevel=-1)
+ return None
+
+ if destexists:
+ if stat.S_ISLNK(dstat[stat.ST_MODE]):
+ try:
+ os.unlink(dest)
+ destexists=0
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ pass
+
+ if stat.S_ISLNK(sstat[stat.ST_MODE]):
+ try:
+ target=os.readlink(src)
+ if mysettings and mysettings["D"]:
+ if target.find(mysettings["D"])==0:
+ target=target[len(mysettings["D"]):]
+ if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
+ os.unlink(dest)
+ if selinux_enabled:
+ sid = selinux.get_lsid(src)
+ selinux.secure_symlink(target,dest,sid)
+ else:
+ os.symlink(target,dest)
+ lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
+ return os.lstat(dest)[stat.ST_MTIME]
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ print "!!! failed to properly create symlink:"
+ print "!!!",dest,"->",target
+ print "!!!",e
+ return None
+
+ renamefailed=1
+ if sstat[stat.ST_DEV]==dstat[stat.ST_DEV] or selinux_enabled:
+ try:
+ if selinux_enabled:
+ ret=selinux.secure_rename(src,dest)
+ else:
+ ret=os.rename(src,dest)
+ renamefailed=0
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ if e[0]!=errno.EXDEV:
+ # Some random error.
+ print "!!! Failed to move",src,"to",dest
+ print "!!!",e
+ return None
+	# EXDEV: the rename failed because src and dest are on different
+	# devices (either a 'bind' mount or an actual cross-device link).
+ if renamefailed:
+ didcopy=0
+ if stat.S_ISREG(sstat[stat.ST_MODE]):
+ try: # For safety copy then move it over.
+ if selinux_enabled:
+ selinux.secure_copy(src,dest+"#new")
+ selinux.secure_rename(dest+"#new",dest)
+ else:
+ shutil.copyfile(src,dest+"#new")
+ os.rename(dest+"#new",dest)
+ didcopy=1
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ print '!!! copy',src,'->',dest,'failed.'
+ print "!!!",e
+ return None
+ else:
+ #we don't yet handle special, so we need to fall back to /bin/mv
+ if selinux_enabled:
+ a=commands.getstatusoutput(MOVE_BINARY+" -c -f "+"'"+src+"' '"+dest+"'")
+ else:
+ a=commands.getstatusoutput(MOVE_BINARY+" -f "+"'"+src+"' '"+dest+"'")
+ if a[0]!=0:
+ print "!!! Failed to move special file:"
+ print "!!! '"+src+"' to '"+dest+"'"
+ print "!!!",a
+ return None # failure
+ try:
+ if didcopy:
+ if stat.S_ISLNK(sstat[stat.ST_MODE]):
+ lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
+ else:
+ os.chown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
+ os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
+ os.unlink(src)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ print "!!! Failed to chown/chmod/unlink in movefile()"
+ print "!!!",dest
+ print "!!!",e
+ return None
+
+ if newmtime:
+ os.utime(dest,(newmtime,newmtime))
+ else:
+ os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
+ newmtime=sstat[stat.ST_MTIME]
+
+ if bsd_chflags:
+ # Restore the flags we saved before moving
+ if pflags and bsd_chflags.lchflags(os.path.dirname(dest), pflags) < 0:
+ writemsg("!!! Couldn't restore flags (%s) on '%s'\n" % \
+ (str(pflags), os.path.dirname(dest)), noiselevel=-1)
+ return None
+
+ return newmtime
+
+def merge(mycat, mypkg, pkgloc, infloc, myroot, mysettings, myebuild=None,
+ mytree=None, mydbapi=None, vartree=None, prev_mtimes=None):
+ if not os.access(myroot, os.W_OK):
+ writemsg("Permission denied: access('%s', W_OK)\n" % myroot,
+ noiselevel=-1)
+ return errno.EACCES
+ mylink = dblink(mycat, mypkg, myroot, mysettings, treetype=mytree,
+ vartree=vartree)
+ return mylink.merge(pkgloc, infloc, myroot, myebuild,
+ mydbapi=mydbapi, prev_mtimes=prev_mtimes)
+
+def unmerge(cat, pkg, myroot, mysettings, mytrimworld=1, vartree=None, ldpath_mtimes=None):
+ mylink = dblink(
+ cat, pkg, myroot, mysettings, treetype="vartree", vartree=vartree)
+ try:
+ mylink.lockdb()
+ if mylink.exists():
+ retval = mylink.unmerge(trimworld=mytrimworld, cleanup=1,
+ ldpath_mtimes=ldpath_mtimes)
+ if retval == os.EX_OK:
+ mylink.delete()
+ return retval
+ return os.EX_OK
+ finally:
+ mylink.unlockdb()
+
+def getCPFromCPV(mycpv):
+ """Calls pkgsplit on a cpv and returns only the cp."""
+ return pkgsplit(mycpv)[0]
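+
+# For example, getCPFromCPV("sys-apps/portage-2.1.2") returns
+# "sys-apps/portage".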
+
+def dep_virtual(mysplit, mysettings):
+ "Does virtual dependency conversion"
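+	# E.g. (hypothetical virtuals mapping): with virtual/foo provided by
+	# app-misc/a and app-misc/b, the atom ">=virtual/foo-1" becomes
+	# ["||", ">=app-misc/a-1", ">=app-misc/b-1"], while the blocker
+	# "!virtual/foo" becomes ["!app-misc/a", "!app-misc/b"] (an "and" list).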
+ newsplit=[]
+ myvirtuals = mysettings.getvirtuals()
+ for x in mysplit:
+ if type(x)==types.ListType:
+ newsplit.append(dep_virtual(x, mysettings))
+ else:
+ mykey=dep_getkey(x)
+ mychoices = myvirtuals.get(mykey, None)
+ if mychoices:
+ if len(mychoices) == 1:
+ a = x.replace(mykey, mychoices[0])
+ else:
+ if x[0]=="!":
+ # blocker needs "and" not "or(||)".
+ a=[]
+ else:
+ a=['||']
+ for y in mychoices:
+ a.append(x.replace(mykey, y))
+ newsplit.append(a)
+ else:
+ newsplit.append(x)
+ return newsplit
+
+def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
+ trees=None, **kwargs):
+ """Recursively expand new-style virtuals so as to collapse one or more
+ levels of indirection. In dep_zapdeps, new-style virtuals will be assigned
+ zero cost regardless of whether or not they are currently installed. Virtual
+ blockers are supported but only when the virtual expands to a single
+ atom because it wouldn't necessarily make sense to block all the components
+ of a compound virtual. When more than one new-style virtual is matched,
+ the matches are sorted from highest to lowest versions and the atom is
+ expanded to || ( highest match ... lowest match )."""
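+	# Illustration (hypothetical names): if portdb matches virtual/foo-2 and
+	# virtual/foo-1 for the atom "virtual/foo", the atom expands roughly to
+	#     || ( ( <RDEPEND of virtual/foo-2> =virtual/foo-2 )
+	#          ( <RDEPEND of virtual/foo-1> =virtual/foo-1 ) )
+	# letting dep_zapdeps choose among the providers, highest version first.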
+ newsplit = []
+ # According to GLEP 37, RDEPEND is the only dependency type that is valid
+ # for new-style virtuals. Repoman should enforce this.
+ dep_keys = ["RDEPEND", "DEPEND", "PDEPEND"]
+ def compare_pkgs(a, b):
+ return pkgcmp(b[1], a[1])
+ portdb = trees[myroot]["porttree"].dbapi
+ if kwargs["use_binaries"]:
+ portdb = trees[myroot]["bintree"].dbapi
+ myvirtuals = mysettings.getvirtuals()
+ for x in mysplit:
+ if x == "||":
+ newsplit.append(x)
+ continue
+ elif isinstance(x, list):
+ newsplit.append(_expand_new_virtuals(x, edebug, mydbapi,
+ mysettings, myroot=myroot, trees=trees, **kwargs))
+ continue
+ if portage_dep._dep_check_strict and \
+ not isvalidatom(x, allow_blockers=True):
+ raise portage_exception.ParseError(
+ "invalid atom: '%s'" % x)
+ mykey = dep_getkey(x)
+ if not mykey.startswith("virtual/"):
+ newsplit.append(x)
+ continue
+ mychoices = myvirtuals.get(mykey, [])
+ isblocker = x.startswith("!")
+ match_atom = x
+ if isblocker:
+ match_atom = x[1:]
+ pkgs = {}
+ for cpv in portdb.match(match_atom):
+ # only use new-style matches
+ if cpv.startswith("virtual/"):
+ pkgs[cpv] = (cpv, pkgsplit(cpv), portdb)
+ if kwargs["use_binaries"] and "vartree" in trees[myroot]:
+ vardb = trees[myroot]["vartree"].dbapi
+ for cpv in vardb.match(match_atom):
+ # only use new-style matches
+ if cpv.startswith("virtual/"):
+ if cpv in pkgs:
+ continue
+ pkgs[cpv] = (cpv, pkgsplit(cpv), vardb)
+ if not (pkgs or mychoices):
+ # This one couldn't be expanded as a new-style virtual. Old-style
+ # virtuals have already been expanded by dep_virtual, so this one
+ # is unavailable and dep_zapdeps will identify it as such. The
+ # atom is not eliminated here since it may still represent a
+ # dependency that needs to be satisfied.
+ newsplit.append(x)
+ continue
+ if not pkgs and len(mychoices) == 1:
+ newsplit.append(x.replace(mykey, mychoices[0]))
+ continue
+ pkgs = pkgs.values()
+ pkgs.sort(compare_pkgs) # Prefer higher versions.
+ if isblocker:
+ a = []
+ else:
+ a = ['||']
+ for y in pkgs:
+ depstring = " ".join(y[2].aux_get(y[0], dep_keys))
+ if edebug:
+ print "Virtual Parent: ", y[0]
+ print "Virtual Depstring:", depstring
+ mycheck = dep_check(depstring, mydbapi, mysettings, myroot=myroot,
+ trees=trees, **kwargs)
+ if not mycheck[0]:
+ raise portage_exception.ParseError(
+ "%s: %s '%s'" % (y[0], mycheck[1], depstring))
+ if isblocker:
+ virtual_atoms = [atom for atom in mycheck[1] \
+ if not atom.startswith("!")]
+ if len(virtual_atoms) == 1:
+ # It wouldn't make sense to block all the components of a
+ # compound virtual, so only a single atom block is allowed.
+ a.append("!" + virtual_atoms[0])
+ else:
+ mycheck[1].append("="+y[0]) # pull in the new-style virtual
+ a.append(mycheck[1])
+ # Plain old-style virtuals. New-style virtuals are preferred.
+ for y in mychoices:
+ a.append(x.replace(mykey, y))
+ if isblocker and not a:
+ # Probably a compound virtual. Pass the atom through unprocessed.
+ newsplit.append(x)
+ continue
+ newsplit.append(a)
+ return newsplit
+
+def dep_eval(deplist):
+ if not deplist:
+ return 1
+ if deplist[0]=="||":
+ #or list; we just need one "1"
+ for x in deplist[1:]:
+ if type(x)==types.ListType:
+ if dep_eval(x)==1:
+ return 1
+ elif x==1:
+ return 1
+		#XXX: unless there are no available atoms in the list,
+		#in which case we need to assume that everything is
+		#okay, as some ebuilds rely on an old bug.
+ if len(deplist) == 1:
+ return 1
+ return 0
+ else:
+ for x in deplist:
+ if type(x)==types.ListType:
+ if dep_eval(x)==0:
+ return 0
+ elif x==0 or x==2:
+ return 0
+ return 1
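+
+# Examples of the evaluation above, using already-reduced lists:
+#     dep_eval([1, 1])       -> 1   (plain "and" list, all satisfied)
+#     dep_eval([1, 0])       -> 0
+#     dep_eval(["||", 0, 1]) -> 1   (an "or" list needs just one success)
+#     dep_eval(["||"])       -> 1   (empty "or"; see the XXX note above)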
+
+def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
+ """Takes an unreduced and reduced deplist and removes satisfied dependencies.
+ Returned deplist contains steps that must be taken to satisfy dependencies."""
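+	# Illustrative behavior (hypothetical atoms): with
+	#     unreduced = ["||", "app-misc/foo", "app-misc/bar"]
+	#     reduced   = ["||", False, False]
+	# this returns a single choice such as ["app-misc/foo"], selected by the
+	# preference rules below; if any alternative were already satisfied, the
+	# whole || group would be dropped and [] returned.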
+ if trees is None:
+ global db
+ trees = db
+ writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
+ if not reduced or unreduced == ["||"] or dep_eval(reduced):
+ return []
+
+ if unreduced[0] != "||":
+ unresolved = []
+ for dep, satisfied in izip(unreduced, reduced):
+ if isinstance(dep, list):
+ unresolved += dep_zapdeps(dep, satisfied, myroot,
+ use_binaries=use_binaries, trees=trees)
+ elif not satisfied:
+ unresolved.append(dep)
+ return unresolved
+
+ # We're at a ( || atom ... ) type level and need to make a choice
+ deps = unreduced[1:]
+ satisfieds = reduced[1:]
+
+	# Our preference order is for the first item that:
+ # a) contains all unmasked packages with the same key as installed packages
+ # b) contains all unmasked packages
+ # c) contains masked installed packages
+ # d) is the first item
+
+ preferred = []
+ possible_upgrades = []
+ other = []
+
+ # Alias the trees we'll be checking availability against
+ vardb = None
+ if "vartree" in trees[myroot]:
+ vardb = trees[myroot]["vartree"].dbapi
+ if use_binaries:
+ mydbapi = trees[myroot]["bintree"].dbapi
+ else:
+ mydbapi = trees[myroot]["porttree"].dbapi
+
+ # Sort the deps into preferred (installed) and other
+	# with values of [[required_atom], availability]
+ for dep, satisfied in izip(deps, satisfieds):
+ if isinstance(dep, list):
+ atoms = dep_zapdeps(dep, satisfied, myroot,
+ use_binaries=use_binaries, trees=trees)
+ else:
+ atoms = [dep]
+
+ all_available = True
+ for atom in atoms:
+ if not mydbapi.match(atom):
+ # With --usepkgonly, count installed packages as "available".
+ # Note that --usepkgonly currently has no package.mask support.
+ # See bug #149816.
+ if use_binaries and vardb and vardb.match(atom):
+ continue
+ all_available = False
+ break
+
+ if not vardb:
+ # called by repoman
+ preferred.append((atoms, None, all_available))
+ continue
+
+ """ The package names rather than the exact atoms are used for an
+ initial rough match against installed packages. More specific
+ preference selection is handled later via slot and version comparison."""
+ all_installed = True
+ for atom in set([dep_getkey(atom) for atom in atoms]):
+ # New-style virtuals have zero cost to install.
+ if not vardb.match(atom) and not atom.startswith("virtual/"):
+ all_installed = False
+ break
+
+ # Check if the set of atoms will result in a downgrade of
+ # an installed package. If they will then don't prefer them
+ # over other atoms.
+ has_downgrade = False
+ versions = {}
+ if all_installed or all_available:
+ for atom in atoms:
+ mykey = dep_getkey(atom)
+ avail_pkg = best(mydbapi.match(atom))
+ if not avail_pkg:
+ continue
+ avail_slot = "%s:%s" % (mykey,
+ mydbapi.aux_get(avail_pkg, ["SLOT"])[0])
+ versions[avail_slot] = avail_pkg
+ inst_pkg = vardb.match(avail_slot)
+ if not inst_pkg:
+ continue
+ # emerge guarantees 1 package per slot here (highest counter)
+ inst_pkg = inst_pkg[0]
+ if avail_pkg != inst_pkg and \
+ avail_pkg != best([avail_pkg, inst_pkg]):
+ has_downgrade = True
+ break
+
+ this_choice = (atoms, versions, all_available)
+ if not has_downgrade:
+ if all_installed:
+ preferred.append(this_choice)
+ continue
+ elif all_available:
+ possible_upgrades.append(this_choice)
+ continue
+ other.append(this_choice)
+
+ # Compare the "all_installed" choices against the "all_available" choices
+ # for possible missed upgrades. The main purpose of this code is to find
+ # upgrades of new-style virtuals since _expand_new_virtuals() expands them
+ # into || ( highest version ... lowest version ). We want to prefer the
+ # highest all_available version of the new-style virtual when there is a
+ # lower all_installed version.
+ for possible_upgrade in list(possible_upgrades):
+ atoms, versions, all_available = possible_upgrade
+ myslots = set(versions)
+ for other_choice in preferred:
+ o_atoms, o_versions, o_all_available = other_choice
+ intersecting_slots = myslots.intersection(o_versions)
+ if not intersecting_slots:
+ continue
+ has_upgrade = False
+ has_downgrade = False
+ for myslot in intersecting_slots:
+ myversion = versions[myslot]
+ o_version = o_versions[myslot]
+ if myversion != o_version:
+ if myversion == best([myversion, o_version]):
+ has_upgrade = True
+ else:
+ has_downgrade = True
+ break
+ if has_upgrade and not has_downgrade:
+ o_index = preferred.index(other_choice)
+ preferred.insert(o_index, possible_upgrade)
+ possible_upgrades.remove(possible_upgrade)
+ break
+ preferred.extend(possible_upgrades)
+
+ # preferred now contains a) and c) from the order above with
+ # the masked flag differentiating the two. other contains b)
+ # and d) so adding other to preferred will give us a suitable
+ # list to iterate over.
+ preferred.extend(other)
+
+ for allow_masked in (False, True):
+ for atoms, versions, all_available in preferred:
+ if all_available or allow_masked:
+ return atoms
+
+ assert(False) # This point should not be reachable
+
+
+def dep_expand(mydep, mydb=None, use_cache=1, settings=None):
+ if not len(mydep):
+ return mydep
+ if mydep[0]=="*":
+ mydep=mydep[1:]
+ orig_dep = mydep
+ mydep = dep_getcpv(orig_dep)
+ myindex = orig_dep.index(mydep)
+ prefix = orig_dep[:myindex]
+ postfix = orig_dep[myindex+len(mydep):]
+ return prefix + cpv_expand(
+ mydep, mydb=mydb, use_cache=use_cache, settings=settings) + postfix
+
+def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None,
+ use_cache=1, use_binaries=0, myroot="/", trees=None):
+ """Takes a depend string and parses the condition."""
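+	# Illustrative call (hypothetical depstring):
+	#     dep_check("|| ( app-misc/foo app-misc/bar ) sys-libs/baz",
+	#         mydbapi, mysettings)
+	# On success this returns [1, atoms], where atoms holds whatever still
+	# needs to be satisfied (one choice per || group); on a parse failure it
+	# returns [0, "error message"].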
+ edebug = mysettings.get("PORTAGE_DEBUG", None) == "1"
+ #check_config_instance(mysettings)
+ if trees is None:
+ trees = globals()["db"]
+ if use=="yes":
+ if myuse is None:
+ #default behavior
+ myusesplit = mysettings["USE"].split()
+ else:
+ myusesplit = myuse
+ # We've been given useflags to use.
+ #print "USE FLAGS PASSED IN."
+ #print myuse
+ #if "bindist" in myusesplit:
+ # print "BINDIST is set!"
+ #else:
+ # print "BINDIST NOT set."
+ else:
+ #we are being run by autouse(), don't consult USE vars yet.
+ # WE ALSO CANNOT USE SETTINGS
+ myusesplit=[]
+
+ #convert parenthesis to sublists
+ mysplit = portage_dep.paren_reduce(depstring)
+
+ mymasks = set()
+ useforce = set()
+ useforce.add(mysettings["ARCH"])
+ if use == "all":
+ # This masking/forcing is only for repoman. In other cases, relevant
+ # masking/forcing should have already been applied via
+ # config.regenerate(). Also, binary or installed packages may have
+ # been built with flags that are now masked, and it would be
+ # inconsistent to mask them now. Additionally, myuse may consist of
+ # flags from a parent package that is being merged to a $ROOT that is
+ # different from the one that mysettings represents.
+ mymasks.update(mysettings.usemask)
+ mymasks.update(mysettings.archlist())
+ mymasks.discard(mysettings["ARCH"])
+ useforce.update(mysettings.useforce)
+ useforce.difference_update(mymasks)
+ try:
+ mysplit = portage_dep.use_reduce(mysplit, uselist=myusesplit,
+ masklist=mymasks, matchall=(use=="all"), excludeall=useforce)
+ except portage_exception.InvalidDependString, e:
+ return [0, str(e)]
+
+ # Do the || conversions
+ mysplit=portage_dep.dep_opconvert(mysplit)
+
+ if mysplit == []:
+ #dependencies were reduced to nothing
+ return [1,[]]
+
+ # Recursively expand new-style virtuals so as to
+ # collapse one or more levels of indirection.
+ try:
+ mysplit = _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings,
+ use=use, mode=mode, myuse=myuse, use_cache=use_cache,
+ use_binaries=use_binaries, myroot=myroot, trees=trees)
+ except portage_exception.ParseError, e:
+ return [0, str(e)]
+
+ mysplit2=mysplit[:]
+ mysplit2=dep_wordreduce(mysplit2,mysettings,mydbapi,mode,use_cache=use_cache)
+ if mysplit2 is None:
+ return [0,"Invalid token"]
+
+ writemsg("\n\n\n", 1)
+ writemsg("mysplit: %s\n" % (mysplit), 1)
+ writemsg("mysplit2: %s\n" % (mysplit2), 1)
+
+ myzaps = dep_zapdeps(mysplit, mysplit2, myroot,
+ use_binaries=use_binaries, trees=trees)
+ mylist = flatten(myzaps)
+ writemsg("myzaps: %s\n" % (myzaps), 1)
+ writemsg("mylist: %s\n" % (mylist), 1)
+ #remove duplicates
+ mydict={}
+ for x in mylist:
+ mydict[x]=1
+ writemsg("mydict: %s\n" % (mydict), 1)
+ return [1,mydict.keys()]
+
+def dep_wordreduce(mydeplist,mysettings,mydbapi,mode,use_cache=1):
+ "Reduces the deplist to ones and zeros"
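+	# E.g. (illustrative): ["app-misc/foo", ["||", "app-misc/bar",
+	# "app-misc/baz"]] might reduce to [True, ["||", False, True]] -- True
+	# wherever a matching package (or package.provided entry) satisfies the
+	# atom, with "||" markers passed through untouched.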
+ deplist=mydeplist[:]
+ for mypos in xrange(len(deplist)):
+ if type(deplist[mypos])==types.ListType:
+ #recurse
+ deplist[mypos]=dep_wordreduce(deplist[mypos],mysettings,mydbapi,mode,use_cache=use_cache)
+ elif deplist[mypos]=="||":
+ pass
+ else:
+ mykey = dep_getkey(deplist[mypos])
+ if mysettings and mysettings.pprovideddict.has_key(mykey) and \
+ match_from_list(deplist[mypos], mysettings.pprovideddict[mykey]):
+ deplist[mypos]=True
+ elif mydbapi is None:
+ # Assume nothing is satisfied. This forces dep_zapdeps to
+				# return all of the deps that have been selected
+ # (excluding those satisfied by package.provided).
+ deplist[mypos] = False
+ else:
+ if mode:
+ mydep=mydbapi.xmatch(mode,deplist[mypos])
+ else:
+ mydep=mydbapi.match(deplist[mypos],use_cache=use_cache)
+ if mydep!=None:
+ tmp=(len(mydep)>=1)
+ if deplist[mypos][0]=="!":
+ tmp=False
+ deplist[mypos]=tmp
+ else:
+ #encountered invalid string
+ return None
+ return deplist
+
+def cpv_getkey(mycpv):
+ myslash=mycpv.split("/")
+ mysplit=pkgsplit(myslash[-1])
+ mylen=len(myslash)
+ if mylen==2:
+ return myslash[0]+"/"+mysplit[0]
+ elif mylen==1:
+ return mysplit[0]
+ else:
+ return mysplit
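+
+# For example, cpv_getkey("sys-apps/foo-1.0-r1") returns "sys-apps/foo";
+# with no category, cpv_getkey("foo-1.0-r1") returns just "foo".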
+
+def key_expand(mykey, mydb=None, use_cache=1, settings=None):
+ mysplit=mykey.split("/")
+ if settings is None:
+ settings = globals()["settings"]
+ virts = settings.getvirtuals("/")
+ virts_p = settings.get_virts_p("/")
+ if len(mysplit)==1:
+ if mydb and type(mydb)==types.InstanceType:
+ for x in settings.categories:
+ if mydb.cp_list(x+"/"+mykey,use_cache=use_cache):
+ return x+"/"+mykey
+ if virts_p.has_key(mykey):
+ return(virts_p[mykey][0])
+ return "null/"+mykey
+ elif mydb:
+ if type(mydb)==types.InstanceType:
+ if (not mydb.cp_list(mykey,use_cache=use_cache)) and virts and virts.has_key(mykey):
+ return virts[mykey][0]
+ return mykey
+
+def cpv_expand(mycpv, mydb=None, use_cache=1, settings=None):
+ """Given a string (packagename or virtual) expand it into a valid
+ cat/package string. Virtuals use the mydb to determine which provided
+ virtual is a valid choice and defaults to the first element when there
+ are no installed/available candidates."""
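+	# Illustrative expansions (tree contents are hypothetical):
+	#     cpv_expand("foo-1.0", mydb=portdb)  ->  "app-misc/foo-1.0"
+	#         (when app-misc is the only category providing "foo")
+	#     cpv_expand("virtual/x11", mydb=portdb)  ->  "x11-base/xorg-x11"
+	#         (when nothing provides the key directly and the virtuals
+	#         mapping lists x11-base/xorg-x11 first)
+	# A name that cannot be resolved falls back to the "null" category,
+	# e.g. "null/foo-1.0".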
+ myslash=mycpv.split("/")
+ mysplit=pkgsplit(myslash[-1])
+ if settings is None:
+ settings = globals()["settings"]
+ virts = settings.getvirtuals("/")
+ virts_p = settings.get_virts_p("/")
+ if len(myslash)>2:
+		# this is an illegal case.
+ mysplit=[]
+ mykey=mycpv
+ elif len(myslash)==2:
+ if mysplit:
+ mykey=myslash[0]+"/"+mysplit[0]
+ else:
+ mykey=mycpv
+ if mydb and virts and mykey in virts:
+ writemsg("mydb.__class__: %s\n" % (mydb.__class__), 1)
+ if type(mydb)==types.InstanceType:
+ if not mydb.cp_list(mykey, use_cache=use_cache):
+ writemsg("virts[%s]: %s\n" % (str(mykey),virts[mykey]), 1)
+ mykey_orig = mykey[:]
+ for vkey in virts[mykey]:
+ if mydb.cp_list(vkey,use_cache=use_cache):
+ mykey = vkey
+ writemsg("virts chosen: %s\n" % (mykey), 1)
+ break
+ if mykey == mykey_orig:
+ mykey=virts[mykey][0]
+ writemsg("virts defaulted: %s\n" % (mykey), 1)
+ #we only perform virtual expansion if we are passed a dbapi
+ else:
+ #specific cpv, no category, ie. "foo-1.0"
+ if mysplit:
+ myp=mysplit[0]
+ else:
+ # "foo" ?
+ myp=mycpv
+ mykey=None
+ matches=[]
+ if mydb:
+ for x in settings.categories:
+ if mydb.cp_list(x+"/"+myp,use_cache=use_cache):
+ matches.append(x+"/"+myp)
+ if (len(matches)>1):
+ raise ValueError, matches
+ elif matches:
+ mykey=matches[0]
+
+ if not mykey and type(mydb)!=types.ListType:
+ if virts_p.has_key(myp):
+ mykey=virts_p[myp][0]
+ #again, we only perform virtual expansion if we have a dbapi (not a list)
+ if not mykey:
+ mykey="null/"+myp
+ if mysplit:
+ if mysplit[2]=="r0":
+ return mykey+"-"+mysplit[1]
+ else:
+ return mykey+"-"+mysplit[1]+"-"+mysplit[2]
+ else:
+ return mykey
+
+def getmaskingreason(mycpv, settings=None, portdb=None):
+ from portage_util import grablines
+ if settings is None:
+ settings = globals()["settings"]
+ if portdb is None:
+ portdb = globals()["portdb"]
+ mysplit = catpkgsplit(mycpv)
+ if not mysplit:
+ raise ValueError("invalid CPV: %s" % mycpv)
+ if not portdb.cpv_exists(mycpv):
+ raise KeyError("CPV %s does not exist" % mycpv)
+ mycp=mysplit[0]+"/"+mysplit[1]
+
+ # XXX- This is a temporary duplicate of code from the config constructor.
+ locations = [os.path.join(settings["PORTDIR"], "profiles")]
+ locations.extend(settings.profiles)
+ for ov in settings["PORTDIR_OVERLAY"].split():
+ profdir = os.path.join(normalize_path(ov), "profiles")
+ if os.path.isdir(profdir):
+ locations.append(profdir)
+ locations.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
+ USER_CONFIG_PATH.lstrip(os.path.sep)))
+ locations.reverse()
+ pmasklists = [grablines(os.path.join(x, "package.mask"), recursive=1) for x in locations]
+ pmasklines = []
+ while pmasklists: # stack_lists doesn't preserve order so it can't be used
+ pmasklines.extend(pmasklists.pop(0))
+ del pmasklists
+
+ if settings.pmaskdict.has_key(mycp):
+ for x in settings.pmaskdict[mycp]:
+ if mycpv in portdb.xmatch("match-all", x):
+ comment = ""
+ l = "\n"
+ comment_valid = -1
+ for i in xrange(len(pmasklines)):
+ l = pmasklines[i].strip()
+ if l == "":
+ comment = ""
+ comment_valid = -1
+ elif l[0] == "#":
+ comment += (l+"\n")
+ comment_valid = i + 1
+ elif l == x:
+ if comment_valid != i:
+ comment = ""
+ return comment
+ elif comment_valid != -1:
+						# Apparently this comment applies to multiple masks, so
+ # it remains valid until a blank line is encountered.
+ comment_valid += 1
+ return None
+
+def getmaskingstatus(mycpv, settings=None, portdb=None):
+ if settings is None:
+ settings = globals()["settings"]
+ if portdb is None:
+ portdb = globals()["portdb"]
+ mysplit = catpkgsplit(mycpv)
+ if not mysplit:
+ raise ValueError("invalid CPV: %s" % mycpv)
+ if not portdb.cpv_exists(mycpv):
+ raise KeyError("CPV %s does not exist" % mycpv)
+ mycp=mysplit[0]+"/"+mysplit[1]
+
+ rValue = []
+
+ # profile checking
+ revmaskdict=settings.prevmaskdict
+ if revmaskdict.has_key(mycp):
+ for x in revmaskdict[mycp]:
+ if x[0]=="*":
+ myatom = x[1:]
+ else:
+ myatom = x
+ if not match_to_list(mycpv, [myatom]):
+ rValue.append("profile")
+ break
+
+ # package.mask checking
+ maskdict=settings.pmaskdict
+ unmaskdict=settings.punmaskdict
+ if maskdict.has_key(mycp):
+ for x in maskdict[mycp]:
+ if mycpv in portdb.xmatch("match-all", x):
+ unmask=0
+ if unmaskdict.has_key(mycp):
+ for z in unmaskdict[mycp]:
+ if mycpv in portdb.xmatch("match-all",z):
+ unmask=1
+ break
+ if unmask==0:
+ rValue.append("package.mask")
+
+ # keywords checking
+ try:
+ mygroups, eapi = portdb.aux_get(mycpv, ["KEYWORDS", "EAPI"])
+ except KeyError:
+ # The "depend" phase apparently failed for some reason. An associated
+ # error message will have already been printed to stderr.
+ return ["corruption"]
+ if not eapi_is_supported(eapi):
+ return ["required EAPI %s, supported EAPI %s" % (eapi, portage_const.EAPI)]
+ mygroups = mygroups.split()
+ pgroups = settings["ACCEPT_KEYWORDS"].split()
+ myarch = settings["ARCH"]
+ if pgroups and myarch not in pgroups:
+ """For operating systems other than Linux, ARCH is not necessarily a
+ valid keyword."""
+ myarch = pgroups[0].lstrip("~")
+ pkgdict = settings.pkeywordsdict
+
+ cp = dep_getkey(mycpv)
+ if pkgdict.has_key(cp):
+ matches = match_to_list(mycpv, pkgdict[cp].keys())
+ for match in matches:
+ pgroups.extend(pkgdict[cp][match])
+ if matches:
+ inc_pgroups = []
+ for x in pgroups:
+ if x != "-*" and x.startswith("-"):
+ try:
+ inc_pgroups.remove(x[1:])
+ except ValueError:
+ pass
+ if x not in inc_pgroups:
+ inc_pgroups.append(x)
+ pgroups = inc_pgroups
+ del inc_pgroups
+
+ kmask = "missing"
+
+ for keyword in pgroups:
+ if keyword in mygroups:
+ kmask=None
+
+ if kmask:
+ fallback = None
+ for gp in mygroups:
+ if gp=="*":
+ kmask=None
+ break
+ elif gp=="-"+myarch:
+ kmask="-"+myarch
+ break
+ elif gp=="~"+myarch:
+ kmask="~"+myarch
+ break
+
+ if kmask:
+ rValue.append(kmask+" keyword")
+ return rValue
+
+class portagetree:
+ def __init__(self, root="/", virtual=None, clone=None, settings=None):
+ """
+ Constructor for a PortageTree
+
+ @param root: ${ROOT}, defaults to '/', see make.conf(5)
+ @type root: String/Path
+ @param virtual: UNUSED
+ @type virtual: No Idea
+ @param clone: Set this if you want a copy of Clone
+ @type clone: Existing portagetree Instance
+ @param settings: Portage Configuration object (portage.settings)
+ @type settings: Instance of portage.config
+ """
+
+ if clone:
+ self.root = clone.root
+ self.portroot = clone.portroot
+ self.pkglines = clone.pkglines
+ else:
+ self.root = root
+ if settings is None:
+ settings = globals()["settings"]
+ self.settings = settings
+ self.portroot = settings["PORTDIR"]
+ self.virtual = virtual
+ self.dbapi = portdbapi(
+ settings["PORTDIR"], mysettings=settings)
+
+ def dep_bestmatch(self,mydep):
+ "compatibility method"
+ mymatch=self.dbapi.xmatch("bestmatch-visible",mydep)
+ if mymatch is None:
+ return ""
+ return mymatch
+
+ def dep_match(self,mydep):
+ "compatibility method"
+ mymatch=self.dbapi.xmatch("match-visible",mydep)
+ if mymatch is None:
+ return []
+ return mymatch
+
+ def exists_specific(self,cpv):
+ return self.dbapi.cpv_exists(cpv)
+
+ def getallnodes(self):
+		"""new behavior: these are all *unmasked* nodes. There may or may not be
+		masked packages available for the nodes in this list."""
+ return self.dbapi.cp_all()
+
+ def getname(self,pkgname):
+ "returns file location for this particular package (DEPRECATED)"
+ if not pkgname:
+ return ""
+ mysplit=pkgname.split("/")
+ psplit=pkgsplit(mysplit[1])
+ return self.portroot+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
+
+ def resolve_specific(self,myspec):
+ cps=catpkgsplit(myspec)
+ if not cps:
+ return None
+ mykey = key_expand(cps[0]+"/"+cps[1], mydb=self.dbapi,
+ settings=self.settings)
+ mykey=mykey+"-"+cps[2]
+ if cps[3]!="r0":
+ mykey=mykey+"-"+cps[3]
+ return mykey
+
+ def depcheck(self,mycheck,use="yes",myusesplit=None):
+		return dep_check(mycheck,self.dbapi,self.settings,use=use,myuse=myusesplit)
+
+ def getslot(self,mycatpkg):
+ "Get a slot for a catpkg; assume it exists."
+ myslot = ""
+ try:
+ myslot=self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ pass
+ return myslot
+
+
+class dbapi:
+ def __init__(self):
+ pass
+
+ def close_caches(self):
+ pass
+
+ def cp_list(self,cp,use_cache=1):
+ return
+
+ def cpv_all(self):
+ cpv_list = []
+ for cp in self.cp_all():
+ cpv_list.extend(self.cp_list(cp))
+ return cpv_list
+
+ def aux_get(self,mycpv,mylist):
+		"""stub code for returning auxiliary db information, such as SLOT, DEPEND, etc.
+		input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]
+		return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or [] if mycpv not found
+		"""
+ raise NotImplementedError
+
+ def match(self,origdep,use_cache=1):
+ mydep = dep_expand(origdep, mydb=self, settings=self.settings)
+ mykey=dep_getkey(mydep)
+ mylist = match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
+ myslot = portage_dep.dep_getslot(mydep)
+ if myslot is not None:
+ mylist = [cpv for cpv in mylist \
+ if self.aux_get(cpv, ["SLOT"])[0] == myslot]
+ return mylist
+
+ def match2(self,mydep,mykey,mylist):
+ writemsg("DEPRECATED: dbapi.match2\n")
+		return match_from_list(mydep,mylist)
+
+ def invalidentry(self, mypath):
+ if re.search("portage_lockfile$",mypath):
+ if not os.environ.has_key("PORTAGE_MASTER_PID"):
+ writemsg("Lockfile removed: %s\n" % mypath, 1)
+ portage_locks.unlockfile((mypath,None,None))
+ else:
+ # Nothing we can do about it. We're probably sandboxed.
+ pass
+ elif re.search(".*/-MERGING-(.*)",mypath):
+ if os.path.exists(mypath):
+ writemsg(red("INCOMPLETE MERGE:")+" "+mypath+"\n", noiselevel=-1)
+ else:
+ writemsg("!!! Invalid db entry: %s\n" % mypath, noiselevel=-1)
+
+
+
+class fakedbapi(dbapi):
+ "This is a dbapi to use for the emptytree function. It's empty, but things can be added to it."
+ def __init__(self, settings=None):
+ self.cpvdict={}
+ self.cpdict={}
+ if settings is None:
+ settings = globals()["settings"]
+ self.settings = settings
+ self._match_cache = {}
+
+ def _clear_cache(self):
+ if self._match_cache:
+ self._match_cache = {}
+
+ def match(self, origdep, use_cache=1):
+ result = self._match_cache.get(origdep, None)
+ if result is not None:
+ return result[:]
+ result = dbapi.match(self, origdep, use_cache=use_cache)
+ self._match_cache[origdep] = result
+ return result[:]
+
+ def cpv_exists(self,mycpv):
+ return self.cpvdict.has_key(mycpv)
+
+ def cp_list(self,mycp,use_cache=1):
+ if not self.cpdict.has_key(mycp):
+ return []
+ else:
+ return self.cpdict[mycp]
+
+ def cp_all(self):
+ returnme=[]
+ for x in self.cpdict.keys():
+ returnme.extend(self.cpdict[x])
+ return returnme
+
+ def cpv_all(self):
+ return self.cpvdict.keys()
+
+ def cpv_inject(self, mycpv, metadata=None):
+		"""Adds a cpv to the list of available packages."""
+ self._clear_cache()
+ mycp=cpv_getkey(mycpv)
+ self.cpvdict[mycpv] = metadata
+ myslot = None
+ if metadata:
+ myslot = metadata.get("SLOT", None)
+ if myslot and mycp in self.cpdict:
+ # If necessary, remove another package in the same SLOT.
+ for cpv in self.cpdict[mycp]:
+ if mycpv != cpv:
+ other_metadata = self.cpvdict[cpv]
+ if other_metadata:
+ if myslot == other_metadata.get("SLOT", None):
+ self.cpv_remove(cpv)
+ break
+ if mycp not in self.cpdict:
+ self.cpdict[mycp] = []
+ if not mycpv in self.cpdict[mycp]:
+ self.cpdict[mycp].append(mycpv)
+
+ def cpv_remove(self,mycpv):
+ """Removes a cpv from the list of available packages."""
+ self._clear_cache()
+ mycp=cpv_getkey(mycpv)
+ if self.cpvdict.has_key(mycpv):
+ del self.cpvdict[mycpv]
+ if not self.cpdict.has_key(mycp):
+ return
+ while mycpv in self.cpdict[mycp]:
+ del self.cpdict[mycp][self.cpdict[mycp].index(mycpv)]
+ if not len(self.cpdict[mycp]):
+ del self.cpdict[mycp]
+
+ def aux_get(self, mycpv, wants):
+ if not self.cpv_exists(mycpv):
+ raise KeyError(mycpv)
+ metadata = self.cpvdict[mycpv]
+ if not metadata:
+ return ["" for x in wants]
+ return [metadata.get(x, "") for x in wants]
+
+ def aux_update(self, cpv, values):
+ self._clear_cache()
+ self.cpvdict[cpv].update(values)
+
+class bindbapi(fakedbapi):
+ def __init__(self, mybintree=None, settings=None):
+ self.bintree = mybintree
+ self.cpvdict={}
+ self.cpdict={}
+ if settings is None:
+ settings = globals()["settings"]
+ self.settings = settings
+ self._match_cache = {}
+ # Selectively cache metadata in order to optimize dep matching.
+ self._aux_cache_keys = set(["SLOT"])
+ self._aux_cache = {}
+
+ def match(self, *pargs, **kwargs):
+ if self.bintree and not self.bintree.populated:
+ self.bintree.populate()
+ return fakedbapi.match(self, *pargs, **kwargs)
+
+ def aux_get(self,mycpv,wants):
+ if self.bintree and not self.bintree.populated:
+ self.bintree.populate()
+ cache_me = False
+ if not set(wants).difference(self._aux_cache_keys):
+ aux_cache = self._aux_cache.get(mycpv)
+ if aux_cache is not None:
+ return [aux_cache[x] for x in wants]
+ cache_me = True
+ mysplit = mycpv.split("/")
+ mylist = []
+ tbz2name = mysplit[1]+".tbz2"
+ if self.bintree and not self.bintree.isremote(mycpv):
+ tbz2 = xpak.tbz2(self.bintree.getname(mycpv))
+ getitem = tbz2.getfile
+ else:
+ getitem = self.bintree.remotepkgs[tbz2name].get
+ mydata = {}
+ mykeys = wants
+ if cache_me:
+ mykeys = self._aux_cache_keys.union(wants)
+ for x in mykeys:
+ myval = getitem(x)
+ # myval is None if the key doesn't exist
+ # or the tbz2 is corrupt.
+ if myval:
+ mydata[x] = " ".join(myval.split())
+ if "EAPI" in mykeys:
+ if not mydata.setdefault("EAPI", "0"):
+ mydata["EAPI"] = "0"
+ if cache_me:
+ aux_cache = {}
+ for x in self._aux_cache_keys:
+ aux_cache[x] = mydata.get(x, "")
+ self._aux_cache[mycpv] = aux_cache
+ return [mydata.get(x, "") for x in wants]
+
+ def aux_update(self, cpv, values):
+ if not self.bintree.populated:
+ self.bintree.populate()
+ tbz2path = self.bintree.getname(cpv)
+ if not os.path.exists(tbz2path):
+ raise KeyError(cpv)
+ mytbz2 = xpak.tbz2(tbz2path)
+ mydata = mytbz2.get_data()
+ mydata.update(values)
+ mytbz2.recompose_mem(xpak.xpak_mem(mydata))
+
+ def cp_list(self, *pargs, **kwargs):
+ if not self.bintree.populated:
+ self.bintree.populate()
+ return fakedbapi.cp_list(self, *pargs, **kwargs)
+
+ def cpv_all(self):
+ if not self.bintree.populated:
+ self.bintree.populate()
+ return fakedbapi.cpv_all(self)
+
+class vardbapi(dbapi):
+ def __init__(self, root, categories=None, settings=None, vartree=None):
+ self.root = root[:]
+ #cache for category directory mtimes
+ self.mtdircache = {}
+ #cache for dependency checks
+ self.matchcache = {}
+ #cache for cp_list results
+ self.cpcache = {}
+ self.blockers = None
+ if settings is None:
+ settings = globals()["settings"]
+ self.settings = settings
+ if categories is None:
+ categories = settings.categories
+ self.categories = categories[:]
+ if vartree is None:
+ vartree = globals()["db"][root]["vartree"]
+ self.vartree = vartree
+ self._aux_cache_keys = set(["SLOT", "COUNTER", "PROVIDE", "USE",
+ "IUSE", "DEPEND", "RDEPEND", "PDEPEND"])
+ self._aux_cache = None
+ self._aux_cache_version = "1"
+ self._aux_cache_filename = os.path.join(self.root,
+ CACHE_PATH.lstrip(os.path.sep), "vdb_metadata.pickle")
+
+ def cpv_exists(self,mykey):
+ "Tells us whether an actual ebuild exists on disk (no masking)"
+ return os.path.exists(self.root+VDB_PATH+"/"+mykey)
+
+ def cpv_counter(self,mycpv):
+ "This method will grab the COUNTER. Returns a counter value."
+ try:
+ return long(self.aux_get(mycpv, ["COUNTER"])[0])
+		except (KeyError, ValueError):
+ pass
+ cdir=self.root+VDB_PATH+"/"+mycpv
+ cpath=self.root+VDB_PATH+"/"+mycpv+"/COUNTER"
+
+ # We write our new counter value to a new file that gets moved into
+		# place to avoid filesystem corruption on XFS (unexpected reboot).
+ corrupted=0
+ if os.path.exists(cpath):
+ cfile=open(cpath, "r")
+ try:
+ counter=long(cfile.readline())
+ except ValueError:
+ print "portage: COUNTER for",mycpv,"was corrupted; resetting to value of 0"
+ counter=long(0)
+ corrupted=1
+ cfile.close()
+ elif os.path.exists(cdir):
+ mys = pkgsplit(mycpv)
+ myl = self.match(mys[0],use_cache=0)
+ print mys,myl
+ if len(myl) == 1:
+ try:
+ # Only one package... Counter doesn't matter.
+ write_atomic(cpath, "1")
+ counter = 1
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n",
+ noiselevel=-1)
+ writemsg("!!! Please run /usr/lib/portage/bin/fix-db.py or\n",
+ noiselevel=-1)
+ writemsg("!!! unmerge this exact version.\n", noiselevel=-1)
+ writemsg("!!! %s\n" % e, noiselevel=-1)
+ sys.exit(1)
+ else:
+ writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n",
+ noiselevel=-1)
+ writemsg("!!! Please run /usr/lib/portage/bin/fix-db.py or\n",
+ noiselevel=-1)
+ writemsg("!!! remerge the package.\n", noiselevel=-1)
+ sys.exit(1)
+ else:
+ counter=long(0)
+ if corrupted:
+ # update new global counter file
+ write_atomic(cpath, str(counter))
+ return counter
+
+ def cpv_inject(self,mycpv):
+ "injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
+ os.makedirs(self.root+VDB_PATH+"/"+mycpv)
+ counter = self.counter_tick(self.root, mycpv=mycpv)
+ # write local package counter so that emerge clean does the right thing
+ write_atomic(os.path.join(self.root, VDB_PATH, mycpv, "COUNTER"), str(counter))
+
+ def isInjected(self,mycpv):
+ if self.cpv_exists(mycpv):
+ if os.path.exists(self.root+VDB_PATH+"/"+mycpv+"/INJECTED"):
+ return True
+ if not os.path.exists(self.root+VDB_PATH+"/"+mycpv+"/CONTENTS"):
+ return True
+ return False
+
+ def move_ent(self,mylist):
+ origcp=mylist[1]
+ newcp=mylist[2]
+
+ # sanity check
+ for cp in [origcp,newcp]:
+ if not (isvalidatom(cp) and isjustname(cp)):
+ raise portage_exception.InvalidPackageName(cp)
+ origmatches=self.match(origcp,use_cache=0)
+ if not origmatches:
+ return
+ for mycpv in origmatches:
+ mycpsplit=catpkgsplit(mycpv)
+ mynewcpv=newcp+"-"+mycpsplit[2]
+ mynewcat=newcp.split("/")[0]
+ if mycpsplit[3]!="r0":
+ mynewcpv += "-"+mycpsplit[3]
+ mycpsplit_new = catpkgsplit(mynewcpv)
+ origpath=self.root+VDB_PATH+"/"+mycpv
+ if not os.path.exists(origpath):
+ continue
+ writemsg_stdout("@")
+ if not os.path.exists(self.root+VDB_PATH+"/"+mynewcat):
+ #create the directory
+ os.makedirs(self.root+VDB_PATH+"/"+mynewcat)
+ newpath=self.root+VDB_PATH+"/"+mynewcpv
+ if os.path.exists(newpath):
+ #dest already exists; keep this puppy where it is.
+ continue
+ os.rename(origpath, newpath)
+
+ # We need to rename the ebuild now.
+ old_pf = catsplit(mycpv)[1]
+ new_pf = catsplit(mynewcpv)[1]
+ if new_pf != old_pf:
+ try:
+ os.rename(os.path.join(newpath, old_pf + ".ebuild"),
+ os.path.join(newpath, new_pf + ".ebuild"))
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
+
+ write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
+ fixdbentries([mylist], newpath)
+
+ def update_ents(self, update_iter):
+ """Run fixdbentries on all installed packages (time consuming). Like
+ fixpackages, this should be run from a helper script and display
+ a progress indicator."""
+ dbdir = os.path.join(self.root, VDB_PATH)
+ for catdir in listdir(dbdir):
+ catdir = dbdir+"/"+catdir
+ if os.path.isdir(catdir):
+ for pkgdir in listdir(catdir):
+ pkgdir = catdir+"/"+pkgdir
+ if os.path.isdir(pkgdir):
+ fixdbentries(update_iter, pkgdir)
+
+ def move_slot_ent(self,mylist):
+ pkg=mylist[1]
+ origslot=mylist[2]
+ newslot=mylist[3]
+
+ if not isvalidatom(pkg):
+ raise portage_exception.InvalidAtom(pkg)
+
+ origmatches=self.match(pkg,use_cache=0)
+
+ if not origmatches:
+ return
+ for mycpv in origmatches:
+ origpath=self.root+VDB_PATH+"/"+mycpv
+ if not os.path.exists(origpath):
+ continue
+
+			slot=grabfile(origpath+"/SLOT")
+			if not slot:
+				continue
+
+			if slot[0]!=origslot:
+				continue
+
+ writemsg_stdout("s")
+ write_atomic(os.path.join(origpath, "SLOT"), newslot+"\n")
+
+ def cp_list(self,mycp,use_cache=1):
+ mysplit=mycp.split("/")
+ if mysplit[0] == '*':
+ mysplit[0] = mysplit[0][1:]
+ try:
+ mystat=os.stat(self.root+VDB_PATH+"/"+mysplit[0])[stat.ST_MTIME]
+ except OSError:
+ mystat=0
+ if use_cache and self.cpcache.has_key(mycp):
+ cpc=self.cpcache[mycp]
+ if cpc[0]==mystat:
+ return cpc[1]
+ list=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
+
+ if (list is None):
+ return []
+ returnme=[]
+ for x in list:
+ if x.startswith("."):
+ continue
+ if x[0] == '-':
+ #writemsg(red("INCOMPLETE MERGE:")+str(x[len("-MERGING-"):])+"\n")
+ continue
+ ps=pkgsplit(x)
+ if not ps:
+ self.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
+ continue
+ if len(mysplit) > 1:
+ if ps[0]==mysplit[1]:
+ returnme.append(mysplit[0]+"/"+x)
+ if use_cache:
+ self.cpcache[mycp]=[mystat,returnme]
+ elif self.cpcache.has_key(mycp):
+ del self.cpcache[mycp]
+ return returnme
+
+ def cpv_all(self,use_cache=1):
+ returnme=[]
+ basepath = self.root+VDB_PATH+"/"
+
+ for x in self.categories:
+ for y in listdir(basepath+x,EmptyOnError=1):
+ if y.startswith("."):
+ continue
+ subpath = x+"/"+y
+ # -MERGING- should never be a cpv, nor should files.
+ if os.path.isdir(basepath+subpath) and (pkgsplit(y) is not None):
+ returnme += [subpath]
+ return returnme
+
+ def cp_all(self,use_cache=1):
+ mylist = self.cpv_all(use_cache=use_cache)
+ d={}
+ for y in mylist:
+ if y[0] == '*':
+ y = y[1:]
+ mysplit=catpkgsplit(y)
+ if not mysplit:
+ self.invalidentry(self.root+VDB_PATH+"/"+y)
+ continue
+ d[mysplit[0]+"/"+mysplit[1]] = None
+ return d.keys()
+
+ def checkblockers(self,origdep):
+ pass
+
+ def match(self,origdep,use_cache=1):
+ "caching match function"
+ mydep = dep_expand(
+ origdep, mydb=self, use_cache=use_cache, settings=self.settings)
+ mykey=dep_getkey(mydep)
+ mycat=mykey.split("/")[0]
+ if not use_cache:
+ if self.matchcache.has_key(mycat):
+ del self.mtdircache[mycat]
+ del self.matchcache[mycat]
+ mymatch = match_from_list(mydep,
+ self.cp_list(mykey, use_cache=use_cache))
+ myslot = portage_dep.dep_getslot(mydep)
+ if myslot is not None:
+ mymatch = [cpv for cpv in mymatch \
+ if self.aux_get(cpv, ["SLOT"])[0] == myslot]
+ return mymatch
+ try:
+ curmtime=os.stat(self.root+VDB_PATH+"/"+mycat)[stat.ST_MTIME]
+ except (IOError, OSError):
+ curmtime=0
+
+ if not self.matchcache.has_key(mycat) or not self.mtdircache[mycat]==curmtime:
+ # clear cache entry
+ self.mtdircache[mycat]=curmtime
+ self.matchcache[mycat]={}
+ if not self.matchcache[mycat].has_key(mydep):
+ mymatch=match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
+ myslot = portage_dep.dep_getslot(mydep)
+ if myslot is not None:
+ mymatch = [cpv for cpv in mymatch \
+ if self.aux_get(cpv, ["SLOT"])[0] == myslot]
+ self.matchcache[mycat][mydep]=mymatch
+ return self.matchcache[mycat][mydep][:]
+
+ def findname(self, mycpv):
+ return self.root+VDB_PATH+"/"+str(mycpv)+"/"+mycpv.split("/")[1]+".ebuild"
+
+ def flush_cache(self):
+ """If the current user has permission and the internal aux_get cache has
+ been updated, save it to disk and mark it unmodified. This is called
+ by emerge after it has loaded the full vdb for use in dependency
+ calculations. Currently, the cache is only written if the user has
+ superuser privileges (since that's required to obtain a lock), but all
+ users have read access and benefit from faster metadata lookups (as
+ long as at least part of the cache is still valid)."""
+ if self._aux_cache is not None and \
+ self._aux_cache["modified"] and \
+ secpass >= 2:
+ valid_nodes = set(self.cpv_all())
+ for cpv in self._aux_cache["packages"].keys():
+ if cpv not in valid_nodes:
+ del self._aux_cache["packages"][cpv]
+ del self._aux_cache["modified"]
+ try:
+ f = atomic_ofstream(self._aux_cache_filename)
+ cPickle.dump(self._aux_cache, f, -1)
+ f.close()
+ portage_util.apply_secpass_permissions(
+ self._aux_cache_filename, gid=portage_gid, mode=0644)
+ except (IOError, OSError), e:
+ pass
+ self._aux_cache["modified"] = False
+
+ def aux_get(self, mycpv, wants):
+ """This automatically caches selected keys that are frequently needed
+ by emerge for dependency calculations. The cached metadata is
+ considered valid if the mtime of the package directory has not changed
+ since the data was cached. The cache is stored in a pickled dict
+ object with the following format:
+
+ {version:"1", "packages":{cpv1:(mtime,{k1,v1, k2,v2, ...}), cpv2...}}
+
+ If an error occurs while loading the cache pickle or the version is
+ unrecognized, the cache will simple be recreated from scratch (it is
+ completely disposable).
+ """
+ if not self._aux_cache_keys.intersection(wants):
+ return self._aux_get(mycpv, wants)
+ if self._aux_cache is None:
+ try:
+ f = open(self._aux_cache_filename)
+ mypickle = cPickle.Unpickler(f)
+ mypickle.find_global = None
+ self._aux_cache = mypickle.load()
+ f.close()
+ del f
+ except (IOError, OSError, EOFError, cPickle.UnpicklingError):
+ pass
+ if not self._aux_cache or \
+ not isinstance(self._aux_cache, dict) or \
+ self._aux_cache.get("version") != self._aux_cache_version or \
+ not self._aux_cache.get("packages"):
+ self._aux_cache = {"version":self._aux_cache_version}
+ self._aux_cache["packages"] = {}
+ self._aux_cache["modified"] = False
+ mydir = os.path.join(self.root, VDB_PATH, mycpv)
+ mydir_stat = None
+ try:
+ mydir_stat = os.stat(mydir)
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ raise KeyError(mycpv)
+ mydir_mtime = long(mydir_stat.st_mtime)
+ pkg_data = self._aux_cache["packages"].get(mycpv)
+ mydata = {}
+ cache_valid = False
+ if pkg_data:
+ cache_mtime, metadata = pkg_data
+ cache_valid = cache_mtime == mydir_mtime
+ if cache_valid and set(metadata) != self._aux_cache_keys:
+ # Allow self._aux_cache_keys to change without a cache version
+ # bump.
+ cache_valid = False
+ if cache_valid:
+ mydata.update(metadata)
+ pull_me = set(wants).difference(self._aux_cache_keys)
+ else:
+ pull_me = self._aux_cache_keys.union(wants)
+ if pull_me:
+ # pull any needed data and cache it
+ aux_keys = list(pull_me)
+ for k, v in izip(aux_keys, self._aux_get(mycpv, aux_keys)):
+ mydata[k] = v
+ if not cache_valid:
+ cache_data = {}
+ for aux_key in self._aux_cache_keys:
+ cache_data[aux_key] = mydata[aux_key]
+ self._aux_cache["packages"][mycpv] = (mydir_mtime, cache_data)
+ self._aux_cache["modified"] = True
+ return [mydata[x] for x in wants]
+
+ def _aux_get(self, mycpv, wants):
+ mydir = os.path.join(self.root, VDB_PATH, mycpv)
+ if not os.path.isdir(mydir):
+ raise KeyError(mycpv)
+ results = []
+ for x in wants:
+ try:
+ myf = open(os.path.join(mydir, x), "r")
+ try:
+ myd = myf.read()
+ finally:
+ myf.close()
+ myd = " ".join(myd.split())
+ except IOError:
+ myd = ""
+ if x == "EAPI" and not myd:
+ results.append("0")
+ else:
+ results.append(myd)
+ return results
+
+ def aux_update(self, cpv, values):
+ cat, pkg = cpv.split("/")
+ mylink = dblink(cat, pkg, self.root, self.settings,
+ treetype="vartree", vartree=self.vartree)
+ if not mylink.exists():
+ raise KeyError(cpv)
+ for k, v in values.iteritems():
+ mylink.setfile(k, v)
+
+ def counter_tick(self,myroot,mycpv=None):
+ return self.counter_tick_core(myroot,incrementing=1,mycpv=mycpv)
+
+ def get_counter_tick_core(self,myroot,mycpv=None):
+ return self.counter_tick_core(myroot,incrementing=0,mycpv=mycpv)+1
+
+ def counter_tick_core(self,myroot,incrementing=1,mycpv=None):
+ "This method will grab the next COUNTER value and record it back to the global file. Returns new counter value."
+ cpath=myroot+"var/cache/edb/counter"
+ changed=0
+ min_counter = 0
+ if mycpv:
+ mysplit = pkgsplit(mycpv)
+ for x in self.match(mysplit[0],use_cache=0):
+ if x==mycpv:
+ continue
+ try:
+ old_counter = long(self.aux_get(x,["COUNTER"])[0])
+ writemsg("COUNTER '%d' '%s'\n" % (old_counter, x),1)
+ except (ValueError, KeyError): # valueError from long(), KeyError from aux_get
+ old_counter = 0
+ writemsg("!!! BAD COUNTER in '%s'\n" % (x), noiselevel=-1)
+ if old_counter > min_counter:
+ min_counter = old_counter
+
+ # We write our new counter value to a new file that gets moved into
+ # place to avoid filesystem corruption.
+ find_counter = ("find '%s' -type f -name COUNTER | " + \
+ "while read f; do echo $(<\"${f}\"); done | " + \
+ "sort -n | tail -n1") % os.path.join(self.root, VDB_PATH)
+ if os.path.exists(cpath):
+ cfile=open(cpath, "r")
+ try:
+ counter=long(cfile.readline())
+ except (ValueError,OverflowError):
+ try:
+ counter = long(commands.getoutput(find_counter).strip())
+ writemsg("!!! COUNTER was corrupted; resetting to value of %d\n" % counter,
+ noiselevel=-1)
+ changed=1
+ except (ValueError,OverflowError):
+ writemsg("!!! COUNTER data is corrupt in pkg db. The values need to be\n",
+ noiselevel=-1)
+ writemsg("!!! corrected/normalized so that portage can operate properly.\n",
+ noiselevel=-1)
+ writemsg("!!! A simple solution is not yet available so try #gentoo on IRC.\n")
+ sys.exit(2)
+ cfile.close()
+ else:
+ try:
+ counter = long(commands.getoutput(find_counter).strip())
+ writemsg("!!! Global counter missing. Regenerated from counter files to: %s\n" % counter,
+ noiselevel=-1)
+ except ValueError: # Value Error for long(), probably others for commands.getoutput
+ writemsg("!!! Initializing global counter.\n", noiselevel=-1)
+ counter=long(0)
+ changed=1
+
+ if counter < min_counter:
+ counter = min_counter+1000
+ changed = 1
+
+ if incrementing or changed:
+
+ #increment counter
+ counter += 1
+ # update new global counter file
+ write_atomic(cpath, str(counter))
+ return counter
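+ # Minimal usage sketch (assumes a vardbapi instance "vardb"; names are
+ # illustrative):
+ # new_value = vardb.counter_tick(myroot) # increment and persist
+ # next_value = vardb.get_counter_tick_core(myroot) # peek at the value the
+ # # next tick will return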
+
+class vartree(object):
+ "this tree will scan a var/db/pkg database located at root (passed to init)"
+ def __init__(self, root="/", virtual=None, clone=None, categories=None,
+ settings=None):
+ if clone:
+ self.root = clone.root[:]
+ self.dbapi = copy.deepcopy(clone.dbapi)
+ self.populated = 1
+ self.settings = config(clone=clone.settings)
+ else:
+ self.root = root[:]
+ if settings is None:
+ settings = globals()["settings"]
+ self.settings = settings # for key_expand calls
+ if categories is None:
+ categories = settings.categories
+ self.dbapi = vardbapi(self.root, categories=categories,
+ settings=settings, vartree=self)
+ self.populated = 1
+
+ def zap(self,mycpv):
+ return
+
+ def inject(self,mycpv):
+ return
+
+ def get_provide(self,mycpv):
+ myprovides=[]
+ mylines = None
+ try:
+ mylines, myuse = self.dbapi.aux_get(mycpv, ["PROVIDE","USE"])
+ if mylines:
+ myuse = myuse.split()
+ mylines = flatten(portage_dep.use_reduce(portage_dep.paren_reduce(mylines), uselist=myuse))
+ for myprovide in mylines:
+ mys = catpkgsplit(myprovide)
+ if not mys:
+ mys = myprovide.split("/")
+ myprovides += [mys[0] + "/" + mys[1]]
+ return myprovides
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ mydir = os.path.join(self.root, VDB_PATH, mycpv)
+ writemsg("\nParse Error reading PROVIDE and USE in '%s'\n" % mydir,
+ noiselevel=-1)
+ if mylines:
+ writemsg("Possibly Invalid: '%s'\n" % str(mylines),
+ noiselevel=-1)
+ writemsg("Exception: %s\n\n" % str(e), noiselevel=-1)
+ return []
+
+ def get_all_provides(self):
+ myprovides = {}
+ for node in self.getallcpv():
+ for mykey in self.get_provide(node):
+ if myprovides.has_key(mykey):
+ myprovides[mykey] += [node]
+ else:
+ myprovides[mykey] = [node]
+ return myprovides
+
+ def dep_bestmatch(self,mydep,use_cache=1):
+ "compatibility method -- all matches, not just visible ones"
+ #mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
+ mymatch = best(self.dbapi.match(
+ dep_expand(mydep, mydb=self.dbapi, settings=self.settings),
+ use_cache=use_cache))
+ if mymatch is None:
+ return ""
+ else:
+ return mymatch
+
+ def dep_match(self,mydep,use_cache=1):
+ "compatibility method -- we want to see all matches, not just visible ones"
+ #mymatch=match(mydep,self.dbapi)
+ mymatch=self.dbapi.match(mydep,use_cache=use_cache)
+ if mymatch is None:
+ return []
+ else:
+ return mymatch
+
+ def exists_specific(self,cpv):
+ return self.dbapi.cpv_exists(cpv)
+
+ def getallcpv(self):
+ """temporary function, probably to be renamed --- Gets a list of all
+ category/package-versions installed on the system."""
+ return self.dbapi.cpv_all()
+
+ def getallnodes(self):
+ """new behavior: these are all *unmasked* nodes. There may or may not be available
+ masked package for nodes in this nodes list."""
+ return self.dbapi.cp_all()
+
+ def exists_specific_cat(self,cpv,use_cache=1):
+ cpv = key_expand(cpv, mydb=self.dbapi, use_cache=use_cache,
+ settings=self.settings)
+ a=catpkgsplit(cpv)
+ if not a:
+ return 0
+ mylist=listdir(self.root+VDB_PATH+"/"+a[0],EmptyOnError=1)
+ for x in mylist:
+ b=pkgsplit(x)
+ if not b:
+ self.dbapi.invalidentry(self.root+VDB_PATH+"/"+a[0]+"/"+x)
+ continue
+ if a[1]==b[0]:
+ return 1
+ return 0
+
+ def getebuildpath(self,fullpackage):
+ cat,package=fullpackage.split("/")
+ return self.root+VDB_PATH+"/"+fullpackage+"/"+package+".ebuild"
+
+ def getnode(self,mykey,use_cache=1):
+ mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
+ settings=self.settings)
+ if not mykey:
+ return []
+ mysplit=mykey.split("/")
+ mydirlist=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
+ returnme=[]
+ for x in mydirlist:
+ mypsplit=pkgsplit(x)
+ if not mypsplit:
+ self.dbapi.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
+ continue
+ if mypsplit[0]==mysplit[1]:
+ appendme=[mysplit[0]+"/"+x,[mysplit[0],mypsplit[0],mypsplit[1],mypsplit[2]]]
+ returnme.append(appendme)
+ return returnme
+
+
+ def getslot(self,mycatpkg):
+ "Get a slot for a catpkg; assume it exists."
+ try:
+ return self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
+ except KeyError:
+ return ""
+
+ def hasnode(self,mykey,use_cache):
+ """Does the particular node (cat/pkg key) exist?"""
+ mykey = key_expand(mykey, mydb=self.dbapi, use_cache=use_cache,
+ settings=self.settings)
+ mysplit=mykey.split("/")
+ mydirlist=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
+ for x in mydirlist:
+ mypsplit=pkgsplit(x)
+ if not mypsplit:
+ self.dbapi.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
+ continue
+ if mypsplit[0]==mysplit[1]:
+ return 1
+ return 0
+
+ def populate(self):
+ self.populated=1
+
+auxdbkeys=[
+ 'DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
+ 'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
+ 'KEYWORDS', 'INHERITED', 'IUSE', 'CDEPEND',
+ 'PDEPEND', 'PROVIDE', 'EAPI',
+ 'UNUSED_01', 'UNUSED_02', 'UNUSED_03', 'UNUSED_04',
+ 'UNUSED_05', 'UNUSED_06', 'UNUSED_07',
+ ]
+auxdbkeylen=len(auxdbkeys)
+
+def close_portdbapi_caches():
+ for i in portdbapi.portdbapi_instances:
+ i.close_caches()
+
+
+class portdbapi(dbapi):
+ """this tree will scan a portage directory located at root (passed to init)"""
+ portdbapi_instances = []
+
+ def __init__(self,porttree_root,mysettings=None):
+ portdbapi.portdbapi_instances.append(self)
+
+ if mysettings:
+ self.mysettings = mysettings
+ else:
+ global settings
+ self.mysettings = config(clone=settings)
+
+ # This is strictly for use in aux_get() doebuild calls when metadata
+ # is generated by the depend phase. It's safest to use a clone for
+ # this purpose because doebuild makes many changes to the config
+ # instance that is passed in.
+ self.doebuild_settings = config(clone=self.mysettings)
+
+ self.manifestVerifyLevel = None
+ self.manifestVerifier = None
+ self.manifestCache = {} # {location: [stat, md5]}
+ self.manifestMissingCache = []
+
+ if "gpg" in self.mysettings.features:
+ self.manifestVerifyLevel = portage_gpg.EXISTS
+ if "strict" in self.mysettings.features:
+ self.manifestVerifyLevel = portage_gpg.MARGINAL
+ self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
+ elif "severe" in self.mysettings.features:
+ self.manifestVerifyLevel = portage_gpg.TRUSTED
+ self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", requireSignedRing=True, minimumTrust=self.manifestVerifyLevel)
+ else:
+ self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
+
+ #self.root=settings["PORTDIR"]
+ self.porttree_root = os.path.realpath(porttree_root)
+
+ self.depcachedir = self.mysettings.depcachedir[:]
+
+ self.tmpfs = self.mysettings["PORTAGE_TMPFS"]
+ if self.tmpfs and not os.path.exists(self.tmpfs):
+ self.tmpfs = None
+ if self.tmpfs and not os.access(self.tmpfs, os.W_OK):
+ self.tmpfs = None
+ if self.tmpfs and not os.access(self.tmpfs, os.R_OK):
+ self.tmpfs = None
+
+ self.eclassdb = eclass_cache.cache(self.porttree_root,
+ overlays=self.mysettings["PORTDIR_OVERLAY"].split())
+
+ self.metadbmodule = self.mysettings.load_best_module("portdbapi.metadbmodule")
+
+ #if the portdbapi is "frozen", then we assume that we can cache everything (that no updates to it are happening)
+ self.xcache={}
+ self.frozen=0
+
+ self.porttrees = [self.porttree_root] + \
+ [os.path.realpath(t) for t in self.mysettings["PORTDIR_OVERLAY"].split()]
+ self.treemap = {}
+ for path in self.porttrees:
+ repo_name_path = os.path.join( path, REPO_NAME_LOC )
+ try:
+ repo_name = open(repo_name_path, 'r').readline().strip()
+ self.treemap[repo_name] = path
+ except (OSError,IOError):
+ pass
+
+ self.auxdbmodule = self.mysettings.load_best_module("portdbapi.auxdbmodule")
+ self.auxdb = {}
+ self._init_cache_dirs()
+ # XXX: REMOVE THIS ONCE UNUSED_0 IS YANKED FROM auxdbkeys
+ # ~harring
+ filtered_auxdbkeys = filter(lambda x: not x.startswith("UNUSED_0"), auxdbkeys)
+ if secpass < 1:
+ from cache import metadata_overlay, volatile
+ for x in self.porttrees:
+ db_ro = self.auxdbmodule(self.depcachedir, x,
+ filtered_auxdbkeys, gid=portage_gid, readonly=True)
+ self.auxdb[x] = metadata_overlay.database(
+ self.depcachedir, x, filtered_auxdbkeys,
+ gid=portage_gid, db_rw=volatile.database,
+ db_ro=db_ro)
+ else:
+ for x in self.porttrees:
+ # location, label, auxdbkeys
+ self.auxdb[x] = self.auxdbmodule(
+ self.depcachedir, x, filtered_auxdbkeys, gid=portage_gid)
+ # Selectively cache metadata in order to optimize dep matching.
+ self._aux_cache_keys = set(["EAPI", "KEYWORDS", "SLOT"])
+ self._aux_cache = {}
+
+ def _init_cache_dirs(self):
+ """Create /var/cache/edb/dep and adjust permissions for the portage
+ group."""
+
+ dirmode = 02070
+ filemode = 060
+ modemask = 02
+
+ try:
+ for mydir in (self.depcachedir,):
+ if portage_util.ensure_dirs(mydir, gid=portage_gid, mode=dirmode, mask=modemask):
+ writemsg("Adjusting permissions recursively: '%s'\n" % mydir,
+ noiselevel=-1)
+ def onerror(e):
+ raise # bail out on the first error that occurs during recursion
+ if not apply_recursive_permissions(mydir,
+ gid=portage_gid, dirmode=dirmode, dirmask=modemask,
+ filemode=filemode, filemask=modemask, onerror=onerror):
+ raise portage_exception.OperationNotPermitted(
+ "Failed to apply recursive permissions for the portage group.")
+ except portage_exception.PortageException, e:
+ pass
+
+ def close_caches(self):
+ for x in self.auxdb.keys():
+ self.auxdb[x].sync()
+ self.auxdb.clear()
+
+ def flush_cache(self):
+ for x in self.auxdb.values():
+ x.sync()
+
+ def finddigest(self,mycpv):
+ try:
+ mydig = self.findname2(mycpv)[0]
+ if not mydig:
+ return ""
+ mydigs = mydig.split("/")[:-1]
+ mydig = "/".join(mydigs)
+ mysplit = mycpv.split("/")
+ except OSError:
+ return ""
+ return mydig+"/files/digest-"+mysplit[-1]
+
+ def findname(self,mycpv):
+ return self.findname2(mycpv)[0]
+
+ def getRepositoryPath( self, repository_id ):
+ """
+ This function is required for GLEP 42 compliance; given a valid repository ID
+ it must return a path to the repository
+ TreeMap = { id:path }
+ """
+ if repository_id in self.treemap:
+ return self.treemap[repository_id]
+ return None
+
+ def getRepositories( self ):
+ """
+ This function is required for GLEP 42 compliance; it will return a list of
+ repository ID's
+ TreeMap = { id:path }
+ """
+ return [k for k in self.treemap.keys() if k]
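+ # GLEP 42 usage sketch (assumes a portdbapi instance "portdb" and a
+ # repository whose repo_name file reads "gentoo"):
+ # for repo_id in portdb.getRepositories():
+ # print repo_id, portdb.getRepositoryPath(repo_id)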
+
+ def findname2(self, mycpv, mytree=None):
+ """
+ Returns the location of the CPV, and what overlay it was in.
+ Searches overlays first, then PORTDIR; this allows us to return the first
+ matching file. As opposed to starting in portdir and then doing overlays
+ second, we would have to exhaustively search the overlays until we found
+ the file we wanted.
+ """
+ if not mycpv:
+ return "",0
+ mysplit=mycpv.split("/")
+ psplit=pkgsplit(mysplit[1])
+
+ if mytree:
+ mytrees = [mytree]
+ else:
+ mytrees = self.porttrees[:]
+ mytrees.reverse()
+ if psplit:
+ for x in mytrees:
+ file=x+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
+ if os.access(file, os.R_OK):
+ return[file, x]
+ return None, 0
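+ # Lookup sketch (hypothetical cpv): findname2("sys-apps/foo-1.0") checks
+ # the overlays first and then PORTDIR for sys-apps/foo/foo-1.0.ebuild,
+ # returning (ebuild_path, tree_root), or (None, 0) if none is readable.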
+
+ def aux_get(self, mycpv, mylist, mytree=None):
+ "stub code for returning auxilliary db information, such as SLOT, DEPEND, etc."
+ 'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
+ 'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or raise KeyError if error'
+ cache_me = False
+ if not mytree and not set(mylist).difference(self._aux_cache_keys):
+ aux_cache = self._aux_cache.get(mycpv)
+ if aux_cache is not None:
+ return [aux_cache[x] for x in mylist]
+ cache_me = True
+ global auxdbkeys,auxdbkeylen
+ cat,pkg = mycpv.split("/", 1)
+
+ myebuild, mylocation = self.findname2(mycpv, mytree)
+
+ if not myebuild:
+ writemsg("!!! aux_get(): ebuild path for '%(cpv)s' not specified:\n" % {"cpv":mycpv},
+ noiselevel=1)
+ writemsg("!!! %s\n" % myebuild, noiselevel=1)
+ raise KeyError(mycpv)
+
+ myManifestPath = "/".join(myebuild.split("/")[:-1])+"/Manifest"
+ if "gpg" in self.mysettings.features:
+ try:
+ mys = portage_gpg.fileStats(myManifestPath)
+ if (myManifestPath in self.manifestCache) and \
+ (self.manifestCache[myManifestPath] == mys):
+ pass
+ elif self.manifestVerifier:
+ if not self.manifestVerifier.verify(myManifestPath):
+ # Verification failed the desired level.
+ raise portage_exception.UntrustedSignature, "Untrusted Manifest: %(manifest)s" % {"manifest":myManifestPath}
+
+ if ("severe" in self.mysettings.features) and \
+ (mys != portage_gpg.fileStats(myManifestPath)):
+ raise portage_exception.SecurityViolation, "Manifest changed: %(manifest)s" % {"manifest":myManifestPath}
+
+ except portage_exception.InvalidSignature, e:
+ if ("strict" in self.mysettings.features) or \
+ ("severe" in self.mysettings.features):
+ raise
+ writemsg("!!! INVALID MANIFEST SIGNATURE DETECTED: %(manifest)s\n" % {"manifest":myManifestPath})
+ except portage_exception.MissingSignature, e:
+ if ("severe" in self.mysettings.features):
+ raise
+ if ("strict" in self.mysettings.features):
+ if myManifestPath not in self.manifestMissingCache:
+ writemsg("!!! WARNING: Missing signature in: %(manifest)s\n" % {"manifest":myManifestPath})
+ self.manifestMissingCache.insert(0,myManifestPath)
+ except (OSError,portage_exception.FileNotFound), e:
+ if ("strict" in self.mysettings.features) or \
+ ("severe" in self.mysettings.features):
+ raise portage_exception.SecurityViolation, "Error in verification of signatures: %(errormsg)s" % {"errormsg":str(e)}
+ writemsg("!!! Manifest is missing or inaccessable: %(manifest)s\n" % {"manifest":myManifestPath},
+ noiselevel=-1)
+
+
+ if os.access(myebuild, os.R_OK):
+ emtime=os.stat(myebuild)[stat.ST_MTIME]
+ else:
+ writemsg("!!! aux_get(): ebuild for '%(cpv)s' does not exist at:\n" % {"cpv":mycpv},
+ noiselevel=-1)
+ writemsg("!!! %s\n" % myebuild,
+ noiselevel=-1)
+ raise KeyError
+
+ try:
+ mydata = self.auxdb[mylocation][mycpv]
+ if emtime != long(mydata.get("_mtime_", 0)):
+ doregen = True
+ elif len(mydata.get("_eclasses_", [])) > 0:
+ doregen = not self.eclassdb.is_eclass_data_valid(mydata["_eclasses_"])
+ else:
+ doregen = False
+
+ except KeyError:
+ doregen = True
+ except CacheError:
+ doregen = True
+ try: del self.auxdb[mylocation][mycpv]
+ except KeyError: pass
+
+ writemsg("auxdb is valid: "+str(not doregen)+" "+str(pkg)+"\n", 2)
+
+ if doregen:
+ writemsg("doregen: %s %s\n" % (doregen,mycpv), 2)
+ writemsg("Generating cache entry(0) for: "+str(myebuild)+"\n",1)
+
+ self.doebuild_settings.reset()
+ mydata = {}
+ myret = doebuild(myebuild, "depend",
+ self.doebuild_settings["ROOT"], self.doebuild_settings,
+ dbkey=mydata, tree="porttree", mydbapi=self)
+ if myret != os.EX_OK:
+ raise KeyError(mycpv)
+
+ if "EAPI" not in mydata or not mydata["EAPI"].strip():
+ mydata["EAPI"] = "0"
+
+ if not eapi_is_supported(mydata["EAPI"]):
+ # if newer version, wipe everything and negate eapi
+ eapi = mydata["EAPI"]
+ mydata = {}
+ map(lambda x:mydata.setdefault(x, ""), auxdbkeys)
+ mydata["EAPI"] = "-"+eapi
+
+ if mydata.get("INHERITED", False):
+ mydata["_eclasses_"] = self.eclassdb.get_eclass_data(mydata["INHERITED"].split())
+ else:
+ mydata["_eclasses_"] = {}
+
+ del mydata["INHERITED"]
+
+ mydata["_mtime_"] = emtime
+
+ self.auxdb[mylocation][mycpv] = mydata
+
+ if not mydata.setdefault("EAPI", "0"):
+ mydata["EAPI"] = "0"
+
+ #finally, we look at our internal cache entry and return the requested data.
+ returnme = []
+ for x in mylist:
+ if x == "INHERITED":
+ returnme.append(' '.join(mydata.get("_eclasses_", {}).keys()))
+ else:
+ returnme.append(mydata.get(x,""))
+
+ if cache_me:
+ aux_cache = {}
+ for x in self._aux_cache_keys:
+ aux_cache[x] = mydata.get(x, "")
+ self._aux_cache[mycpv] = aux_cache
+
+ return returnme
+
+ def getfetchlist(self, mypkg, useflags=None, mysettings=None, all=0, mytree=None):
+ if mysettings is None:
+ mysettings = self.mysettings
+ try:
+ myuris = self.aux_get(mypkg, ["SRC_URI"], mytree=mytree)[0]
+ except KeyError:
+ print red("getfetchlist():")+" aux_get() error reading "+mypkg+"; aborting."
+ sys.exit(1)
+
+ if useflags is None:
+ useflags = mysettings["USE"].split()
+
+ myurilist = portage_dep.paren_reduce(myuris)
+ myurilist = portage_dep.use_reduce(myurilist,uselist=useflags,matchall=all)
+ newuris = flatten(myurilist)
+
+ myfiles = []
+ for x in newuris:
+ mya = os.path.basename(x)
+ if not mya in myfiles:
+ myfiles.append(mya)
+ return [newuris, myfiles]
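+ # Usage sketch (hypothetical cpv); with all=1 the USE conditionals in
+ # SRC_URI are disregarded and every URI is included:
+ # myuris, myfiles = portdb.getfetchlist("sys-apps/foo-1.0", all=1)
+ # "myfiles" holds the unique distfile basenames from SRC_URI.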
+
+ def getfetchsizes(self,mypkg,useflags=None,debug=0):
+ # returns a filename:size dictionary of remaining downloads
+ myebuild = self.findname(mypkg)
+ pkgdir = os.path.dirname(myebuild)
+ mf = Manifest(pkgdir, self.mysettings["DISTDIR"])
+ checksums = mf.getDigests()
+ if not checksums:
+ if debug: print "[empty/missing/bad digest]: "+mypkg
+ return None
+ filesdict={}
+ if useflags is None:
+ myuris, myfiles = self.getfetchlist(mypkg,all=1)
+ else:
+ myuris, myfiles = self.getfetchlist(mypkg,useflags=useflags)
+ #XXX: maybe this should be improved: take partial downloads
+ # into account? check checksums?
+ for myfile in myfiles:
+ if myfile not in checksums:
+ if debug:
+ writemsg("[bad digest]: missing %s for %s\n" % (myfile, mypkg))
+ continue
+ file_path = os.path.join(self.mysettings["DISTDIR"], myfile)
+ mystat = None
+ try:
+ mystat = os.stat(file_path)
+ except OSError, e:
+ pass
+ if mystat is None:
+ existing_size = 0
+ else:
+ existing_size = mystat.st_size
+ remaining_size = int(checksums[myfile]["size"]) - existing_size
+ if remaining_size > 0:
+ # Assume the download is resumable.
+ filesdict[myfile] = remaining_size
+ elif remaining_size < 0:
+ # The existing file is too large and therefore corrupt.
+ filesdict[myfile] = int(checksums[myfile]["size"])
+ return filesdict
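+ # Example result (hypothetical): {"foo-1.0.tar.bz2": 102400} means 102400
+ # bytes are still missing; fully fetched files are omitted, and oversized
+ # (corrupt) files are counted at their full expected size.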
+
+ def fetch_check(self, mypkg, useflags=None, mysettings=None, all=False):
+ if not useflags:
+ if mysettings:
+ useflags = mysettings["USE"].split()
+ myuri, myfiles = self.getfetchlist(mypkg, useflags=useflags, mysettings=mysettings, all=all)
+ myebuild = self.findname(mypkg)
+ pkgdir = os.path.dirname(myebuild)
+ mf = Manifest(pkgdir, self.mysettings["DISTDIR"])
+ mysums = mf.getDigests()
+
+ failures = {}
+ for x in myfiles:
+ if not mysums or x not in mysums:
+ ok = False
+ reason = "digest missing"
+ else:
+ try:
+ ok, reason = portage_checksum.verify_all(
+ os.path.join(self.mysettings["DISTDIR"], x), mysums[x])
+ except portage_exception.FileNotFound, e:
+ ok = False
+ reason = "File Not Found: '%s'" % str(e)
+ if not ok:
+ failures[x] = reason
+ if failures:
+ return False
+ return True
+
+ def getsize(self,mypkg,useflags=None,debug=0):
+ # returns the total size of remaining downloads
+ #
+ # we use getfetchsizes() now, so this function is obsolete
+ #
+ filesdict=self.getfetchsizes(mypkg,useflags=useflags,debug=debug)
+ if filesdict is None:
+ return "[empty/missing/bad digest]"
+ mysize=0
+ for myfile in filesdict.keys():
+ mysize+=filesdict[myfile]
+ return mysize
+
+ def cpv_exists(self,mykey):
+ "Tells us whether an actual ebuild exists on disk (no masking)"
+ cps2=mykey.split("/")
+ cps=catpkgsplit(mykey,silent=0)
+ if not cps:
+ #invalid cat/pkg-v
+ return 0
+ if self.findname(cps[0]+"/"+cps2[1]):
+ return 1
+ else:
+ return 0
+
+ def cp_all(self):
+ "returns a list of all keys in our tree"
+ d={}
+ for x in self.mysettings.categories:
+ for oroot in self.porttrees:
+ for y in listdir(oroot+"/"+x,EmptyOnError=1,ignorecvs=1,dirsonly=1):
+ d[x+"/"+y] = None
+ l = d.keys()
+ l.sort()
+ return l
+
+ def p_list(self,mycp):
+ d={}
+ for oroot in self.porttrees:
+ for x in listdir(oroot+"/"+mycp,EmptyOnError=1,ignorecvs=1):
+ if x[-7:]==".ebuild":
+ d[x[:-7]] = None
+ return d.keys()
+
+ def cp_list(self, mycp, use_cache=1, mytree=None):
+ mysplit=mycp.split("/")
+ d={}
+ if mytree:
+ mytrees = [mytree]
+ else:
+ mytrees = self.porttrees
+ for oroot in mytrees:
+ for x in listdir(oroot+"/"+mycp,EmptyOnError=1,ignorecvs=1):
+ if x.endswith(".ebuild"):
+ pf = x[:-7]
+ ps = pkgsplit(pf)
+ if not ps:
+ writemsg("\nInvalid ebuild name: %s\n" % \
+ os.path.join(oroot, mycp, x), noiselevel=-1)
+ continue
+ d[mysplit[0]+"/"+pf] = None
+ return d.keys()
+
+ def freeze(self):
+ for x in ["list-visible","bestmatch-visible","match-visible","match-all"]:
+ self.xcache[x]={}
+ self.frozen=1
+
+ def melt(self):
+ self.xcache={}
+ self.frozen=0
+
+ def xmatch(self,level,origdep,mydep=None,mykey=None,mylist=None):
+ "caching match function; very trick stuff"
+ #if no updates are being made to the tree, we can consult our xcache...
+ if self.frozen:
+ try:
+ return self.xcache[level][origdep][:]
+ except KeyError:
+ pass
+
+ if not mydep:
+ #this stuff only runs on first call of xmatch()
+ #create mydep, mykey from origdep
+ mydep = dep_expand(origdep, mydb=self, settings=self.mysettings)
+ mykey=dep_getkey(mydep)
+
+ if level=="list-visible":
+ #a list of all visible packages, not called directly (just by xmatch())
+ #myval=self.visible(self.cp_list(mykey))
+ myval=self.gvisible(self.visible(self.cp_list(mykey)))
+ elif level=="bestmatch-visible":
+ #dep match -- best match of all visible packages
+ myval=best(self.xmatch("match-visible",None,mydep=mydep,mykey=mykey))
+ #get all visible matches (from xmatch()), then choose the best one
+ elif level=="bestmatch-list":
+ #dep match -- find best match but restrict search to sublist
+ myval=best(match_from_list(mydep,mylist))
+ #no point in calling xmatch again since we're not caching list deps
+ elif level=="match-list":
+ #dep match -- find all matches but restrict search to sublist (used in 2nd half of visible())
+ myval=match_from_list(mydep,mylist)
+ elif level=="match-visible":
+ #dep match -- find all visible matches
+ myval = match_from_list(mydep,
+ self.xmatch("list-visible", mykey, mydep=mykey, mykey=mykey))
+ #get all visible packages, then get the matching ones
+ elif level=="match-all":
+ #match *all* visible *and* masked packages
+ myval=match_from_list(mydep,self.cp_list(mykey))
+ else:
+ print "ERROR: xmatch doesn't handle",level,"query!"
+ raise KeyError
+ myslot = portage_dep.dep_getslot(mydep)
+ if myslot is not None:
+ slotmatches = []
+ for cpv in myval:
+ try:
+ if self.aux_get(cpv, ["SLOT"])[0] == myslot:
+ slotmatches.append(cpv)
+ except KeyError:
+ pass # ebuild masked by corruption
+ myval = slotmatches
+ if self.frozen and (level not in ["match-list","bestmatch-list"]):
+ self.xcache[level][mydep]=myval
+ if origdep and origdep != mydep:
+ self.xcache[level][origdep] = myval
+ return myval[:]
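+ # Level sketch (assumes a portdbapi instance "portdb"; atoms are
+ # hypothetical):
+ # portdb.xmatch("match-all", ">=sys-apps/foo-1.0") # masked included
+ # portdb.xmatch("bestmatch-visible", "sys-apps/foo") # best visible match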
+
+ def match(self,mydep,use_cache=1):
+ return self.xmatch("match-visible",mydep)
+
+ def visible(self,mylist):
+ """two functions in one. Accepts a list of cpv values and uses the package.mask *and*
+ packages file to remove invisible entries, returning remaining items. This function assumes
+ that all entries in mylist have the same category and package name."""
+ if (mylist is None) or (len(mylist)==0):
+ return []
+ newlist=mylist[:]
+ #first, we mask out packages in the package.mask file
+ mykey=newlist[0]
+ cpv=catpkgsplit(mykey)
+ if not cpv:
+ #invalid cat/pkg-v
+ print "visible(): invalid cat/pkg-v:",mykey
+ return []
+ mycp=cpv[0]+"/"+cpv[1]
+ maskdict=self.mysettings.pmaskdict
+ unmaskdict=self.mysettings.punmaskdict
+ if maskdict.has_key(mycp):
+ for x in maskdict[mycp]:
+ mymatches=self.xmatch("match-all",x)
+ if mymatches is None:
+ #error in package.mask file; print warning and continue:
+ print "visible(): package.mask entry \""+x+"\" is invalid, ignoring..."
+ continue
+ for y in mymatches:
+ unmask=0
+ if unmaskdict.has_key(mycp):
+ for z in unmaskdict[mycp]:
+ mymatches_unmask=self.xmatch("match-all",z)
+ if y in mymatches_unmask:
+ unmask=1
+ break
+ if unmask==0:
+ try:
+ newlist.remove(y)
+ except ValueError:
+ pass
+
+ revmaskdict=self.mysettings.prevmaskdict
+ if revmaskdict.has_key(mycp):
+ for x in revmaskdict[mycp]:
+ #important: only match against the still-unmasked entries...
+ #notice how we pass "newlist" to the xmatch() call below....
+ #Without this, ~ deps in the packages files are broken.
+ mymatches=self.xmatch("match-list",x,mylist=newlist)
+ if mymatches is None:
+ #error in packages file; print warning and continue:
+ print "emerge: visible(): profile packages entry \""+x+"\" is invalid, ignoring..."
+ continue
+ pos=0
+ while pos<len(newlist):
+ if newlist[pos] not in mymatches:
+ del newlist[pos]
+ else:
+ pos += 1
+ return newlist
+
+ def gvisible(self,mylist):
+ "strip out group-masked (not in current group) entries"
+
+ if mylist is None:
+ return []
+ newlist=[]
+
+ accept_keywords = self.mysettings["ACCEPT_KEYWORDS"].split()
+ pkgdict = self.mysettings.pkeywordsdict
+ for mycpv in mylist:
+ try:
+ keys, eapi = self.aux_get(mycpv, ["KEYWORDS", "EAPI"])
+ except KeyError:
+ continue
+ except portage_exception.PortageException, e:
+ writemsg("!!! Error: aux_get('%s', ['KEYWORDS', 'EAPI'])\n" % \
+ mycpv, noiselevel=-1)
+ writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ del e
+ continue
+ mygroups=keys.split()
+ # Repoman may modify this attribute as necessary.
+ pgroups = accept_keywords[:]
+ match=0
+ cp = dep_getkey(mycpv)
+ if pkgdict.has_key(cp):
+ matches = match_to_list(mycpv, pkgdict[cp].keys())
+ for atom in matches:
+ pgroups.extend(pkgdict[cp][atom])
+ if matches:
+ inc_pgroups = []
+ for x in pgroups:
+ # The -* special case should be removed once the tree
+ # is clean of KEYWORDS=-* crap
+ if x != "-*" and x.startswith("-"):
+ try:
+ inc_pgroups.remove(x[1:])
+ except ValueError:
+ pass
+ if x not in inc_pgroups:
+ inc_pgroups.append(x)
+ pgroups = inc_pgroups
+ del inc_pgroups
+ hasstable = False
+ hastesting = False
+ for gp in mygroups:
+ if gp=="*":
+ writemsg("--- WARNING: Package '%s' uses '*' keyword.\n" % mycpv,
+ noiselevel=-1)
+ match=1
+ break
+ elif gp in pgroups:
+ match=1
+ break
+ elif gp[0] == "~":
+ hastesting = True
+ elif gp[0] != "-":
+ hasstable = True
+ if not match and ((hastesting and "~*" in pgroups) or (hasstable and "*" in pgroups) or "**" in pgroups):
+ match=1
+ if match and eapi_is_supported(eapi):
+ newlist.append(mycpv)
+ return newlist
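+ # Keyword sketch (hypothetical settings): with ACCEPT_KEYWORDS="x86", a
+ # package with KEYWORDS="~x86" is dropped unless package.keywords adds
+ # "~x86" (or "~*"/"**") for a matching atom; "-x86" entries subtract.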
+
+class binarytree(object):
+ "this tree scans for a list of all packages available in PKGDIR"
+ def __init__(self, root, pkgdir, virtual=None, settings=None, clone=None):
+ if clone:
+ # XXX This isn't cloning. It's an instance of the same thing.
+ self.root=clone.root
+ self.pkgdir=clone.pkgdir
+ self.dbapi=clone.dbapi
+ self.populated=clone.populated
+ self.tree=clone.tree
+ self.remotepkgs=clone.remotepkgs
+ self.invalids=clone.invalids
+ self.settings = clone.settings
+ else:
+ self.root=root
+ #self.pkgdir=settings["PKGDIR"]
+ self.pkgdir = normalize_path(pkgdir)
+ self.dbapi = bindbapi(self, settings=settings)
+ self.populated=0
+ self.tree={}
+ self.remotepkgs={}
+ self.invalids=[]
+ self.settings = settings
+ self._pkg_paths = {}
+
+ def move_ent(self,mylist):
+ if not self.populated:
+ self.populate()
+ origcp=mylist[1]
+ newcp=mylist[2]
+ # sanity check
+ for cp in [origcp,newcp]:
+ if not (isvalidatom(cp) and isjustname(cp)):
+ raise portage_exception.InvalidPackageName(cp)
+ origcat = origcp.split("/")[0]
+ mynewcat=newcp.split("/")[0]
+ origmatches=self.dbapi.cp_list(origcp)
+ if not origmatches:
+ return
+ for mycpv in origmatches:
+
+ mycpsplit=catpkgsplit(mycpv)
+ mynewcpv=newcp+"-"+mycpsplit[2]
+ if mycpsplit[3]!="r0":
+ mynewcpv += "-"+mycpsplit[3]
+ myoldpkg=mycpv.split("/")[1]
+ mynewpkg=mynewcpv.split("/")[1]
+
+ if (mynewpkg != myoldpkg) and os.path.exists(self.getname(mynewcpv)):
+ writemsg("!!! Cannot update binary: Destination exists.\n",
+ noiselevel=-1)
+ writemsg("!!! "+mycpv+" -> "+mynewcpv+"\n", noiselevel=-1)
+ continue
+
+ tbz2path=self.getname(mycpv)
+ if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
+ writemsg("!!! Cannot update readonly binary: "+mycpv+"\n",
+ noiselevel=-1)
+ continue
+
+ #print ">>> Updating data in:",mycpv
+ writemsg_stdout("%")
+ mytbz2 = xpak.tbz2(tbz2path)
+ mydata = mytbz2.get_data()
+ updated_items = update_dbentries([mylist], mydata)
+ mydata.update(updated_items)
+ mydata["CATEGORY"] = mynewcat+"\n"
+ if mynewpkg != myoldpkg:
+ mydata[mynewpkg+".ebuild"] = mydata[myoldpkg+".ebuild"]
+ del mydata[myoldpkg+".ebuild"]
+ mydata["PF"] = mynewpkg + "\n"
+ mytbz2.recompose_mem(xpak.xpak_mem(mydata))
+
+ self.dbapi.cpv_remove(mycpv)
+ del self._pkg_paths[mycpv]
+ new_path = self.getname(mynewcpv)
+ self._pkg_paths[mynewcpv] = os.path.join(
+ *new_path.split(os.path.sep)[-2:])
+ if new_path != tbz2path:
+ try:
+ os.makedirs(os.path.dirname(new_path))
+ except OSError, e:
+ if e.errno != errno.EEXIST:
+ raise
+ del e
+ os.rename(tbz2path, new_path)
+ self._remove_symlink(mycpv)
+ if new_path.split(os.path.sep)[-2] == "All":
+ self._create_symlink(mynewcpv)
+ self.dbapi.cpv_inject(mynewcpv)
+
+ return 1
+
+ def _remove_symlink(self, cpv):
+ """Remove a ${PKGDIR}/${CATEGORY}/${PF}.tbz2 symlink and also remove
+ the ${PKGDIR}/${CATEGORY} directory if empty. The file will not be
+ removed if os.path.islink() returns False."""
+ mycat, mypkg = catsplit(cpv)
+ mylink = os.path.join(self.pkgdir, mycat, mypkg + ".tbz2")
+ if os.path.islink(mylink):
+ """Only remove it if it's really a link so that this method never
+ removes a real package that was placed here to avoid a collision."""
+ os.unlink(mylink)
+ try:
+ os.rmdir(os.path.join(self.pkgdir, mycat))
+ except OSError, e:
+ if e.errno not in (errno.ENOENT, errno.ENOTEMPTY):
+ raise
+ del e
+
+ def _create_symlink(self, cpv):
+ """Create a ${PKGDIR}/${CATEGORY}/${PF}.tbz2 symlink (and
+ ${PKGDIR}/${CATEGORY} directory, if necessary). Any file that may
+ exist in the location of the symlink will first be removed."""
+ mycat, mypkg = catsplit(cpv)
+ full_path = os.path.join(self.pkgdir, mycat, mypkg + ".tbz2")
+ try:
+ os.makedirs(os.path.dirname(full_path))
+ except OSError, e:
+ if e.errno != errno.EEXIST:
+ raise
+ del e
+ try:
+ os.unlink(full_path)
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ os.symlink(os.path.join("..", "All", mypkg + ".tbz2"), full_path)
+
+ def move_slot_ent(self, mylist):
+ if not self.populated:
+ self.populate()
+ pkg=mylist[1]
+ origslot=mylist[2]
+ newslot=mylist[3]
+
+ if not isvalidatom(pkg):
+ raise portage_exception.InvalidAtom(pkg)
+
+ origmatches=self.dbapi.match(pkg)
+ if not origmatches:
+ return
+ for mycpv in origmatches:
+ mycpsplit=catpkgsplit(mycpv)
+ myoldpkg=mycpv.split("/")[1]
+ tbz2path=self.getname(mycpv)
+ if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
+ writemsg("!!! Cannot update readonly binary: "+mycpv+"\n",
+ noiselevel=-1)
+ continue
+
+ #print ">>> Updating data in:",mycpv
+ mytbz2 = xpak.tbz2(tbz2path)
+ mydata = mytbz2.get_data()
+
+ slot = mydata["SLOT"]
+ if (not slot):
+ continue
+
+ if (slot[0]!=origslot):
+ continue
+
+ writemsg_stdout("S")
+ mydata["SLOT"] = newslot+"\n"
+ mytbz2.recompose_mem(xpak.xpak_mem(mydata))
+ return 1
+
+ def update_ents(self, update_iter):
+ if len(update_iter) == 0:
+ return
+ if not self.populated:
+ self.populate()
+
+ for mycpv in self.dbapi.cp_all():
+ tbz2path=self.getname(mycpv)
+ if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
+ writemsg("!!! Cannot update readonly binary: "+mycpv+"\n",
+ noiselevel=-1)
+ continue
+ #print ">>> Updating binary data:",mycpv
+ writemsg_stdout("*")
+ mytbz2 = xpak.tbz2(tbz2path)
+ mydata = mytbz2.get_data()
+ updated_items = update_dbentries(update_iter, mydata)
+ if len(updated_items) > 0:
+ mydata.update(updated_items)
+ mytbz2.recompose_mem(xpak.xpak_mem(mydata))
+ return 1
+
+ def prevent_collision(self, cpv):
+ """Make sure that the file location ${PKGDIR}/All/${PF}.tbz2 is safe to
+ use for a given cpv. If a collision will occur with an existing
+ package from another category, the existing package will be bumped to
+ ${PKGDIR}/${CATEGORY}/${PF}.tbz2 so that both can coexist."""
+ full_path = self.getname(cpv)
+ if "All" == full_path.split(os.path.sep)[-2]:
+ return
+ """Move a colliding package if it exists. Code below this point only
+ executes in rare cases."""
+ mycat, mypkg = catsplit(cpv)
+ myfile = mypkg + ".tbz2"
+ mypath = os.path.join("All", myfile)
+ dest_path = os.path.join(self.pkgdir, mypath)
+ if os.path.exists(dest_path):
+ # For invalid packages, other_cat could be None.
+ other_cat = xpak.tbz2(dest_path).getfile("CATEGORY")
+ if other_cat:
+ other_cat = other_cat.strip()
+ self._move_from_all(other_cat + "/" + mypkg)
+ """The file may or may not exist. Move it if necessary and update
+ internal state for future calls to getname()."""
+ self._move_to_all(cpv)
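+ # Collision sketch (hypothetical packages): if ${PKGDIR}/All/foo-1.0.tbz2
+ # already belongs to dev-libs/foo-1.0, prevent_collision("www-apps/foo-1.0")
+ # first moves the dev-libs package to ${PKGDIR}/dev-libs/foo-1.0.tbz2.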
+
+ def _move_to_all(self, cpv):
+ """If the file exists, move it. Whether or not it exists, update state
+ for future getname() calls."""
+ mycat, mypkg = catsplit(cpv)
+ myfile = mypkg + ".tbz2"
+ src_path = os.path.join(self.pkgdir, mycat, myfile)
+ try:
+ mystat = os.lstat(src_path)
+ except OSError, e:
+ mystat = None
+ if mystat and stat.S_ISREG(mystat.st_mode):
+ try:
+ os.makedirs(os.path.join(self.pkgdir, "All"))
+ except OSError, e:
+ if e.errno != errno.EEXIST:
+ raise
+ del e
+ os.rename(src_path, os.path.join(self.pkgdir, "All", myfile))
+ self._create_symlink(cpv)
+ self._pkg_paths[cpv] = os.path.join("All", myfile)
+
+ def _move_from_all(self, cpv):
+ """Move a package from ${PKGDIR}/All/${PF}.tbz2 to
+ ${PKGDIR}/${CATEGORY}/${PF}.tbz2 and update state from getname calls."""
+ self._remove_symlink(cpv)
+ mycat, mypkg = catsplit(cpv)
+ myfile = mypkg + ".tbz2"
+ mypath = os.path.join(mycat, myfile)
+ dest_path = os.path.join(self.pkgdir, mypath)
+ try:
+ os.makedirs(os.path.dirname(dest_path))
+ except OSError, e:
+ if e.errno != errno.EEXIST:
+ raise
+ del e
+ os.rename(os.path.join(self.pkgdir, "All", myfile), dest_path)
+ self._pkg_paths[cpv] = mypath
+
+ def populate(self, getbinpkgs=0,getbinpkgsonly=0):
+ "populates the binarytree"
+ if (not os.path.isdir(self.pkgdir) and not getbinpkgs):
+ return 0
+ if (not os.path.isdir(self.pkgdir+"/All") and not getbinpkgs):
+ return 0
+
+ if not getbinpkgsonly:
+ pkg_paths = {}
+ dirs = listdir(self.pkgdir, dirsonly=True, EmptyOnError=True)
+ if "All" in dirs:
+ dirs.remove("All")
+ dirs.sort()
+ dirs.insert(0, "All")
+ for mydir in dirs:
+ for myfile in listdir(os.path.join(self.pkgdir, mydir)):
+ if not myfile.endswith(".tbz2"):
+ continue
+ mypath = os.path.join(mydir, myfile)
+ full_path = os.path.join(self.pkgdir, mypath)
+ if os.path.islink(full_path):
+ continue
+ mytbz2 = xpak.tbz2(full_path)
+ # For invalid packages, mycat could be None.
+ mycat = mytbz2.getfile("CATEGORY")
+ mypf = mytbz2.getfile("PF")
+ mypkg = myfile[:-5]
+ if not mycat or not mypf:
+ #old-style or corrupt package
+ writemsg("!!! Invalid binary package: '%s'\n" % full_path,
+ noiselevel=-1)
+ writemsg("!!! This binary package is not " + \
+ "recoverable and should be deleted.\n",
+ noiselevel=-1)
+ self.invalids.append(mypkg)
+ continue
+ mycat = mycat.strip()
+ if mycat != mydir and mydir != "All":
+ continue
+ if mypkg != mypf.strip():
+ continue
+ mycpv = mycat + "/" + mypkg
+ if mycpv in pkg_paths:
+ # All is first, so it's preferred.
+ continue
+ pkg_paths[mycpv] = mypath
+ self.dbapi.cpv_inject(mycpv)
+ self._pkg_paths = pkg_paths
+
+ if getbinpkgs and not self.settings["PORTAGE_BINHOST"]:
+ writemsg(red("!!! PORTAGE_BINHOST unset, but use is requested.\n"),
+ noiselevel=-1)
+
+ if getbinpkgs and \
+ self.settings["PORTAGE_BINHOST"] and not self.remotepkgs:
+ try:
+ chunk_size = long(self.settings["PORTAGE_BINHOST_CHUNKSIZE"])
+ if chunk_size < 8:
+ chunk_size = 8
+ except (ValueError, KeyError):
+ chunk_size = 3000
+
+ writemsg(green("Fetching binary packages info...\n"))
+ self.remotepkgs = getbinpkg.dir_get_metadata(
+ self.settings["PORTAGE_BINHOST"], chunk_size=chunk_size)
+ writemsg(green(" -- DONE!\n\n"))
+
+ for mypkg in self.remotepkgs.keys():
+ if not self.remotepkgs[mypkg].has_key("CATEGORY"):
+ #old-style or corrupt package
+ writemsg("!!! Invalid remote binary package: "+mypkg+"\n",
+ noiselevel=-1)
+ del self.remotepkgs[mypkg]
+ continue
+ mycat=self.remotepkgs[mypkg]["CATEGORY"].strip()
+ fullpkg=mycat+"/"+mypkg[:-5]
+ mykey=dep_getkey(fullpkg)
+ try:
+ # invalid tbz2's can hurt things.
+ #print "cpv_inject("+str(fullpkg)+")"
+ self.dbapi.cpv_inject(fullpkg)
+ #print " -- Injected"
+ except SystemExit, e:
+ raise
+ except:
+ writemsg("!!! Failed to inject remote binary package:"+str(fullpkg)+"\n",
+ noiselevel=-1)
+ del self.remotepkgs[mypkg]
+ continue
+ self.populated=1
+
+ def inject(self,cpv):
+ return self.dbapi.cpv_inject(cpv)
+
+ def exists_specific(self,cpv):
+ if not self.populated:
+ self.populate()
+ return self.dbapi.match(
+ dep_expand("="+cpv, mydb=self.dbapi, settings=self.settings))
+
+ def dep_bestmatch(self,mydep):
+ "compatibility method -- all matches, not just visible ones"
+ if not self.populated:
+ self.populate()
+ writemsg("\n\n", 1)
+ writemsg("mydep: %s\n" % mydep, 1)
+ mydep = dep_expand(mydep, mydb=self.dbapi, settings=self.settings)
+ writemsg("mydep: %s\n" % mydep, 1)
+ mykey=dep_getkey(mydep)
+ writemsg("mykey: %s\n" % mykey, 1)
+ mymatch=best(match_from_list(mydep,self.dbapi.cp_list(mykey)))
+ writemsg("mymatch: %s\n" % mymatch, 1)
+ if mymatch is None:
+ return ""
+ return mymatch
+
+ def getname(self,pkgname):
+ """Returns a file location for this package. The default location is
+ ${PKGDIR}/All/${PF}.tbz2, but will be ${PKGDIR}/${CATEGORY}/${PF}.tbz2
+ in the rare event of a collision. The prevent_collision() method can
+ be called to ensure that ${PKGDIR}/All/${PF}.tbz2 is available for a
+ specific cpv."""
+ if not self.populated:
+ self.populate()
+ mycpv = pkgname
+ mypath = self._pkg_paths.get(mycpv, None)
+ if mypath:
+ return os.path.join(self.pkgdir, mypath)
+ mycat, mypkg = catsplit(mycpv)
+ mypath = os.path.join("All", mypkg + ".tbz2")
+ if mypath in self._pkg_paths.values():
+ mypath = os.path.join(mycat, mypkg + ".tbz2")
+ self._pkg_paths[mycpv] = mypath # cache for future lookups
+ return os.path.join(self.pkgdir, mypath)
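+ # Path sketch (hypothetical cpv "sys-apps/foo-1.0"): getname() normally
+ # yields ${PKGDIR}/All/foo-1.0.tbz2, or ${PKGDIR}/sys-apps/foo-1.0.tbz2
+ # when another category already claimed the All/ name.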
+
+ def isremote(self,pkgname):
+ "Returns true if the package is kept remotely."
+ mysplit=pkgname.split("/")
+ remote = (not os.path.exists(self.getname(pkgname))) and self.remotepkgs.has_key(mysplit[1]+".tbz2")
+ return remote
+
+ def get_use(self,pkgname):
+ mysplit=pkgname.split("/")
+ if self.isremote(pkgname):
+ return self.remotepkgs[mysplit[1]+".tbz2"]["USE"][:].split()
+ tbz2=xpak.tbz2(self.getname(pkgname))
+ return tbz2.getfile("USE").split()
+
+ def gettbz2(self,pkgname):
+ "fetches the package from a remote site, if necessary."
+ print "Fetching '"+str(pkgname)+"'"
+ mysplit = pkgname.split("/")
+ tbz2name = mysplit[1]+".tbz2"
+ if not self.isremote(pkgname):
+ if (tbz2name not in self.invalids):
+ return
+ else:
+ writemsg("Resuming download of this tbz2, but it is possible that it is corrupt.\n",
+ noiselevel=-1)
+ mydest = self.pkgdir+"/All/"
+ try:
+ os.makedirs(mydest, 0775)
+ except (OSError, IOError):
+ pass
+ return getbinpkg.file_get(
+ self.settings["PORTAGE_BINHOST"] + "/" + tbz2name,
+ mydest, fcmd=self.settings["RESUMECOMMAND"])
+
+ def getslot(self,mycatpkg):
+ "Get a slot for a catpkg; assume it exists."
+ myslot = ""
+ try:
+ myslot=self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ pass
+ return myslot
+
+class dblink:
+ """
+ This class provides an interface to the installed package database
+ At present this is implemented as a text backend in /var/db/pkg.
+ """
+ def __init__(self, cat, pkg, myroot, mysettings, treetype=None,
+ vartree=None):
+ """
+ Creates a DBlink object for a given CPV.
+ The given CPV may not be present in the database already.
+
+ @param cat: Category
+ @type cat: String
+ @param pkg: Package (PV)
+ @type pkg: String
+ @param myroot: Typically ${ROOT}
+ @type myroot: String (Path)
+ @param mysettings: Typically portage.config
+ @type mysettings: An instance of portage.config
+ @param treetype: one of ['porttree','bintree','vartree']
+ @type treetype: String
+ @param vartree: an instance of vartree corresponding to myroot.
+ @type vartree: vartree
+ """
+
+ self.cat = cat
+ self.pkg = pkg
+ self.mycpv = self.cat+"/"+self.pkg
+ self.mysplit = pkgsplit(self.mycpv)
+ self.treetype = treetype
+ if vartree is None:
+ global db
+ vartree = db[myroot]["vartree"]
+ self.vartree = vartree
+
+ self.dbroot = normalize_path(os.path.join(myroot, VDB_PATH))
+ self.dbcatdir = self.dbroot+"/"+cat
+ self.dbpkgdir = self.dbcatdir+"/"+pkg
+ self.dbtmpdir = self.dbcatdir+"/-MERGING-"+pkg
+ self.dbdir = self.dbpkgdir
+
+ self._lock_vdb = None
+
+ self.settings = mysettings
+ if self.settings==1:
+ raise ValueError
+
+ self.myroot=myroot
+ protect_obj = portage_util.ConfigProtect(myroot,
+ mysettings.get("CONFIG_PROTECT","").split(),
+ mysettings.get("CONFIG_PROTECT_MASK","").split())
+ self.updateprotect = protect_obj.updateprotect
+ self._config_protect = protect_obj
+ self._installed_instance = None
+ self.contentscache=[]
+ self._contents_inodes = None
+
+ def lockdb(self):
+ if self._lock_vdb:
+ raise AssertionError("Lock already held.")
+ # At least the parent needs to exist for the lock file.
+ portage_util.ensure_dirs(self.dbroot)
+ self._lock_vdb = portage_locks.lockdir(self.dbroot)
+
+ def unlockdb(self):
+ if self._lock_vdb:
+ portage_locks.unlockdir(self._lock_vdb)
+ self._lock_vdb = None
+
+ def getpath(self):
+ "return path to location of db information (for >>> informational display)"
+ return self.dbdir
+
+ def exists(self):
+ "does the db entry exist? boolean."
+ return os.path.exists(self.dbdir)
+
+ def create(self):
+ "create the skeleton db directory structure. No contents, virtuals, provides or anything. Also will create /var/db/pkg if necessary."
+ """
+ This function should never get called (there is no reason to use it).
+ """
+ # XXXXX Delete this eventually
+ raise Exception, "This is bad. Don't use it."
+ if not os.path.exists(self.dbdir):
+ os.makedirs(self.dbdir)
+
+ def delete(self):
+ """
+ Remove this entry from the database
+ """
+ if not os.path.exists(self.dbdir):
+ return
+ try:
+ for x in listdir(self.dbdir):
+ os.unlink(self.dbdir+"/"+x)
+ os.rmdir(self.dbdir)
+ except OSError, e:
+ print "!!! Unable to remove db entry for this package."
+ print "!!! It is possible that a directory is in this one. Portage will still"
+ print "!!! register this package as installed as long as this directory exists."
+ print "!!! You may delete this directory with 'rm -Rf "+self.dbdir+"'"
+ print "!!! "+str(e)
+ print
+ sys.exit(1)
+
+ def clearcontents(self):
+ """
+ For a given db entry (self), erase the CONTENTS values.
+ """
+ if os.path.exists(self.dbdir+"/CONTENTS"):
+ os.unlink(self.dbdir+"/CONTENTS")
+
+ def getcontents(self):
+ """
+ Get the installed files of a given package (aka what that package installed)
+ """
+ if not os.path.exists(self.dbdir+"/CONTENTS"):
+ return None
+ if self.contentscache != []:
+ return self.contentscache
+ pkgfiles={}
+ myc=open(self.dbdir+"/CONTENTS","r")
+ mylines=myc.readlines()
+ myc.close()
+ null_byte = "\0"
+ contents_file = os.path.join(self.dbdir, "CONTENTS")
+ pos = 0
+ for line in mylines:
+ pos += 1
+ if null_byte in line:
+ # Null bytes are a common indication of corruption.
+ writemsg("!!! Null byte found in contents " + \
+ "file, line %d: '%s'\n" % (pos, contents_file),
+ noiselevel=-1)
+ continue
+ mydat = line.split()
+ # we do this so we can remove from non-root filesystems
+ # (use the ROOT var to allow maintenance on other partitions)
+ try:
+ mydat[1] = normalize_path(os.path.join(
+ self.myroot, mydat[1].lstrip(os.path.sep)))
+ if mydat[0]=="obj":
+ #format: type, mtime, md5sum
+ pkgfiles[" ".join(mydat[1:-2])]=[mydat[0], mydat[-1], mydat[-2]]
+ elif mydat[0]=="dir":
+ #format: type
+ pkgfiles[" ".join(mydat[1:])]=[mydat[0] ]
+ elif mydat[0]=="sym":
+ #format: type, mtime, dest
+ x=len(mydat)-1
+ if (x >= 13) and (mydat[-1][-1]==')'): # Old/Broken symlink entry
+ mydat = mydat[:-10]+[mydat[-10:][stat.ST_MTIME][:-1]]
+ writemsg("FIXED SYMLINK LINE: %s\n" % mydat, 1)
+ x=len(mydat)-1
+ splitter=-1
+ while(x>=0):
+ if mydat[x]=="->":
+ splitter=x
+ break
+ x=x-1
+ if splitter==-1:
+ return None
+ pkgfiles[" ".join(mydat[1:splitter])]=[mydat[0], mydat[-1], " ".join(mydat[(splitter+1):-1])]
+ elif mydat[0]=="dev":
+ #format: type
+ pkgfiles[" ".join(mydat[1:])]=[mydat[0] ]
+ elif mydat[0]=="fif":
+ #format: type
+ pkgfiles[" ".join(mydat[1:])]=[mydat[0]]
+ else:
+ return None
+ except (KeyError,IndexError):
+ print "portage: CONTENTS line",pos,"corrupt!"
+ self.contentscache=pkgfiles
+ return pkgfiles
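+ # CONTENTS line formats parsed above (values are hypothetical):
+ # obj /usr/bin/foo d41d8cd98f00b204e9800998ecf8427e 1169740800
+ # sym /usr/lib/foo.so -> foo.so.1 1169740800
+ # dir /usr/share/foo
+ # fif /var/run/foo.fifo
+ # dev /dev/foo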
+
+ def unmerge(self, pkgfiles=None, trimworld=1, cleanup=1,
+ ldpath_mtimes=None):
+ """
+ Calls prerm
+ Unmerges a given package (CPV)
+ calls postrm
+ calls cleanrm
+ calls env_update
+
+ @param pkgfiles: files to unmerge (generally self.getcontents() )
+ @type pkgfiles: Dictionary
+ @param trimworld: Remove CPV from world file if True, not if False
+ @type trimworld: Boolean
+ @param cleanup: cleanup to pass to doebuild (see doebuild)
+ @type cleanup: Boolean
+ @param ldpath_mtimes: mtimes to pass to env_update (see env_update)
+ @type ldpath_mtimes: Dictionary
+ @rtype: Integer
+ @returns:
+ 1. os.EX_OK if everything went well.
+ 2. return code of the failed phase (for prerm, postrm, cleanrm)
+
+ Notes:
+ The caller must ensure that lockdb() and unlockdb() are called
+ before and after this method.
+ """
+
+ contents = self.getcontents()
+ # Now, don't assume that the name of the ebuild is the same as the
+ # name of the dir; the package may have been moved.
+ myebuildpath = None
+ mystuff = listdir(self.dbdir, EmptyOnError=1)
+ for x in mystuff:
+ if x.endswith(".ebuild"):
+ myebuildpath = os.path.join(self.dbdir, self.pkg + ".ebuild")
+ if x[:-7] != self.pkg:
+ # Clean up after vardbapi.move_ent() breakage in
+ # portage versions before 2.1.2
+ os.rename(os.path.join(self.dbdir, x), myebuildpath)
+ write_atomic(os.path.join(self.dbdir, "PF"), self.pkg+"\n")
+ break
+
+ self.settings.load_infodir(self.dbdir)
+ if myebuildpath:
+ try:
+ doebuild_environment(myebuildpath, "prerm", self.myroot,
+ self.settings, 0, 0, self.vartree.dbapi)
+ except portage_exception.UnsupportedAPIException, e:
+ # Sometimes this happens due to corruption of the EAPI file.
+ writemsg("!!! FAILED prerm: %s\n" % \
+ os.path.join(self.dbdir, "EAPI"), noiselevel=-1)
+ writemsg("%s\n" % str(e), noiselevel=-1)
+ return 1
+ catdir = os.path.dirname(self.settings["PORTAGE_BUILDDIR"])
+ portage_util.ensure_dirs(os.path.dirname(catdir),
+ uid=portage_uid, gid=portage_gid, mode=070, mask=0)
+ builddir_lock = None
+ catdir_lock = None
+ try:
+ if myebuildpath:
+ catdir_lock = portage_locks.lockdir(catdir)
+ portage_util.ensure_dirs(catdir,
+ uid=portage_uid, gid=portage_gid,
+ mode=070, mask=0)
+ builddir_lock = portage_locks.lockdir(
+ self.settings["PORTAGE_BUILDDIR"])
+ try:
+ portage_locks.unlockdir(catdir_lock)
+ finally:
+ catdir_lock = None
+ # Eventually, we'd like to pass in the saved ebuild env here...
+ retval = doebuild(myebuildpath, "prerm", self.myroot,
+ self.settings, cleanup=cleanup, use_cache=0,
+ mydbapi=self.vartree.dbapi, tree="vartree",
+ vartree=self.vartree)
+ # XXX: Decide how to handle failures here.
+ if retval != os.EX_OK:
+ writemsg("!!! FAILED prerm: %s\n" % retval, noiselevel=-1)
+ return retval
+
+ self._unmerge_pkgfiles(pkgfiles)
+
+ if myebuildpath:
+ retval = doebuild(myebuildpath, "postrm", self.myroot,
+ self.settings, use_cache=0, tree="vartree",
+ mydbapi=self.vartree.dbapi, vartree=self.vartree)
+
+ # process logs created during pre/postrm
+ elog_process(self.mycpv, self.settings)
+
+ # XXX: Decide how to handle failures here.
+ if retval != os.EX_OK:
+ writemsg("!!! FAILED postrm: %s\n" % retval, noiselevel=-1)
+ return retval
+ doebuild(myebuildpath, "cleanrm", self.myroot, self.settings,
+ tree="vartree", mydbapi=self.vartree.dbapi,
+ vartree=self.vartree)
+
+ finally:
+ if builddir_lock:
+ portage_locks.unlockdir(builddir_lock)
+ try:
+ if myebuildpath and not catdir_lock:
+ # Lock catdir for removal if empty.
+ catdir_lock = portage_locks.lockdir(catdir)
+ finally:
+ if catdir_lock:
+ try:
+ os.rmdir(catdir)
+ except OSError, e:
+ if e.errno != errno.ENOTEMPTY:
+ raise
+ del e
+ portage_locks.unlockdir(catdir_lock)
+ env_update(target_root=self.myroot, prev_mtimes=ldpath_mtimes,
+ contents=contents)
+ return os.EX_OK
+
+ def _unmerge_pkgfiles(self, pkgfiles):
+ """
+
+ Unmerges the contents of a package from the liveFS
+ Removes the VDB entry for self
+
+ @param pkgfiles: typically self.getcontents()
+ @type pkgfiles: Dictionary { filename: [ 'type', '?', 'md5sum' ] }
+ @rtype: None
+ """
+ global dircache
+ dircache={}
+
+ if not pkgfiles:
+ writemsg_stdout("No package files given... Grabbing a set.\n")
+ pkgfiles=self.getcontents()
+
+ if pkgfiles:
+ mykeys=pkgfiles.keys()
+ mykeys.sort()
+ mykeys.reverse()
+
+ #process symlinks second-to-last, directories last.
+ mydirs=[]
+ modprotect="/lib/modules/"
+ for objkey in mykeys:
+ obj = normalize_path(objkey)
+ if obj[:2]=="//":
+ obj=obj[1:]
+ statobj = None
+ try:
+ statobj = os.stat(obj)
+ except OSError:
+ pass
+ lstatobj = None
+ try:
+ lstatobj = os.lstat(obj)
+ except (OSError, AttributeError):
+ pass
+ islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
+ if statobj is None:
+ if not islink:
+ #we skip this if we're dealing with a symlink
+ #because os.stat() will operate on the
+ #link target rather than the link itself.
+ writemsg_stdout("--- !found "+str(pkgfiles[objkey][0])+ " %s\n" % obj)
+ continue
+ # next line includes a tweak to protect modules from being unmerged,
+ # but we don't protect modules from being overwritten if they are
+ # upgraded. We effectively only want one half of the config protection
+ # functionality for /lib/modules. For portage-ng both capabilities
+ # should be able to be independently specified.
+ if obj.startswith(modprotect):
+ writemsg_stdout("--- cfgpro %s %s\n" % (pkgfiles[objkey][0], obj))
+ continue
+
+ lmtime=str(lstatobj[stat.ST_MTIME])
+ if (pkgfiles[objkey][0] not in ("dir","fif","dev")) and (lmtime != pkgfiles[objkey][1]):
+ writemsg_stdout("--- !mtime %s %s\n" % (pkgfiles[objkey][0], obj))
+ continue
+
+ if pkgfiles[objkey][0]=="dir":
+ if statobj is None or not stat.S_ISDIR(statobj.st_mode):
+ writemsg_stdout("--- !dir %s %s\n" % ("dir", obj))
+ continue
+ mydirs.append(obj)
+ elif pkgfiles[objkey][0]=="sym":
+ if not islink:
+ writemsg_stdout("--- !sym %s %s\n" % ("sym", obj))
+ continue
+ try:
+ os.unlink(obj)
+ writemsg_stdout("<<< %s %s\n" % ("sym",obj))
+ except (OSError,IOError),e:
+ writemsg_stdout("!!! %s %s\n" % ("sym",obj))
+ elif pkgfiles[objkey][0]=="obj":
+ if statobj is None or not stat.S_ISREG(statobj.st_mode):
+ writemsg_stdout("--- !obj %s %s\n" % ("obj", obj))
+ continue
+ mymd5 = None
+ try:
+ mymd5 = portage_checksum.perform_md5(obj, calc_prelink=1)
+ except portage_exception.FileNotFound, e:
+ # the file has disappeared between now and our stat call
+ writemsg_stdout("--- !obj %s %s\n" % ("obj", obj))
+ continue
+
+					# the md5 is lowered because db entries used to be in upper-case;
+					# lowering allows for backwards compatibility.
+ if mymd5 != pkgfiles[objkey][2].lower():
+ writemsg_stdout("--- !md5 %s %s\n" % ("obj", obj))
+ continue
+ try:
+ os.unlink(obj)
+ except (OSError,IOError),e:
+ pass
+ writemsg_stdout("<<< %s %s\n" % ("obj",obj))
+ elif pkgfiles[objkey][0]=="fif":
+ if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
+ writemsg_stdout("--- !fif %s %s\n" % ("fif", obj))
+ continue
+ writemsg_stdout("--- %s %s\n" % ("fif",obj))
+ elif pkgfiles[objkey][0]=="dev":
+ writemsg_stdout("--- %s %s\n" % ("dev",obj))
+
+ mydirs.sort()
+ mydirs.reverse()
+
+ for obj in mydirs:
+ try:
+ os.rmdir(obj)
+ writemsg_stdout("<<< %s %s\n" % ("dir",obj))
+ except (OSError, IOError):
+ writemsg_stdout("--- !empty dir %s\n" % obj)
+
+ #remove self from vartree database so that our own virtual gets zapped if we're the last node
+ self.vartree.zap(self.mycpv)
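+
+	# Illustrative sketch (values hypothetical): as used above, entries carry
+	# the mtime at index 1 and the md5 at index 2 for regular files, e.g.:
+	#	pkgfiles = {
+	#		"/usr/share/foo": ["dir"],
+	#		"/usr/bin/foo": ["obj", "1169731200", "d41d8cd98f00b204e9800998ecf8427e"],
+	#	}
+	#	mylink._unmerge_pkgfiles(pkgfiles)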
+
+ def isowner(self,filename,destroot):
+ """
+		Check if filename is a new file or belongs to this package
+		(for this or a previous version)
+
+		@param filename: path of the file, relative to destroot
+		@type filename: String (Path)
+		@param destroot: root directory to check against (usually ${ROOT})
+		@type destroot: String (Path)
+		@rtype: Boolean
+		@returns:
+		1. True if this package owns the file (or it does not exist yet).
+		2. False if this package does not own the file.
+		"""
+ destfile = normalize_path(
+ os.path.join(destroot, filename.lstrip(os.path.sep)))
+ try:
+ mylstat = os.lstat(destfile)
+ except (OSError, IOError):
+ return True
+
+ pkgfiles = self.getcontents()
+ if pkgfiles and filename in pkgfiles:
+ return True
+ if pkgfiles:
+ if self._contents_inodes is None:
+ self._contents_inodes = set()
+ for x in pkgfiles:
+ try:
+ lstat = os.lstat(x)
+ self._contents_inodes.add((lstat.st_dev, lstat.st_ino))
+ except OSError:
+ pass
+ if (mylstat.st_dev, mylstat.st_ino) in self._contents_inodes:
+ return True
+
+ return False
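+
+	# Usage sketch (names hypothetical): a path that does not yet exist on
+	# disk is also reported as owned, since it is safe to merge:
+	#	mylink = dblink("app-misc", "foo-1.0", "/", mysettings, vartree=myvartree)
+	#	mylink.isowner("usr/bin/foo", "/")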
+
+ def isprotected(self, filename):
+ """In cases where an installed package in the same slot owns a
+ protected file that will be merged, bump the mtime on the installed
+ file in order to ensure that it isn't unmerged."""
+ if not self._config_protect.isprotected(filename):
+ return False
+ if self._installed_instance is None:
+ return True
+ mydata = self._installed_instance.getcontents().get(filename, None)
+ if mydata is None:
+ return True
+
+ # Bump the mtime in order to ensure that the old config file doesn't
+ # get unmerged. The user will have an opportunity to merge the new
+ # config with the old one.
+ try:
+ os.utime(filename, None)
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ # The file has disappeared, so it's not protected.
+ return False
+ return True
+
+ def treewalk(self, srcroot, destroot, inforoot, myebuild, cleanup=0,
+ mydbapi=None, prev_mtimes=None):
+ """
+
+ This function does the following:
+
+ Collision Protection.
+ calls doebuild(mydo=pkg_preinst)
+ Merges the package to the livefs
+ unmerges old version (if required)
+ calls doebuild(mydo=pkg_postinst)
+ calls env_update
+
+ @param srcroot: Typically this is ${D}
+ @type srcroot: String (Path)
+ @param destroot: Path to merge to (usually ${ROOT})
+ @type destroot: String (Path)
+		@param inforoot: root of the vardb entry (the build-info directory)
+ @type inforoot: String (Path)
+ @param myebuild: path to the ebuild that we are processing
+ @type myebuild: String (Path)
+ @param mydbapi: dbapi which is handed to doebuild.
+		@type mydbapi: dbapi instance (portdbapi, or the bintree dbapi for binary merges)
+ @param prev_mtimes: { Filename:mtime } mapping for env_update
+ @type prev_mtimes: Dictionary
+		@rtype: Integer
+ @returns:
+ 1. 0 on success
+ 2. 1 on failure
+
+ secondhand is a list of symlinks that have been skipped due to their target
+ not existing; we will merge these symlinks at a later time.
+ """
+ if not os.path.isdir(srcroot):
+ writemsg("!!! Directory Not Found: D='%s'\n" % srcroot,
+ noiselevel=-1)
+ return 1
+
+ if not os.path.exists(self.dbcatdir):
+ os.makedirs(self.dbcatdir)
+
+ otherversions=[]
+ for v in self.vartree.dbapi.cp_list(self.mysplit[0]):
+ otherversions.append(v.split("/")[1])
+
+ slot_matches = self.vartree.dbapi.match(
+ "%s:%s" % (self.mysplit[0], self.settings["SLOT"]))
+ if slot_matches:
+ # Used by self.isprotected().
+ self._installed_instance = dblink(self.cat,
+ catsplit(slot_matches[0])[1], destroot, self.settings,
+ vartree=self.vartree)
+
+ # check for package collisions
+ if "collision-protect" in self.settings.features:
+ collision_ignore = set([normalize_path(myignore) for myignore in \
+ self.settings.get("COLLISION_IGNORE", "").split()])
+ myfilelist = listdir(srcroot, recursive=1, filesonly=1, followSymlinks=False)
+
+ # the linkcheck only works if we are in srcroot
+ mycwd = getcwd()
+ os.chdir(srcroot)
+ mysymlinks = filter(os.path.islink, listdir(srcroot, recursive=1, filesonly=0, followSymlinks=False))
+ myfilelist.extend(mysymlinks)
+ mysymlinked_directories = [s + os.path.sep for s in mysymlinks]
+ del mysymlinks
+
+
+ stopmerge=False
+ starttime=time.time()
+ i=0
+
+ otherpkg=[]
+ mypkglist=[]
+
+ if self.pkg in otherversions:
+ otherversions.remove(self.pkg) # we already checked this package
+
+ myslot = self.settings["SLOT"]
+ for v in otherversions:
+ # only allow versions with same slot to overwrite files
+ if myslot == self.vartree.dbapi.aux_get("/".join((self.cat, v)), ["SLOT"])[0]:
+ mypkglist.append(
+ dblink(self.cat, v, destroot, self.settings,
+ vartree=self.vartree))
+
+ collisions = []
+
+ print green("*")+" checking "+str(len(myfilelist))+" files for package collisions"
+ for f in myfilelist:
+ nocheck = False
+ # listdir isn't intelligent enough to exclude symlinked dirs,
+				# so we have to do it ourselves
+ for s in mysymlinked_directories:
+ if f.startswith(s):
+ nocheck = True
+ break
+ if nocheck:
+ continue
+ i=i+1
+ if i % 1000 == 0:
+ print str(i)+" files checked ..."
+ if f[0] != "/":
+ f="/"+f
+ isowned = False
+ for ver in [self]+mypkglist:
+ if (ver.isowner(f, destroot) or ver.isprotected(f)):
+ isowned = True
+ break
+ if not isowned:
+ collisions.append(f)
+ print "existing file "+f+" is not owned by this package"
+ stopmerge=True
+ if collision_ignore:
+ if f in collision_ignore:
+ stopmerge = False
+ else:
+ for myignore in collision_ignore:
+ if f.startswith(myignore + os.path.sep):
+ stopmerge = False
+ break
+ #print green("*")+" spent "+str(time.time()-starttime)+" seconds checking for file collisions"
+ if stopmerge:
+ print red("*")+" This package is blocked because it wants to overwrite"
+ print red("*")+" files belonging to other packages (see messages above)."
+				print red("*")+" If you have no clue what this is all about, report it "
+ print red("*")+" as a bug for this package on http://bugs.gentoo.org"
+ print
+ print red("package "+self.cat+"/"+self.pkg+" NOT merged")
+ print
+ print
+ print "Searching all installed packages for file collisions..."
+ print "Press Ctrl-C to Stop"
+ print
+ """ Note: The isowner calls result in a stat call for *every*
+ single installed file, since the inode numbers are used to work
+ around the problem of ambiguous paths caused by symlinked files
+ and/or directories. Though it is slow, it is as accurate as
+ possible."""
+ found_owner = False
+ for cpv in self.vartree.dbapi.cpv_all():
+ cat, pkg = catsplit(cpv)
+ mylink = dblink(cat, pkg, destroot, self.settings,
+ vartree=self.vartree)
+ mycollisions = []
+ for f in collisions:
+ if mylink.isowner(f, destroot):
+ mycollisions.append(f)
+ if mycollisions:
+ found_owner = True
+ print " * %s:" % cpv
+ print
+ for f in mycollisions:
+ print " '%s'" % \
+ os.path.join(destroot, f.lstrip(os.path.sep))
+ print
+ if not found_owner:
+ print "None of the installed packages claim the above file(s)."
+ print
+ sys.exit(1)
+ try:
+ os.chdir(mycwd)
+ except OSError:
+ pass
+
+ if os.stat(srcroot).st_dev == os.stat(destroot).st_dev:
+ """ The merge process may move files out of the image directory,
+ which causes invalidation of the .installed flag."""
+ try:
+ os.unlink(os.path.join(
+ os.path.dirname(normalize_path(srcroot)), ".installed"))
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+
+ # get old contents info for later unmerging
+ oldcontents = self.getcontents()
+
+ self.dbdir = self.dbtmpdir
+ self.delete()
+ if not os.path.exists(self.dbtmpdir):
+ os.makedirs(self.dbtmpdir)
+
+		writemsg_stdout(">>> Merging %s to %s\n" % (self.mycpv, destroot))
+
+ # run preinst script
+ if myebuild is None:
+ myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
+ a = doebuild(myebuild, "preinst", destroot, self.settings, cleanup=cleanup,
+ use_cache=0, tree=self.treetype, mydbapi=mydbapi,
+ vartree=self.vartree)
+
+ # XXX: Decide how to handle failures here.
+ if a != os.EX_OK:
+ writemsg("!!! FAILED preinst: "+str(a)+"\n", noiselevel=-1)
+ return a
+
+ # copy "info" files (like SLOT, CFLAGS, etc.) into the database
+ for x in listdir(inforoot):
+ self.copyfile(inforoot+"/"+x)
+
+ # get current counter value (counter_tick also takes care of incrementing it)
+ # XXX Need to make this destroot, but it needs to be initialized first. XXX
+ # XXX bis: leads to some invalidentry() call through cp_all().
+ counter = self.vartree.dbapi.counter_tick(self.myroot, mycpv=self.mycpv)
+ # write local package counter for recording
+ lcfile = open(self.dbtmpdir+"/COUNTER","w")
+ lcfile.write(str(counter))
+ lcfile.close()
+
+ # open CONTENTS file (possibly overwriting old one) for recording
+ outfile=open(self.dbtmpdir+"/CONTENTS","w")
+
+ self.updateprotect()
+
+ #if we have a file containing previously-merged config file md5sums, grab it.
+ conf_mem_file = os.path.join(destroot, CONFIG_MEMORY_FILE)
+ cfgfiledict = grabdict(conf_mem_file)
+ if self.settings.has_key("NOCONFMEM"):
+ cfgfiledict["IGNORE"]=1
+ else:
+ cfgfiledict["IGNORE"]=0
+
+		# set umask to 0 for merging; save the old one in prevmask (since this is a global change)
+ mymtime = long(time.time())
+ prevmask = os.umask(0)
+ secondhand = []
+
+ # we do a first merge; this will recurse through all files in our srcroot but also build up a
+ # "second hand" of symlinks to merge later
+ if self.mergeme(srcroot,destroot,outfile,secondhand,"",cfgfiledict,mymtime):
+ return 1
+
+		# now it's time to deal with our second hand; we'll loop until we can't merge any more. Whatever
+		# is left over consists of broken symlinks, and we'll merge those too (by force, below).
+ lastlen=0
+ while len(secondhand) and len(secondhand)!=lastlen:
+ # clear the thirdhand. Anything from our second hand that
+ # couldn't get merged will be added to thirdhand.
+
+ thirdhand=[]
+ self.mergeme(srcroot,destroot,outfile,thirdhand,secondhand,cfgfiledict,mymtime)
+
+ #swap hands
+ lastlen=len(secondhand)
+
+ # our thirdhand now becomes our secondhand. It's ok to throw
+ # away secondhand since thirdhand contains all the stuff that
+ # couldn't be merged.
+ secondhand = thirdhand
+
+ if len(secondhand):
+ # force merge of remaining symlinks (broken or circular; oh well)
+ self.mergeme(srcroot,destroot,outfile,None,secondhand,cfgfiledict,mymtime)
+
+ #restore umask
+ os.umask(prevmask)
+
+ #if we opened it, close it
+ outfile.flush()
+ outfile.close()
+
+ if os.path.exists(self.dbpkgdir):
+ writemsg_stdout(">>> Safely unmerging already-installed instance...\n")
+ self.dbdir = self.dbpkgdir
+ self.unmerge(oldcontents, trimworld=0, ldpath_mtimes=prev_mtimes)
+ self.dbdir = self.dbtmpdir
+ writemsg_stdout(">>> Original instance of package unmerged safely.\n")
+
+ # We hold both directory locks.
+ self.dbdir = self.dbpkgdir
+ self.delete()
+ movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
+ contents = self.getcontents()
+
+ #write out our collection of md5sums
+ if cfgfiledict.has_key("IGNORE"):
+ del cfgfiledict["IGNORE"]
+
+ my_private_path = os.path.join(destroot, PRIVATE_PATH)
+ if not os.path.exists(my_private_path):
+ os.makedirs(my_private_path)
+ os.chown(my_private_path, os.getuid(), portage_gid)
+ os.chmod(my_private_path, 02770)
+
+ writedict(cfgfiledict, conf_mem_file)
+ del conf_mem_file
+
+ #do postinst script
+ a = doebuild(myebuild, "postinst", destroot, self.settings, use_cache=0,
+ tree=self.treetype, mydbapi=mydbapi, vartree=self.vartree)
+
+ # XXX: Decide how to handle failures here.
+ if a != os.EX_OK:
+ writemsg("!!! FAILED postinst: "+str(a)+"\n", noiselevel=-1)
+ return a
+
+ downgrade = False
+ for v in otherversions:
+ if pkgcmp(catpkgsplit(self.pkg)[1:], catpkgsplit(v)[1:]) < 0:
+ downgrade = True
+
+ #update environment settings, library paths. DO NOT change symlinks.
+ env_update(makelinks=(not downgrade),
+ target_root=self.settings["ROOT"], prev_mtimes=prev_mtimes,
+ contents=contents)
+ #dircache may break autoclean because it remembers the -MERGING-pkg file
+ global dircache
+ if dircache.has_key(self.dbcatdir):
+ del dircache[self.dbcatdir]
+		writemsg_stdout(">>> %s merged.\n" % self.mycpv)
+
+ # Process ebuild logfiles
+ elog_process(self.mycpv, self.settings)
+ if "noclean" not in self.settings.features:
+ doebuild(myebuild, "clean", destroot, self.settings,
+ tree=self.treetype, mydbapi=mydbapi, vartree=self.vartree)
+ return os.EX_OK
+
+ def mergeme(self,srcroot,destroot,outfile,secondhand,stufftomerge,cfgfiledict,thismtime):
+ """
+
+ This function handles actual merging of the package contents to the livefs.
+ It also handles config protection.
+
+ @param srcroot: Where are we copying files from (usually ${D})
+ @type srcroot: String (Path)
+ @param destroot: Typically ${ROOT}
+ @type destroot: String (Path)
+ @param outfile: File to log operations to
+ @type outfile: File Object
+		@param secondhand: A list of items to merge in pass two (usually
+		symlinks that point to non-existing files and may get merged later)
+		@type secondhand: List
+		@param stufftomerge: Either a directory to merge, or a list of items.
+		@type stufftomerge: String or List
+		@param cfgfiledict: { File:mtime } mapping for config_protected files
+		@type cfgfiledict: Dictionary
+		@param thismtime: The current time (typically long(time.time()))
+		@type thismtime: Long
+		@rtype: None or Integer
+		@returns:
+		1. 1 on failure
+		2. None otherwise
+
+ """
+ from os.path import sep, join
+ srcroot = normalize_path(srcroot).rstrip(sep) + sep
+ destroot = normalize_path(destroot).rstrip(sep) + sep
+ # this is supposed to merge a list of files. There will be 2 forms of argument passing.
+ if type(stufftomerge)==types.StringType:
+ #A directory is specified. Figure out protection paths, listdir() it and process it.
+ mergelist = listdir(join(srcroot, stufftomerge))
+ offset=stufftomerge
+ else:
+ mergelist=stufftomerge
+ offset=""
+ for x in mergelist:
+ mysrc = join(srcroot, offset, x)
+ mydest = join(destroot, offset, x)
+ # myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
+ myrealdest = join(sep, offset, x)
+ # stat file once, test using S_* macros many times (faster that way)
+ try:
+ mystat=os.lstat(mysrc)
+ except OSError, e:
+ writemsg("\n")
+ writemsg(red("!!! ERROR: There appears to be ")+bold("FILE SYSTEM CORRUPTION.")+red(" A file that is listed\n"))
+ writemsg(red("!!! as existing is not capable of being stat'd. If you are using an\n"))
+ writemsg(red("!!! experimental kernel, please boot into a stable one, force an fsck,\n"))
+ writemsg(red("!!! and ensure your filesystem is in a sane state. ")+bold("'shutdown -Fr now'\n"))
+ writemsg(red("!!! File: ")+str(mysrc)+"\n", noiselevel=-1)
+ writemsg(red("!!! Error: ")+str(e)+"\n", noiselevel=-1)
+ sys.exit(1)
+ except Exception, e:
+ writemsg("\n")
+ writemsg(red("!!! ERROR: An unknown error has occurred during the merge process.\n"))
+				writemsg(red("!!! A stat call returned the following error for the following file:\n"))
+ writemsg( "!!! Please ensure that your filesystem is intact, otherwise report\n")
+ writemsg( "!!! this as a portage bug at bugs.gentoo.org. Append 'emerge info'.\n")
+ writemsg( "!!! File: "+str(mysrc)+"\n", noiselevel=-1)
+ writemsg( "!!! Error: "+str(e)+"\n", noiselevel=-1)
+ sys.exit(1)
+
+
+ mymode=mystat[stat.ST_MODE]
+ # handy variables; mydest is the target object on the live filesystems;
+ # mysrc is the source object in the temporary install dir
+ try:
+ mydmode = os.lstat(mydest).st_mode
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ #dest file doesn't exist
+ mydmode=None
+
+ if stat.S_ISLNK(mymode):
+ # we are merging a symbolic link
+ myabsto=abssymlink(mysrc)
+ if myabsto.startswith(srcroot):
+ myabsto=myabsto[len(srcroot):]
+ myabsto = myabsto.lstrip(sep)
+ myto=os.readlink(mysrc)
+ if self.settings and self.settings["D"]:
+ if myto.startswith(self.settings["D"]):
+ myto=myto[len(self.settings["D"]):]
+ # myrealto contains the path of the real file to which this symlink points.
+ # we can simply test for existence of this file to see if the target has been merged yet
+ myrealto = normalize_path(os.path.join(destroot, myabsto))
+ if mydmode!=None:
+ #destination exists
+ if not stat.S_ISLNK(mydmode):
+ if stat.S_ISDIR(mydmode):
+ # directory in the way: we can't merge a symlink over a directory
+ # we won't merge this, continue with next file...
+ continue
+
+ if os.path.exists(mysrc) and stat.S_ISDIR(os.stat(mysrc)[stat.ST_MODE]):
+ # Kill file blocking installation of symlink to dir #71787
+ pass
+ elif self.isprotected(mydest):
+ # Use md5 of the target in ${D} if it exists...
+ try:
+ newmd5 = portage_checksum.perform_md5(
+ join(srcroot, myabsto))
+ except portage_exception.FileNotFound:
+ # Maybe the target is merged already.
+ try:
+ newmd5 = portage_checksum.perform_md5(
+ myrealto)
+ except portage_exception.FileNotFound:
+ newmd5 = None
+ mydest = new_protect_filename(mydest,newmd5=newmd5)
+
+ # if secondhand is None it means we're operating in "force" mode and should not create a second hand.
+ if (secondhand!=None) and (not os.path.exists(myrealto)):
+ # either the target directory doesn't exist yet or the target file doesn't exist -- or
+ # the target is a broken symlink. We will add this file to our "second hand" and merge
+ # it later.
+ secondhand.append(mysrc[len(srcroot):])
+ continue
+ # unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
+ mymtime=movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)
+ if mymtime!=None:
+ writemsg_stdout(">>> %s -> %s\n" % (mydest, myto))
+ outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
+ else:
+ print "!!! Failed to move file."
+ print "!!!",mydest,"->",myto
+ sys.exit(1)
+ elif stat.S_ISDIR(mymode):
+ # we are merging a directory
+ if mydmode!=None:
+ # destination exists
+
+ if bsd_chflags:
+ # Save then clear flags on dest.
+ dflags=bsd_chflags.lgetflags(mydest)
+ if dflags != 0 and bsd_chflags.lchflags(mydest, 0) < 0:
+ writemsg("!!! Couldn't clear flags on '"+mydest+"'.\n",
+ noiselevel=-1)
+
+ if not os.access(mydest, os.W_OK):
+ pkgstuff = pkgsplit(self.pkg)
+ writemsg("\n!!! Cannot write to '"+mydest+"'.\n", noiselevel=-1)
+ writemsg("!!! Please check permissions and directories for broken symlinks.\n")
+ writemsg("!!! You may start the merge process again by using ebuild:\n")
+ writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
+ writemsg("!!! And finish by running this: env-update\n\n")
+ return 1
+
+ if stat.S_ISLNK(mydmode) or stat.S_ISDIR(mydmode):
+ # a symlink to an existing directory will work for us; keep it:
+ writemsg_stdout("--- %s/\n" % mydest)
+ if bsd_chflags:
+ bsd_chflags.lchflags(mydest, dflags)
+ else:
+ # a non-directory and non-symlink-to-directory. Won't work for us. Move out of the way.
+ if movefile(mydest,mydest+".backup", mysettings=self.settings) is None:
+ sys.exit(1)
+ print "bak",mydest,mydest+".backup"
+ #now create our directory
+ if self.settings.selinux_enabled():
+ sid = selinux.get_sid(mysrc)
+ selinux.secure_mkdir(mydest,sid)
+ else:
+ os.mkdir(mydest)
+ if bsd_chflags:
+ bsd_chflags.lchflags(mydest, dflags)
+ os.chmod(mydest,mystat[0])
+ os.chown(mydest,mystat[4],mystat[5])
+ writemsg_stdout(">>> %s/\n" % mydest)
+ else:
+ #destination doesn't exist
+ if self.settings.selinux_enabled():
+ sid = selinux.get_sid(mysrc)
+ selinux.secure_mkdir(mydest,sid)
+ else:
+ os.mkdir(mydest)
+ os.chmod(mydest,mystat[0])
+ os.chown(mydest,mystat[4],mystat[5])
+ writemsg_stdout(">>> %s/\n" % mydest)
+ outfile.write("dir "+myrealdest+"\n")
+ # recurse and merge this directory
+ if self.mergeme(srcroot, destroot, outfile, secondhand,
+ join(offset, x), cfgfiledict, thismtime):
+ return 1
+ elif stat.S_ISREG(mymode):
+ # we are merging a regular file
+ mymd5=portage_checksum.perform_md5(mysrc,calc_prelink=1)
+ # calculate config file protection stuff
+ mydestdir=os.path.dirname(mydest)
+ moveme=1
+ zing="!!!"
+ if mydmode!=None:
+ # destination file exists
+ if stat.S_ISDIR(mydmode):
+ # install of destination is blocked by an existing directory with the same name
+ moveme=0
+ writemsg_stdout("!!! %s\n" % mydest)
+ elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
+ cfgprot=0
+ # install of destination is blocked by an existing regular file,
+ # or by a symlink to an existing regular file;
+ # now, config file management may come into play.
+ # we only need to tweak mydest if cfg file management is in play.
+ if self.isprotected(mydest):
+ # we have a protection path; enable config file management.
+ destmd5=portage_checksum.perform_md5(mydest,calc_prelink=1)
+ if mymd5==destmd5:
+ #file already in place; simply update mtimes of destination
+ os.utime(mydest,(thismtime,thismtime))
+ zing="---"
+ moveme=0
+ else:
+ if mymd5 == cfgfiledict.get(myrealdest, [None])[0]:
+ """ An identical update has previously been
+ merged. Skip it unless the user has chosen
+ --noconfmem."""
+ zing = "-o-"
+ moveme = cfgfiledict["IGNORE"]
+ cfgprot = cfgfiledict["IGNORE"]
+ else:
+ moveme = 1
+ cfgprot = 1
+ if moveme:
+ # Merging a new file, so update confmem.
+ cfgfiledict[myrealdest] = [mymd5]
+ elif destmd5 == cfgfiledict.get(myrealdest, [None])[0]:
+ """A previously remembered update has been
+ accepted, so it is removed from confmem."""
+ del cfgfiledict[myrealdest]
+ if cfgprot:
+ mydest = new_protect_filename(mydest, newmd5=mymd5)
+
+ # whether config protection or not, we merge the new file the
+ # same way. Unless moveme=0 (blocking directory)
+ if moveme:
+ mymtime=movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)
+ if mymtime is None:
+ sys.exit(1)
+ zing=">>>"
+ else:
+ mymtime=thismtime
+ # We need to touch the destination so that on --update the
+ # old package won't yank the file with it. (non-cfgprot related)
+ os.utime(mydest,(thismtime,thismtime))
+ zing="---"
+ if self.settings["USERLAND"] == "Darwin" and myrealdest[-2:] == ".a":
+
+ # XXX kludge, can be killed when portage stops relying on
+ # md5+mtime, and uses refcounts
+					# alright, we've fooled w/ mtime on the file; this pisses off static archives:
+					# basically internal mtime != file's mtime, so the linker (falsely) thinks
+					# the archive is stale and needs to have its TOC rebuilt.
+
+ myf = open(mydest, "r+")
+
+ # ar mtime field is digits padded with spaces, 12 bytes.
+ lms=str(thismtime+5).ljust(12)
+ myf.seek(0)
+ magic=myf.read(8)
+ if magic != "!<arch>\n":
+						# not an archive (dolib.a from portage.py makes it here, for example)
+ myf.close()
+ else:
+ st = os.stat(mydest)
+ while myf.tell() < st.st_size - 12:
+ # skip object name
+ myf.seek(16,1)
+
+ # update mtime
+ myf.write(lms)
+
+ # skip uid/gid/mperm
+ myf.seek(20,1)
+
+ # read the archive member's size
+ x=long(myf.read(10))
+
+ # skip the trailing newlines, and add the potential
+ # extra padding byte if it's not an even size
+ myf.seek(x + 2 + (x % 2),1)
+
+ # and now we're at the end. yay.
+ myf.close()
+ mymd5 = portage_checksum.perform_md5(mydest, calc_prelink=1)
+ os.utime(mydest,(thismtime,thismtime))
+
+ if mymtime!=None:
+ zing=">>>"
+ outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
+ writemsg_stdout("%s %s\n" % (zing,mydest))
+ else:
+ # we are merging a fifo or device node
+ zing="!!!"
+ if mydmode is None:
+ # destination doesn't exist
+ if movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)!=None:
+ zing=">>>"
+ else:
+ sys.exit(1)
+ if stat.S_ISFIFO(mymode):
+ outfile.write("fif %s\n" % myrealdest)
+ else:
+ outfile.write("dev %s\n" % myrealdest)
+ writemsg_stdout(zing+" "+mydest+"\n")
+
+ def merge(self, mergeroot, inforoot, myroot, myebuild=None, cleanup=0,
+ mydbapi=None, prev_mtimes=None):
+ try:
+ self.lockdb()
+ return self.treewalk(mergeroot, myroot, inforoot, myebuild,
+ cleanup=cleanup, mydbapi=mydbapi, prev_mtimes=prev_mtimes)
+ finally:
+ self.unlockdb()
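+
+	# Example call (sketch; paths and names are assumptions): merge an image
+	# directory into ROOT while holding the vdb lock:
+	#	retval = mylink.merge("/var/tmp/portage/foo-1.0/image",
+	#		"/var/tmp/portage/foo-1.0/build-info", "/", myebuild=myebuild,
+	#		mydbapi=portdb, prev_mtimes=mtimedb["ldpath"])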
+
+ def getstring(self,name):
+ "returns contents of a file with whitespace converted to spaces"
+ if not os.path.exists(self.dbdir+"/"+name):
+ return ""
+ myfile=open(self.dbdir+"/"+name,"r")
+ mydata=myfile.read().split()
+ myfile.close()
+ return " ".join(mydata)
+
+ def copyfile(self,fname):
+ shutil.copyfile(fname,self.dbdir+"/"+os.path.basename(fname))
+
+ def getfile(self,fname):
+ if not os.path.exists(self.dbdir+"/"+fname):
+ return ""
+ myfile=open(self.dbdir+"/"+fname,"r")
+ mydata=myfile.read()
+ myfile.close()
+ return mydata
+
+ def setfile(self,fname,data):
+ write_atomic(os.path.join(self.dbdir, fname), data)
+
+ def getelements(self,ename):
+ if not os.path.exists(self.dbdir+"/"+ename):
+ return []
+ myelement=open(self.dbdir+"/"+ename,"r")
+ mylines=myelement.readlines()
+ myreturn=[]
+ for x in mylines:
+ for y in x[:-1].split():
+ myreturn.append(y)
+ myelement.close()
+ return myreturn
+
+ def setelements(self,mylist,ename):
+ myelement=open(self.dbdir+"/"+ename,"w")
+ for x in mylist:
+ myelement.write(x+"\n")
+ myelement.close()
+
+ def isregular(self):
+ "Is this a regular package (does it have a CATEGORY file? A dblink can be virtual *and* regular)"
+ return os.path.exists(self.dbdir+"/CATEGORY")
+
+class FetchlistDict(UserDict.DictMixin):
+	"""This provides a mapping interface to retrieve fetch lists. It's used
+ to allow portage_manifest.Manifest to access fetch lists via a standard
+ mapping interface rather than use the dbapi directly."""
+ def __init__(self, pkgdir, settings, mydbapi):
+ """pkgdir is a directory containing ebuilds and settings is passed into
+ portdbapi.getfetchlist for __getitem__ calls."""
+ self.pkgdir = pkgdir
+ self.cp = os.sep.join(pkgdir.split(os.sep)[-2:])
+ self.settings = settings
+ self.mytree = os.path.realpath(os.path.dirname(os.path.dirname(pkgdir)))
+ self.portdb = mydbapi
+ def __getitem__(self, pkg_key):
+ """Returns the complete fetch list for a given package."""
+ return self.portdb.getfetchlist(pkg_key, mysettings=self.settings,
+ all=True, mytree=self.mytree)[1]
+ def has_key(self, pkg_key):
+ """Returns true if the given package exists within pkgdir."""
+ return pkg_key in self.keys()
+ def keys(self):
+ """Returns keys for all packages within pkgdir"""
+ return self.portdb.cp_list(self.cp, mytree=self.mytree)
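+
+	# Usage sketch (paths hypothetical): Manifest code treats this like an
+	# ordinary mapping from package key to its complete fetch list:
+	#	fetchlists = FetchlistDict("/usr/portage/app-misc/foo", mysettings, portdb)
+	#	for pkg_key in fetchlists.keys():
+	#		uris = fetchlists[pkg_key]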
+
+def pkgmerge(mytbz2, myroot, mysettings, mydbapi=None, vartree=None, prev_mtimes=None):
+	"""will merge a .tbz2 file, returning os.EX_OK on success or a non-zero
+	exit status on failure. This code assumes the package exists."""
+ global db
+ if mydbapi is None:
+ mydbapi = db[myroot]["bintree"].dbapi
+ if vartree is None:
+ vartree = db[myroot]["vartree"]
+ if mytbz2[-5:]!=".tbz2":
+ print "!!! Not a .tbz2 file"
+ return 1
+
+ tbz2_lock = None
+ builddir_lock = None
+ catdir_lock = None
+ try:
+		""" Don't lock the tbz2 file because the filesystem could be read-only or
+		shared by a cluster."""
+ #tbz2_lock = portage_locks.lockfile(mytbz2, wantnewlockfile=1)
+
+ mypkg = os.path.basename(mytbz2)[:-5]
+ xptbz2 = xpak.tbz2(mytbz2)
+ mycat = xptbz2.getfile("CATEGORY")
+ if not mycat:
+ writemsg("!!! CATEGORY info missing from info chunk, aborting...\n",
+ noiselevel=-1)
+ return 1
+ mycat = mycat.strip()
+
+ # These are the same directories that would be used at build time.
+ builddir = os.path.join(
+ mysettings["PORTAGE_TMPDIR"], "portage", mycat, mypkg)
+ catdir = os.path.dirname(builddir)
+ pkgloc = os.path.join(builddir, "image")
+ infloc = os.path.join(builddir, "build-info")
+ myebuild = os.path.join(
+ infloc, os.path.basename(mytbz2)[:-4] + "ebuild")
+ portage_util.ensure_dirs(os.path.dirname(catdir),
+ uid=portage_uid, gid=portage_gid, mode=070, mask=0)
+ catdir_lock = portage_locks.lockdir(catdir)
+ portage_util.ensure_dirs(catdir,
+ uid=portage_uid, gid=portage_gid, mode=070, mask=0)
+ builddir_lock = portage_locks.lockdir(builddir)
+ try:
+ portage_locks.unlockdir(catdir_lock)
+ finally:
+ catdir_lock = None
+ try:
+ shutil.rmtree(builddir)
+ except (IOError, OSError), e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ for mydir in (builddir, pkgloc, infloc):
+ portage_util.ensure_dirs(mydir, uid=portage_uid,
+ gid=portage_gid, mode=0755)
+ writemsg_stdout(">>> Extracting info\n")
+ xptbz2.unpackinfo(infloc)
+ mysettings.load_infodir(infloc)
+ # Store the md5sum in the vdb.
+ fp = open(os.path.join(infloc, "BINPKGMD5"), "w")
+ fp.write(str(portage_checksum.perform_md5(mytbz2))+"\n")
+ fp.close()
+
+ debug = mysettings.get("PORTAGE_DEBUG", "") == "1"
+
+ # Eventually we'd like to pass in the saved ebuild env here.
+ retval = doebuild(myebuild, "setup", myroot, mysettings, debug=debug,
+ tree="bintree", mydbapi=mydbapi, vartree=vartree)
+ if retval != os.EX_OK:
+ writemsg("!!! Setup failed: %s\n" % retval, noiselevel=-1)
+ return retval
+
+ writemsg_stdout(">>> Extracting %s\n" % mypkg)
+ retval = portage_exec.spawn_bash(
+ "bzip2 -dqc -- '%s' | tar -xp -C '%s' -f -" % (mytbz2, pkgloc),
+ env=mysettings.environ())
+ if retval != os.EX_OK:
+ writemsg("!!! Error Extracting '%s'\n" % mytbz2, noiselevel=-1)
+ return retval
+ #portage_locks.unlockfile(tbz2_lock)
+ #tbz2_lock = None
+
+ mylink = dblink(mycat, mypkg, myroot, mysettings, vartree=vartree,
+ treetype="bintree")
+ retval = mylink.merge(pkgloc, infloc, myroot, myebuild, cleanup=0,
+ mydbapi=mydbapi, prev_mtimes=prev_mtimes)
+ return retval
+ finally:
+ if tbz2_lock:
+ portage_locks.unlockfile(tbz2_lock)
+ if builddir_lock:
+ try:
+ shutil.rmtree(builddir)
+ except (IOError, OSError), e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ portage_locks.unlockdir(builddir_lock)
+ try:
+ if not catdir_lock:
+ # Lock catdir for removal if empty.
+ catdir_lock = portage_locks.lockdir(catdir)
+ finally:
+ if catdir_lock:
+ try:
+ os.rmdir(catdir)
+ except OSError, e:
+ if e.errno != errno.ENOTEMPTY:
+ raise
+ del e
+ portage_locks.unlockdir(catdir_lock)
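+
+# Example call (sketch, paths hypothetical): merge a binary package into ROOT:
+#	retval = pkgmerge("/usr/portage/packages/All/foo-1.0.tbz2", "/", mysettings,
+#		mydbapi=db["/"]["bintree"].dbapi, vartree=db["/"]["vartree"],
+#		prev_mtimes=mtimedb["ldpath"])
+#	if retval != os.EX_OK:
+#		writemsg("!!! binary merge failed\n", noiselevel=-1)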
+
+def deprecated_profile_check():
+ if not os.access(DEPRECATED_PROFILE_FILE, os.R_OK):
+ return False
+ deprecatedfile = open(DEPRECATED_PROFILE_FILE, "r")
+ dcontent = deprecatedfile.readlines()
+ deprecatedfile.close()
+ newprofile = dcontent[0]
+ writemsg(red("\n!!! Your current profile is deprecated and not supported anymore.\n"),
+ noiselevel=-1)
+ writemsg(red("!!! Please upgrade to the following profile if possible:\n"),
+ noiselevel=-1)
+ writemsg(8*" "+green(newprofile)+"\n", noiselevel=-1)
+ if len(dcontent) > 1:
+		writemsg("To upgrade, do the following steps:\n", noiselevel=-1)
+ for myline in dcontent[1:]:
+ writemsg(myline, noiselevel=-1)
+ writemsg("\n\n", noiselevel=-1)
+ return True
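+
+# File layout sketch (hypothetical content of DEPRECATED_PROFILE_FILE): the
+# first line names the replacement profile, any further lines are printed
+# verbatim as upgrade instructions:
+#	default-linux/x86/2006.1
+#	Then update the /etc/make.profile symlink and re-run emerge.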
+
+# gets virtual package settings
+def getvirtuals(myroot):
+ global settings
+	writemsg("--- DEPRECATED call to getvirtuals\n")
+ return settings.getvirtuals(myroot)
+
+def commit_mtimedb(mydict=None, filename=None):
+ if mydict is None:
+ global mtimedb
+ if "mtimedb" not in globals() or mtimedb is None:
+ return
+ mtimedb.commit()
+ return
+ if filename is None:
+ global mtimedbfile
+ filename = mtimedbfile
+ mydict["version"] = VERSION
+ d = {} # for full backward compat, pickle it as a plain dict object.
+ d.update(mydict)
+ try:
+ f = atomic_ofstream(filename)
+ cPickle.dump(d, f, -1)
+ f.close()
+ portage_util.apply_secpass_permissions(filename, uid=uid, gid=portage_gid, mode=0664)
+ except (IOError, OSError), e:
+ pass
+
+def portageexit():
+ global uid,portage_gid,portdb,db
+ if secpass and not os.environ.has_key("SANDBOX_ACTIVE"):
+ close_portdbapi_caches()
+ commit_mtimedb()
+
+atexit_register(portageexit)
+
+def global_updates(mysettings, trees, prev_mtimes):
+ """
+ Perform new global updates if they exist in $PORTDIR/profiles/updates/.
+
+ @param mysettings: A config instance for ROOT="/".
+ @type mysettings: config
+ @param trees: A dictionary containing portage trees.
+ @type trees: dict
+ @param prev_mtimes: A dictionary containing mtimes of files located in
+ $PORTDIR/profiles/updates/.
+ @type prev_mtimes: dict
+ @rtype: None or List
+	@return: None if there were no updates, otherwise a list of update commands
+ that have been performed.
+ """
+ # only do this if we're root and not running repoman/ebuild digest
+ global secpass
+ if secpass < 2 or "SANDBOX_ACTIVE" in os.environ:
+ return
+ updpath = os.path.join(mysettings["PORTDIR"], "profiles", "updates")
+
+ try:
+ if mysettings["PORTAGE_CALLER"] == "fixpackages":
+ update_data = grab_updates(updpath)
+ else:
+ update_data = grab_updates(updpath, prev_mtimes)
+ except portage_exception.DirectoryNotFound:
+ writemsg("--- 'profiles/updates' is empty or not available. Empty portage tree?\n")
+ return
+ myupd = None
+ if len(update_data) > 0:
+ do_upgrade_packagesmessage = 0
+ myupd = []
+ timestamps = {}
+ for mykey, mystat, mycontent in update_data:
+ writemsg_stdout("\n\n")
+ writemsg_stdout(green("Performing Global Updates: ")+bold(mykey)+"\n")
+ writemsg_stdout("(Could take a couple of minutes if you have a lot of binary packages.)\n")
+ writemsg_stdout(" "+bold(".")+"='update pass' "+bold("*")+"='binary update' "+bold("@")+"='/var/db move'\n"+" "+bold("s")+"='/var/db SLOT move' "+bold("S")+"='binary SLOT move' "+bold("p")+"='update /etc/portage/package.*'\n")
+ valid_updates, errors = parse_updates(mycontent)
+ myupd.extend(valid_updates)
+ writemsg_stdout(len(valid_updates) * "." + "\n")
+ if len(errors) == 0:
+ # Update our internal mtime since we
+ # processed all of our directives.
+ timestamps[mykey] = long(mystat.st_mtime)
+ else:
+ for msg in errors:
+ writemsg("%s\n" % msg, noiselevel=-1)
+
+ update_config_files("/",
+ mysettings.get("CONFIG_PROTECT","").split(),
+ mysettings.get("CONFIG_PROTECT_MASK","").split(),
+ myupd)
+
+ trees["/"]["bintree"] = binarytree("/", mysettings["PKGDIR"],
+ settings=mysettings)
+ for update_cmd in myupd:
+ if update_cmd[0] == "move":
+ trees["/"]["vartree"].dbapi.move_ent(update_cmd)
+ trees["/"]["bintree"].move_ent(update_cmd)
+ elif update_cmd[0] == "slotmove":
+ trees["/"]["vartree"].dbapi.move_slot_ent(update_cmd)
+ trees["/"]["bintree"].move_slot_ent(update_cmd)
+
+ # The above global updates proceed quickly, so they
+ # are considered a single mtimedb transaction.
+ if len(timestamps) > 0:
+ # We do not update the mtime in the mtimedb
+ # until after _all_ of the above updates have
+ # been processed because the mtimedb will
+ # automatically commit when killed by ctrl C.
+ for mykey, mtime in timestamps.iteritems():
+ prev_mtimes[mykey] = mtime
+
+ # We gotta do the brute force updates for these now.
+ if mysettings["PORTAGE_CALLER"] == "fixpackages" or \
+ "fixpackages" in mysettings.features:
+ trees["/"]["bintree"].update_ents(myupd)
+ else:
+ do_upgrade_packagesmessage = 1
+
+ # Update progress above is indicated by characters written to stdout so
+ # we print a couple new lines here to separate the progress output from
+ # what follows.
+ print
+ print
+
+ if do_upgrade_packagesmessage and \
+ listdir(os.path.join(mysettings["PKGDIR"], "All"), EmptyOnError=1):
+ writemsg_stdout(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the")
+ writemsg_stdout("\n tbz2's in the packages directory. "+bold("Note: This can take a very long time."))
+ writemsg_stdout("\n")
+ if myupd:
+ return myupd
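+
+# Example call (sketch): typically run once at startup against ROOT="/":
+#	myupd = global_updates(settings, db, mtimedb["updates"])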
+
+#continue setting up other trees
+
+class MtimeDB(dict):
+ def __init__(self, filename):
+ dict.__init__(self)
+ self.filename = filename
+ self._load(filename)
+
+ def _load(self, filename):
+ try:
+ f = open(filename)
+ mypickle = cPickle.Unpickler(f)
+ mypickle.find_global = None
+ d = mypickle.load()
+ f.close()
+ del f
+ except (IOError, OSError, EOFError, cPickle.UnpicklingError):
+ d = {}
+
+ if "old" in d:
+ d["updates"] = d["old"]
+ del d["old"]
+ if "cur" in d:
+ del d["cur"]
+
+ d.setdefault("starttime", 0)
+ d.setdefault("version", "")
+ for k in ("info", "ldpath", "updates"):
+ d.setdefault(k, {})
+
+ mtimedbkeys = set(("info", "ldpath", "resume", "resume_backup",
+ "starttime", "updates", "version"))
+
+ for k in d.keys():
+ if k not in mtimedbkeys:
+ writemsg("Deleting invalid mtimedb key: %s\n" % str(k))
+ del d[k]
+ self.update(d)
+ self._clean_data = copy.deepcopy(d)
+
+ def commit(self):
+ if not self.filename:
+ return
+ d = {}
+ d.update(self)
+ # Only commit if the internal state has changed.
+ if d != self._clean_data:
+ commit_mtimedb(mydict=d, filename=self.filename)
+ self._clean_data = copy.deepcopy(d)
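+
+	# Usage sketch (key and timestamp illustrative): repeated commits are
+	# cheap no-ops unless the tracked state actually changed:
+	#	mtimedb["updates"]["1Q-2007"] = 1169731200
+	#	mtimedb.commit()	# pickled via commit_mtimedb()
+	#	mtimedb.commit()	# state unchanged, nothing written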
+
+def create_trees(config_root=None, target_root=None, trees=None):
+ if trees is None:
+ trees = {}
+ else:
+ # clean up any existing portdbapi instances
+ for myroot in trees:
+ portdb = trees[myroot]["porttree"].dbapi
+ portdb.close_caches()
+ portdbapi.portdbapi_instances.remove(portdb)
+ del trees[myroot]["porttree"], myroot, portdb
+
+ settings = config(config_root=config_root, target_root=target_root,
+ config_incrementals=portage_const.INCREMENTALS)
+ settings.lock()
+ settings.validate()
+
+ myroots = [(settings["ROOT"], settings)]
+ if settings["ROOT"] != "/":
+ settings = config(config_root=None, target_root=None,
+ config_incrementals=portage_const.INCREMENTALS)
+ settings.lock()
+ settings.validate()
+ myroots.append((settings["ROOT"], settings))
+
+ for myroot, mysettings in myroots:
+ trees[myroot] = portage_util.LazyItemsDict(trees.get(myroot, None))
+ trees[myroot].addLazySingleton("virtuals", mysettings.getvirtuals, myroot)
+ trees[myroot].addLazySingleton(
+ "vartree", vartree, myroot, categories=mysettings.categories,
+ settings=mysettings)
+ trees[myroot].addLazySingleton("porttree",
+ portagetree, myroot, settings=mysettings)
+ trees[myroot].addLazySingleton("bintree",
+ binarytree, myroot, mysettings["PKGDIR"], settings=mysettings)
+ return trees
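+
+# Example call (sketch): when ROOT differs from "/", both roots get entries,
+# and each tree is only instantiated on first access:
+#	trees = create_trees(config_root="/", target_root="/mnt/gentoo")
+#	vardb = trees["/mnt/gentoo"]["vartree"].dbapi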
+
+# Initialization of legacy globals. No functions/classes below this point
+# please! When the above functions and classes become independent of the
+# below global variables, it will be possible to make the below code
+# conditional on a backward compatibility flag (backward compatibility could
+# be disabled via an environment variable, for example). This will enable new
+# code that is aware of this flag to import portage without the unnecessary
+# overhead (and other issues!) of initializing the legacy globals.
+
+def init_legacy_globals():
+ global db, settings, root, portdb, selinux_enabled, mtimedbfile, mtimedb, \
+ archlist, features, groups, pkglines, thirdpartymirrors, usedefaults, \
+ profiledir, flushmtimedb
+
+ # Portage needs to ensure a sane umask for the files it creates.
+ os.umask(022)
+
+ kwargs = {}
+ for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
+ kwargs[k] = os.environ.get(envvar, "/")
+
+ db = create_trees(**kwargs)
+
+ settings = db["/"]["vartree"].settings
+ portdb = db["/"]["porttree"].dbapi
+
+ for myroot in db:
+ if myroot != "/":
+ settings = db[myroot]["vartree"].settings
+ portdb = db[myroot]["porttree"].dbapi
+ break
+
+ root = settings["ROOT"]
+
+ mtimedbfile = os.path.join("/", CACHE_PATH.lstrip(os.path.sep), "mtimedb")
+ mtimedb = MtimeDB(mtimedbfile)
+
+ # ========================================================================
+ # COMPATIBILITY
+ # These attributes should not be used
+ # within Portage under any circumstances.
+ # ========================================================================
+ archlist = settings.archlist()
+ features = settings.features
+ groups = settings["ACCEPT_KEYWORDS"].split()
+ pkglines = settings.packages
+ selinux_enabled = settings.selinux_enabled()
+ thirdpartymirrors = settings.thirdpartymirrors()
+ usedefaults = settings.use_defs
+ profiledir = None
+ if os.path.isdir(PROFILE_PATH):
+ profiledir = PROFILE_PATH
+ def flushmtimedb(record):
+ writemsg("portage.flushmtimedb() is DEPRECATED\n")
+ # ========================================================================
+ # COMPATIBILITY
+ # These attributes should not be used
+ # within Portage under any circumstances.
+ # ========================================================================
+
+# WARNING!
+# The PORTAGE_LEGACY_GLOBALS environment variable is reserved for internal
+# use within Portage. External use of this variable is unsupported because
+# it is experimental and its behavior is likely to change.
+if "PORTAGE_LEGACY_GLOBALS" not in os.environ:
+ init_legacy_globals()
+
+# Clear the cache
+dircache={}
+
+# ============================================================================
+# ============================================================================
+
diff --git a/pym/portage/cache/__init__.py b/pym/portage/cache/__init__.py
new file mode 100644
index 00000000..cb1b59d6
--- /dev/null
+++ b/pym/portage/cache/__init__.py
@@ -0,0 +1,5 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Id$
+
diff --git a/pym/portage/cache/anydbm.py b/pym/portage/cache/anydbm.py
new file mode 100644
index 00000000..a4e0003d
--- /dev/null
+++ b/pym/portage/cache/anydbm.py
@@ -0,0 +1,72 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Id$
+
+anydbm_module = __import__("anydbm")
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+import os
+from cache import fs_template
+from cache import cache_errors
+
+
+class database(fs_template.FsBased):
+
+ autocommits = True
+ cleanse_keys = True
+ serialize_eclasses = False
+
+ def __init__(self, *args, **config):
+ super(database,self).__init__(*args, **config)
+
+ default_db = config.get("dbtype","anydbm")
+ if not default_db.startswith("."):
+ default_db = '.' + default_db
+
+ self._db_path = os.path.join(self.location, fs_template.gen_label(self.location, self.label)+default_db)
+ self.__db = None
+ try:
+ self.__db = anydbm_module.open(self._db_path, "w", self._perms)
+
+ except anydbm_module.error:
+ # XXX handle this at some point
+ try:
+ self._ensure_dirs()
+ self._ensure_dirs(self._db_path)
+ except (OSError, IOError), e:
+ raise cache_errors.InitializationError(self.__class__, e)
+
+ # try again if failed
+ try:
+			if self.__db is None:
+ self.__db = anydbm_module.open(self._db_path, "c", self._perms)
+ except anydbm_module.error, e:
+ raise cache_errors.InitializationError(self.__class__, e)
+ self._ensure_access(self._db_path)
+
+ def iteritems(self):
+ return self.__db.iteritems()
+
+ def _getitem(self, cpv):
+		# we override _getitem because the stored value is just a pickling of the data handed in.
+ return pickle.loads(self.__db[cpv])
+
+ def _setitem(self, cpv, values):
+ self.__db[cpv] = pickle.dumps(values,pickle.HIGHEST_PROTOCOL)
+
+ def _delitem(self, cpv):
+ del self.__db[cpv]
+
+ def iterkeys(self):
+ return iter(self.__db.keys())
+
+ def __contains__(self, cpv):
+ return cpv in self.__db
+
+	def __del__(self):
+		# the attribute is stored under its name-mangled key, "_database__db",
+		# which is what must be checked for in __dict__
+		if "_database__db" in self.__dict__ and self.__db is not None:
+			self.__db.sync()
+			self.__db.close()
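+
+# Usage sketch (positional args follow the template layout; paths are
+# hypothetical): each entry is stored as a pickle under its cpv key:
+#	db = database("/var/cache/edb/dep", "metadata", auxdbkeys)
+#	metadata = db["app-misc/foo-1.0"]	# unpickled dict of values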
diff --git a/pym/portage/cache/cache_errors.py b/pym/portage/cache/cache_errors.py
new file mode 100644
index 00000000..f63e5994
--- /dev/null
+++ b/pym/portage/cache/cache_errors.py
@@ -0,0 +1,41 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Id$
+
+class CacheError(Exception): pass
+
+class InitializationError(CacheError):
+ def __init__(self, class_name, error):
+ self.error, self.class_name = error, class_name
+ def __str__(self):
+ return "Creation of instance %s failed due to %s" % \
+ (self.class_name, str(self.error))
+
+
+class CacheCorruption(CacheError):
+ def __init__(self, key, ex):
+ self.key, self.ex = key, ex
+ def __str__(self):
+ return "%s is corrupt: %s" % (self.key, str(self.ex))
+
+
+class GeneralCacheCorruption(CacheError):
+ def __init__(self,ex): self.ex = ex
+ def __str__(self): return "corruption detected: %s" % str(self.ex)
+
+
+class InvalidRestriction(CacheError):
+ def __init__(self, key, restriction, exception=None):
+		if exception is None: exception = ''
+		self.key, self.restriction, self.ex = key, restriction, exception
+ def __str__(self):
+ return "%s:%s is not valid: %s" % \
+ (self.key, self.restriction, str(self.ex))
+
+
+class ReadOnlyRestriction(CacheError):
+ def __init__(self, info=''):
+ self.info = info
+ def __str__(self):
+ return "cache is non-modifiable"+str(self.info)
diff --git a/pym/portage/cache/flat_hash.py b/pym/portage/cache/flat_hash.py
new file mode 100644
index 00000000..48e8a175
--- /dev/null
+++ b/pym/portage/cache/flat_hash.py
@@ -0,0 +1,120 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Id$
+
+from cache import fs_template
+from cache import cache_errors
+import errno, os, stat
+from cache.template import reconstruct_eclasses
+# store the current key order *here*.
+class database(fs_template.FsBased):
+
+ autocommits = True
+
+ def __init__(self, *args, **config):
+ super(database,self).__init__(*args, **config)
+ self.location = os.path.join(self.location,
+ self.label.lstrip(os.path.sep).rstrip(os.path.sep))
+
+ if not self.readonly and not os.path.exists(self.location):
+ self._ensure_dirs()
+
+ def __getitem__(self, cpv):
+ fp = os.path.join(self.location, cpv)
+ try:
+ myf = open(fp, "r")
+ try:
+ d = self._parse_data(myf, cpv)
+ if "_mtime_" not in d:
+ """Backward compatibility with old cache that uses mtime
+ mangling."""
+ d["_mtime_"] = long(os.fstat(myf.fileno()).st_mtime)
+ return d
+ finally:
+ myf.close()
+ except (IOError, OSError), e:
+ if e.errno != errno.ENOENT:
+ raise cache_errors.CacheCorruption(cpv, e)
+ raise KeyError(cpv)
+
+ def _parse_data(self, data, cpv):
+ try:
+ d = dict(map(lambda x:x.rstrip("\n").split("=", 1), data))
+ except ValueError, e:
+ # If a line is missing an "=", the split length is 1 instead of 2.
+ raise cache_errors.CacheCorruption(cpv, e)
+ if "_eclasses_" in d:
+ d["_eclasses_"] = reconstruct_eclasses(cpv, d["_eclasses_"])
+ return d
+
+
+ def _setitem(self, cpv, values):
+ s = cpv.rfind("/")
+ fp = os.path.join(self.location,cpv[:s],".update.%i.%s" % (os.getpid(), cpv[s+1:]))
+ try: myf=open(fp, "w")
+ except (IOError, OSError), e:
+ if errno.ENOENT == e.errno:
+ try:
+ self._ensure_dirs(cpv)
+ myf=open(fp,"w")
+ except (OSError, IOError),e:
+ raise cache_errors.CacheCorruption(cpv, e)
+ else:
+ raise cache_errors.CacheCorruption(cpv, e)
+
+ for k, v in values.iteritems():
+ if k != "_mtime_" and (k == "_eclasses_" or k in self._known_keys):
+ myf.write("%s=%s\n" % (k, v))
+
+ myf.close()
+ self._ensure_access(fp, mtime=values["_mtime_"])
+
+ #update written. now we move it.
+
+ new_fp = os.path.join(self.location,cpv)
+ try: os.rename(fp, new_fp)
+ except (OSError, IOError), e:
+ os.remove(fp)
+ raise cache_errors.CacheCorruption(cpv, e)
+
+
+ def _delitem(self, cpv):
+ try:
+ os.remove(os.path.join(self.location,cpv))
+ except OSError, e:
+ if errno.ENOENT == e.errno:
+ raise KeyError(cpv)
+ else:
+ raise cache_errors.CacheCorruption(cpv, e)
+
+
+ def __contains__(self, cpv):
+ return os.path.exists(os.path.join(self.location, cpv))
+
+
+ def iterkeys(self):
+ """generator for walking the dir struct"""
+ dirs = [self.location]
+ len_base = len(self.location)
+ while len(dirs):
+ for l in os.listdir(dirs[0]):
+ if l.endswith(".cpickle"):
+ continue
+ p = os.path.join(dirs[0],l)
+ st = os.lstat(p)
+ if stat.S_ISDIR(st.st_mode):
+ dirs.append(p)
+ continue
+ yield p[len_base+1:]
+ dirs.pop(0)
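+
+	# Layout sketch (entry hypothetical): keys are paths relative to
+	# self.location, so a flat file at <location>/app-misc/foo-1.0 is
+	# yielded as the key "app-misc/foo-1.0".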
+
diff --git a/pym/portage/cache/flat_list.py b/pym/portage/cache/flat_list.py
new file mode 100644
index 00000000..85efa4c0
--- /dev/null
+++ b/pym/portage/cache/flat_list.py
@@ -0,0 +1,106 @@
+from cache import fs_template
+from cache import cache_errors
+import errno, os, stat
+
+# store the current key order *here*.
+class database(fs_template.FsBased):
+
+ autocommits = True
+
+ # do not screw with this ordering. _eclasses_ needs to be last
+ auxdbkey_order=('DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
+ 'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
+ 'KEYWORDS', 'IUSE', 'CDEPEND',
+ 'PDEPEND', 'PROVIDE','_eclasses_')
+
+ def __init__(self, label, auxdbkeys, **config):
+ super(database,self).__init__(label, auxdbkeys, **config)
+ self._base = os.path.join(self._base,
+ self.label.lstrip(os.path.sep).rstrip(os.path.sep))
+
+ if len(self._known_keys) > len(self.auxdbkey_order) + 2:
+			raise Exception("fewer ordered keys than auxdbkeys")
+ if not os.path.exists(self._base):
+ self._ensure_dirs()
+
+
+ def _getitem(self, cpv):
+ d = {}
+ try:
+ myf = open(os.path.join(self._base, cpv),"r")
+ for k,v in zip(self.auxdbkey_order, myf):
+ d[k] = v.rstrip("\n")
+ except (OSError, IOError),e:
+ if errno.ENOENT == e.errno:
+ raise KeyError(cpv)
+ raise cache_errors.CacheCorruption(cpv, e)
+
+ try:
+ d["_mtime_"] = long(os.fstat(myf.fileno()).st_mtime)
+ except OSError, e:
+ myf.close()
+ raise cache_errors.CacheCorruption(cpv, e)
+ myf.close()
+ return d
+
+
+ def _setitem(self, cpv, values):
+ s = cpv.rfind("/")
+ fp=os.path.join(self._base,cpv[:s],".update.%i.%s" % (os.getpid(), cpv[s+1:]))
+ try: myf=open(fp, "w")
+ except (OSError, IOError), e:
+ if errno.ENOENT == e.errno:
+ try:
+ self._ensure_dirs(cpv)
+ myf=open(fp,"w")
+ except (OSError, IOError),e:
+ raise cache_errors.CacheCorruption(cpv, e)
+ else:
+ raise cache_errors.CacheCorruption(cpv, e)
+
+
+ for x in self.auxdbkey_order:
+ myf.write(values.get(x,"")+"\n")
+
+ myf.close()
+ self._ensure_access(fp, mtime=values["_mtime_"])
+ #update written. now we move it.
+ new_fp = os.path.join(self._base,cpv)
+ try: os.rename(fp, new_fp)
+ except (OSError, IOError), e:
+ os.remove(fp)
+ raise cache_errors.CacheCorruption(cpv, e)
+
+
+ def _delitem(self, cpv):
+ try:
+ os.remove(os.path.join(self._base,cpv))
+ except OSError, e:
+ if errno.ENOENT == e.errno:
+ raise KeyError(cpv)
+ else:
+ raise cache_errors.CacheCorruption(cpv, e)
+
+
+ def __contains__(self, cpv):
+ return os.path.exists(os.path.join(self._base, cpv))
+
+
+ def iterkeys(self):
+ """generator for walking the dir struct"""
+ dirs = [self._base]
+ len_base = len(self._base)
+ while len(dirs):
+ for l in os.listdir(dirs[0]):
+ if l.endswith(".cpickle"):
+ continue
+ p = os.path.join(dirs[0],l)
+ st = os.lstat(p)
+ if stat.S_ISDIR(st.st_mode):
+ dirs.append(p)
+ continue
+ yield p[len_base+1:]
+ dirs.pop(0)
+
+
+ def commit(self): pass
diff --git a/pym/portage/cache/fs_template.py b/pym/portage/cache/fs_template.py
new file mode 100644
index 00000000..b76e98bd
--- /dev/null
+++ b/pym/portage/cache/fs_template.py
@@ -0,0 +1,74 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Id$
+
+import os
+from cache import template
+from portage_data import portage_gid
+
+class FsBased(template.database):
+	"""template wrapping the filesystem-specific options needed, and providing
+	_ensure_access as a way to attempt to ensure files have the specified owners/perms"""
+
+ def __init__(self, *args, **config):
+		"""throws InitializationError if needed args aren't specified
+		gid and perms aren't listed due to an oddity of the python currying mechanism
+		gid=portage_gid
+		perms=0664"""
+
+ for x,y in (("gid",portage_gid),("perms",0664)):
+ if x in config:
+ setattr(self, "_"+x, config[x])
+ del config[x]
+ else:
+ setattr(self, "_"+x, y)
+ super(FsBased, self).__init__(*args, **config)
+
+ if self.label.startswith(os.path.sep):
+ # normpath.
+ self.label = os.path.sep + os.path.normpath(self.label).lstrip(os.path.sep)
+
+
+ def _ensure_access(self, path, mtime=-1):
+		"""returns True if it is able to ensure that path is properly chmod'd and chowned,
+		False otherwise; if mtime is specified, attempts to ensure that it is correct as well"""
+ try:
+ os.chown(path, -1, self._gid)
+ os.chmod(path, self._perms)
+			if mtime != -1:
+ mtime=long(mtime)
+ os.utime(path, (mtime, mtime))
+ except OSError, IOError:
+ return False
+ return True
+
+ def _ensure_dirs(self, path=None):
+ """with path!=None, ensure beyond self.location. otherwise, ensure self.location"""
+ if path:
+ path = os.path.dirname(path)
+ base = self.location
+ else:
+ path = self.location
+ base='/'
+
+		for mydir in path.lstrip(os.path.sep).rstrip(os.path.sep).split(os.path.sep):
+			base = os.path.join(base,mydir)
+ if not os.path.exists(base):
+ um=os.umask(0)
+ try:
+ os.mkdir(base, self._perms | 0111)
+ os.chown(base, -1, self._gid)
+ finally:
+ os.umask(um)
+
+
+def gen_label(base, label):
+ """if supplied label is a path, generate a unique label based upon label, and supplied base path"""
+ if label.find(os.path.sep) == -1:
+ return label
+ label = label.strip("\"").strip("'")
+ label = os.path.join(*(label.rstrip(os.path.sep).split(os.path.sep)))
+ tail = os.path.split(label)[1]
+ return "%s-%X" % (tail, abs(label.__hash__()))
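+
+# Example (illustrative): a plain label passes through unchanged, while a
+# path-like label collapses to "<tail>-<hash>":
+#	gen_label("/var/cache/edb/dep", "metadata")		# -> "metadata"
+#	gen_label("/var/cache/edb/dep", "/usr/portage")	# -> "portage-<hex hash>"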
+
diff --git a/pym/portage/cache/mappings.py b/pym/portage/cache/mappings.py
new file mode 100644
index 00000000..9aa5a21e
--- /dev/null
+++ b/pym/portage/cache/mappings.py
@@ -0,0 +1,103 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Id$
+
+import UserDict
+
+class ProtectedDict(UserDict.DictMixin):
+ """
+	given an initial dict, this wraps that dict, storing changes in a secondary dict and
+	protecting the underlying dict from changes
+ """
+ __slots__=("orig","new","blacklist")
+
+ def __init__(self, orig):
+ self.orig = orig
+ self.new = {}
+ self.blacklist = {}
+
+
+ def __setitem__(self, key, val):
+ self.new[key] = val
+ if key in self.blacklist:
+ del self.blacklist[key]
+
+
+ def __getitem__(self, key):
+ if key in self.new:
+ return self.new[key]
+ if key in self.blacklist:
+ raise KeyError(key)
+ return self.orig[key]
+
+
+ def __delitem__(self, key):
+ if key in self.new:
+ del self.new[key]
+ elif key in self.orig:
+ if key not in self.blacklist:
+ self.blacklist[key] = True
+ return
+ raise KeyError(key)
+
+
+ def __iter__(self):
+ for k in self.new.iterkeys():
+ yield k
+ for k in self.orig.iterkeys():
+ if k not in self.blacklist and k not in self.new:
+ yield k
+
+
+ def keys(self):
+ return list(self.__iter__())
+
+
+ def has_key(self, key):
+ return key in self.new or (key not in self.blacklist and key in self.orig)
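+
+	# Behavior sketch (values illustrative): writes and deletes are staged in
+	# self.new and self.blacklist, never touching the wrapped dict:
+	#	orig = {"a": 1}
+	#	p = ProtectedDict(orig)
+	#	p["b"] = 2
+	#	del p["a"]
+	#	p.keys()	# ["b"]
+	#	orig		# still {"a": 1}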
+
+
+class LazyLoad(UserDict.DictMixin):
+ """
+ Lazy loading of values for a dict
+ """
+ __slots__=("pull", "d")
+
+ def __init__(self, pull_items_func, initial_items=[]):
+ self.d = {}
+ for k, v in initial_items:
+ self.d[k] = v
+ self.pull = pull_items_func
+
+ def __getitem__(self, key):
+ if key in self.d:
+ return self.d[key]
+ elif self.pull != None:
+ self.d.update(self.pull())
+ self.pull = None
+ return self.d[key]
+
+
+ def __iter__(self):
+ return iter(self.keys())
+
+ def keys(self):
+ if self.pull != None:
+ self.d.update(self.pull())
+ self.pull = None
+ return self.d.keys()
+
+
+ def has_key(self, key):
+ return key in self
+
+
+ def __contains__(self, key):
+ if key in self.d:
+ return True
+ elif self.pull != None:
+ self.d.update(self.pull())
+ self.pull = None
+ return key in self.d
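+
+	# Usage sketch (pull_func is a hypothetical callable returning an
+	# iterable of (key, value) pairs): values are pulled at most once, on
+	# the first miss, and cached afterwards:
+	#	lazy = LazyLoad(pull_func, initial_items=[("a", 1)])
+	#	lazy["a"]	# served from the initial items; pull_func not called
+	#	"b" in lazy	# triggers pull_func() once, then answers from the dict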
+
diff --git a/pym/portage/cache/metadata.py b/pym/portage/cache/metadata.py
new file mode 100644
index 00000000..df039d5e
--- /dev/null
+++ b/pym/portage/cache/metadata.py
@@ -0,0 +1,87 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Id$
+
+import os, stat, types
+from cache import flat_hash
+import eclass_cache
+from cache.template import reconstruct_eclasses
+from cache.mappings import ProtectedDict
+
+# this is the old cache format, flat_list. count maintained here.
+magic_line_count = 22
+
+# store the current key order *here*.
+class database(flat_hash.database):
+ complete_eclass_entries = False
+ auxdbkey_order=('DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
+ 'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
+ 'KEYWORDS', 'INHERITED', 'IUSE', 'CDEPEND',
+ 'PDEPEND', 'PROVIDE', 'EAPI')
+
+ autocommits = True
+
+ def __init__(self, location, *args, **config):
+ loc = location
+ super(database, self).__init__(location, *args, **config)
+ self.location = os.path.join(loc, "metadata","cache")
+ self.ec = eclass_cache.cache(loc)
+
+ def __getitem__(self, cpv):
+ return flat_hash.database.__getitem__(self, cpv)
+
+
+ def _parse_data(self, data, cpv):
+ # easy attempt first.
+ data = list(data)
+ if len(data) != magic_line_count:
+ d = flat_hash.database._parse_data(self, data, cpv)
+ else:
+ # this one's interesting.
+ d = {}
+
+ for line in data:
+ # yes, meant to iterate over a string.
+ hashed = False
+ # poor mans enumerate. replace when python 2.3 is required
+ for idx, c in zip(range(len(line)), line):
+ if not c.isalpha():
+ if c == "=" and idx > 0:
+ hashed = True
+ d[line[:idx]] = line[idx + 1:].rstrip("\n")
+ elif c == "_" or c.isdigit():
+ continue
+ break
+
+ if not hashed:
+ # non hashed.
+ d.clear()
+ # poor mans enumerate. replace when python 2.3 is required
+ for idx, key in zip(range(len(self.auxdbkey_order)), self.auxdbkey_order):
+ d[key] = data[idx].strip()
+ break
+
+ if "_eclasses_" not in d:
+ if "INHERITED" in d:
+ d["_eclasses_"] = self.ec.get_eclass_data(d["INHERITED"].split(), from_master_only=True)
+ del d["INHERITED"]
+ elif isinstance(d["_eclasses_"], basestring):
+ # We skip this if flat_hash.database._parse_data() was called above
+ # because it calls reconstruct_eclasses() internally.
+ d["_eclasses_"] = reconstruct_eclasses(None, d["_eclasses_"])
+
+ return d
+
+
+
+ def _setitem(self, cpv, values):
+ values = ProtectedDict(values)
+
+ # hack. proper solution is to make this a __setitem__ override, since template.__setitem__
+ # serializes _eclasses_, then we reconstruct it.
+ if "_eclasses_" in values:
+ values["INHERITED"] = ' '.join(reconstruct_eclasses(cpv, values["_eclasses_"]).keys())
+ del values["_eclasses_"]
+
+ flat_hash.database._setitem(self, cpv, values)
diff --git a/pym/portage/cache/metadata_overlay.py b/pym/portage/cache/metadata_overlay.py
new file mode 100644
index 00000000..d82ba96f
--- /dev/null
+++ b/pym/portage/cache/metadata_overlay.py
@@ -0,0 +1,105 @@
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: $
+
+import time
+if not hasattr(__builtins__, "set"):
+ from sets import Set as set
+from cache import template
+from cache.cache_errors import CacheCorruption
+from cache.flat_hash import database as db_rw
+from cache.metadata import database as db_ro
+
+class database(template.database):
+
+ serialize_eclasses = False
+
+ def __init__(self, location, label, auxdbkeys, db_rw=db_rw, db_ro=db_ro,
+ *args, **config):
+ super_config = config.copy()
+ super_config.pop("gid", None)
+ super(database, self).__init__(location, label, auxdbkeys,
+ *args, **super_config)
+ self.db_rw = db_rw(location, label, auxdbkeys, **config)
+ self.commit = self.db_rw.commit
+ self.autocommits = self.db_rw.autocommits
+ if isinstance(db_ro, type):
+ ro_config = config.copy()
+ ro_config["readonly"] = True
+ self.db_ro = db_ro(label, "metadata/cache", auxdbkeys, **ro_config)
+ else:
+ self.db_ro = db_ro
+
+ def __getitem__(self, cpv):
+ """funnel whiteout validation through here, since value needs to be fetched"""
+ try:
+ value = self.db_rw[cpv]
+ except KeyError:
+ return self.db_ro[cpv] # raises a KeyError when necessary
+ except CacheCorruption:
+ del self.db_rw[cpv]
+ return self.db_ro[cpv] # raises a KeyError when necessary
+ if self._is_whiteout(value):
+ if self._is_whiteout_valid(cpv, value):
+ raise KeyError(cpv)
+ else:
+ del self.db_rw[cpv]
+ return self.db_ro[cpv] # raises a KeyError when necessary
+ else:
+ return value
+
+ def _setitem(self, name, values):
+ value_ro = self.db_ro.get(name, None)
+ if value_ro is not None and \
+ self._are_values_identical(value_ro, values):
+ # we have matching values in the underlying db_ro
+ # so it is unnecessary to store data in db_rw
+ try:
+ del self.db_rw[name] # delete unwanted whiteout when necessary
+ except KeyError:
+ pass
+ return
+ self.db_rw[name] = values
+
+ def _delitem(self, cpv):
+ value = self[cpv] # validates whiteout and/or raises a KeyError when necessary
+ if self.db_ro.has_key(cpv):
+ self.db_rw[cpv] = self._create_whiteout(value)
+ else:
+ del self.db_rw[cpv]
+
+ def __contains__(self, cpv):
+ try:
+ self[cpv] # validates whiteout when necessary
+ except KeyError:
+ return False
+ return True
+
+ def iterkeys(self):
+ s = set()
+ for cpv in self.db_rw.iterkeys():
+ if self.has_key(cpv): # validates whiteout when necessary
+ yield cpv
+ # set includes whiteouts so they won't be yielded later
+ s.add(cpv)
+ for cpv in self.db_ro.iterkeys():
+ if cpv not in s:
+ yield cpv
+
+ def _is_whiteout(self, value):
+ return value["EAPI"] == "whiteout"
+
+ def _create_whiteout(self, value):
+ return {"EAPI":"whiteout","_eclasses_":value["_eclasses_"],"_mtime_":value["_mtime_"]}
+
+ def _is_whiteout_valid(self, name, value_rw):
+ try:
+ value_ro = self.db_ro[name]
+ return self._are_values_identical(value_rw,value_ro)
+ except KeyError:
+ return False
+
+ def _are_values_identical(self, value1, value2):
+ if long(value1["_mtime_"]) != long(value2["_mtime_"]):
+ return False
+ return value1["_eclasses_"] == value2["_eclasses_"]
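+
+# Editorial note (not part of the original commit): a whiteout is just an
+# ordinary rw-layer entry whose EAPI field carries the sentinel value, e.g.
+#
+# {"EAPI": "whiteout", "_eclasses_": {...}, "_mtime_": "1167609600"}
+#
+# Storing one in db_rw masks a cpv that still exists in the read-only db_ro
+# layer until the underlying ebuild changes.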
diff --git a/pym/portage/cache/sql_template.py b/pym/portage/cache/sql_template.py
new file mode 100644
index 00000000..e635616e
--- /dev/null
+++ b/pym/portage/cache/sql_template.py
@@ -0,0 +1,275 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Id$
+
+from cache import template, cache_errors
+from cache.template import reconstruct_eclasses
+
+class SQLDatabase(template.database):
+ """template class for RDBM based caches
+
+ This class is designed such that derivatives don't have to change much code, mostly constant strings.
+ _BaseError must be an exception class that all Exceptions thrown from the derived RDBMS are derived
+ from.
+
+ SCHEMA_INSERT_CPV_INTO_PACKAGE should be modified dependent on the RDBMS, as should
+ SCHEMA_PACKAGE_CREATE; basically you need to deal with creation of a unique pkgid. If the dbapi2
+ rdbms class has a method of recovering that id, then modify _insert_cpv to remove the extra select.
+
+ Creation of a derived class involves supplying _initdb_con, and table_exists.
+ Additionally, the default schemas may have to be modified.
+ """
+
+ SCHEMA_PACKAGE_NAME = "package_cache"
+ SCHEMA_PACKAGE_CREATE = "CREATE TABLE %s (\
+ pkgid INTEGER PRIMARY KEY, label VARCHAR(255), cpv VARCHAR(255), UNIQUE(label, cpv))" % SCHEMA_PACKAGE_NAME
+ SCHEMA_PACKAGE_DROP = "DROP TABLE %s" % SCHEMA_PACKAGE_NAME
+
+ SCHEMA_VALUES_NAME = "values_cache"
+ SCHEMA_VALUES_CREATE = "CREATE TABLE %s ( pkgid integer references %s (pkgid) on delete cascade, \
+ key varchar(255), value text, UNIQUE(pkgid, key))" % (SCHEMA_VALUES_NAME, SCHEMA_PACKAGE_NAME)
+ SCHEMA_VALUES_DROP = "DROP TABLE %s" % SCHEMA_VALUES_NAME
+ SCHEMA_INSERT_CPV_INTO_PACKAGE = "INSERT INTO %s (label, cpv) VALUES(%%s, %%s)" % SCHEMA_PACKAGE_NAME
+
+ _BaseError = ()
+ _dbClass = None
+
+ autocommits = False
+# cleanse_keys = True
+
+ # boolean indicating if the derived RDBMS class supports replace syntax
+ _supports_replace = False
+
+ def __init__(self, location, label, auxdbkeys, *args, **config):
+ """initialize the instance.
+ derived classes shouldn't need to override this"""
+
+ super(SQLDatabase, self).__init__(location, label, auxdbkeys, *args, **config)
+
+ config.setdefault("host","127.0.0.1")
+ config.setdefault("autocommit", self.autocommits)
+ self._initdb_con(config)
+
+ self.label = self._sfilter(self.label)
+
+
+ def _dbconnect(self, config):
+ """should be overridden if the derived class needs special parameters for initializing
+ the db connection, or cursor"""
+ self.db = self._dbClass(**config)
+ self.con = self.db.cursor()
+
+
+ def _initdb_con(self,config):
+ """ensure needed tables are in place.
+ If the derived class needs a different set of table creation commands, overload the appropriate
+ SCHEMA_ attributes. If it needs additional setup beyond that, override this method."""
+
+ self._dbconnect(config)
+ if not self._table_exists(self.SCHEMA_PACKAGE_NAME):
+ if self.readonly:
+ raise cache_errors.ReadOnlyRestriction("table %s doesn't exist" % \
+ self.SCHEMA_PACKAGE_NAME)
+ try: self.con.execute(self.SCHEMA_PACKAGE_CREATE)
+ except self._BaseError, e:
+ raise cache_errors.InitializationError(self.__class__, e)
+
+ if not self._table_exists(self.SCHEMA_VALUES_NAME):
+ if self.readonly:
+ raise cache_errors.ReadOnlyRestriction("table %s doesn't exist" % \
+ self.SCHEMA_VALUES_NAME)
+ try: self.con.execute(self.SCHEMA_VALUES_CREATE)
+ except self._BaseError, e:
+ raise cache_errors.InitializationError(self.__class__, e)
+
+
+ def _table_exists(self, tbl):
+ """return true if a table exists
+ derived classes must override this"""
+ raise NotImplementedError
+
+
+ def _sfilter(self, s):
+ """meta escaping, returns quoted string for use in sql statements"""
+ return "\"%s\"" % s.replace("\\","\\\\").replace("\"","\\\"")
+
+
+ def _getitem(self, cpv):
+ try: self.con.execute("SELECT key, value FROM %s NATURAL JOIN %s "
+ "WHERE label=%s AND cpv=%s" % (self.SCHEMA_PACKAGE_NAME, self.SCHEMA_VALUES_NAME,
+ self.label, self._sfilter(cpv)))
+ except self._BaseError, e:
+ raise cache_errors.CacheCorruption(self, cpv, e)
+
+ rows = self.con.fetchall()
+
+ if len(rows) == 0:
+ raise KeyError(cpv)
+
+ vals = dict([(k,"") for k in self._known_keys])
+ vals.update(dict(rows))
+ return vals
+
+
+ def _delitem(self, cpv):
+ """delete a cpv cache entry
+ derived RDBM classes for this *must* either support cascaded deletes, or
+ override this method"""
+ try:
+ try:
+ self.con.execute("DELETE FROM %s WHERE label=%s AND cpv=%s" % \
+ (self.SCHEMA_PACKAGE_NAME, self.label, self._sfilter(cpv)))
+ if self.autocommits:
+ self.commit()
+ except self._BaseError, e:
+ raise cache_errors.CacheCorruption(self, cpv, e)
+ if self.con.rowcount <= 0:
+ raise KeyError(cpv)
+ except Exception:
+ if not self.autocommits:
+ self.db.rollback()
+ # yes, this can roll back a lot more than just the delete. deal.
+ raise
+
+ def __del__(self):
+ # just to be safe.
+ if "db" in self.__dict__ and self.db != None:
+ self.commit()
+ self.db.close()
+
+ def _setitem(self, cpv, values):
+
+ try:
+ # insert.
+ try: pkgid = self._insert_cpv(cpv)
+ except self._BaseError, e:
+ raise cache_errors.CacheCorruption(cpv, e)
+
+ # __getitem__ fills out missing values,
+ # so we store only what's handed to us and is a known key
+ db_values = []
+ for key in self._known_keys:
+ if values.has_key(key) and values[key] != '':
+ db_values.append({"key":key, "value":values[key]})
+
+ if len(db_values) > 0:
+ try: self.con.executemany("INSERT INTO %s (pkgid, key, value) VALUES(\"%s\", %%(key)s, %%(value)s)" % \
+ (self.SCHEMA_VALUES_NAME, str(pkgid)), db_values)
+ except self._BaseError, e:
+ raise cache_errors.CacheCorruption(cpv, e)
+ if self.autocommits:
+ self.commit()
+
+ except Exception:
+ if not self.autocommits:
+ try: self.db.rollback()
+ except self._BaseError: pass
+ raise
+
+
+ def _insert_cpv(self, cpv):
+ """uses SCHEMA_INSERT_CPV_INTO_PACKAGE, which must be overloaded if the table definition
+ doesn't support auto-increment columns for pkgid.
+ returns the cpvs new pkgid
+ note this doesn't commit the transaction. The caller is expected to."""
+
+ if self._supports_replace:
+ query_str = self.SCHEMA_INSERT_CPV_INTO_PACKAGE.replace("INSERT","REPLACE",1)
+ else:
+ # just delete it. note we pass the raw cpv here, since __delitem__ applies _sfilter itself.
+ try: del self[cpv]
+ except (cache_errors.CacheCorruption, KeyError): pass
+ query_str = self.SCHEMA_INSERT_CPV_INTO_PACKAGE
+ cpv = self._sfilter(cpv)
+ try:
+ self.con.execute(query_str % (self.label, cpv))
+ except self._BaseError:
+ self.db.rollback()
+ raise
+ self.con.execute("SELECT pkgid FROM %s WHERE label=%s AND cpv=%s" % \
+ (self.SCHEMA_PACKAGE_NAME, self.label, cpv))
+
+ if self.con.rowcount != 1:
+ raise cache_errors.CacheCorruption(cpv, "Tried to insert the cpv, but found "
+ " %i matches upon the following select!" % self.con.rowcount)
+ return self.con.fetchone()[0]
+
+
+ def __contains__(self, cpv):
+ if not self.autocommits:
+ try: self.commit()
+ except self._BaseError, e:
+ raise cache_errors.GeneralCacheCorruption(e)
+
+ try: self.con.execute("SELECT cpv FROM %s WHERE label=%s AND cpv=%s" % \
+ (self.SCHEMA_PACKAGE_NAME, self.label, self._sfilter(cpv)))
+ except self._BaseError, e:
+ raise cache_errors.GeneralCacheCorruption(e)
+ return self.con.rowcount > 0
+
+
+ def iterkeys(self):
+ if not self.autocommits:
+ try: self.commit()
+ except self._BaseError, e:
+ raise cache_errors.GeneralCacheCorruption(e)
+
+ try: self.con.execute("SELECT cpv FROM %s WHERE label=%s" %
+ (self.SCHEMA_PACKAGE_NAME, self.label))
+ except self._BaseError, e:
+ raise cache_errors.GeneralCacheCorruption(e)
+# return [ row[0] for row in self.con.fetchall() ]
+ for x in self.con.fetchall():
+ yield x[0]
+
+ def iteritems(self):
+ try: self.con.execute("SELECT cpv, key, value FROM %s NATURAL JOIN %s "
+ "WHERE label=%s" % (self.SCHEMA_PACKAGE_NAME, self.SCHEMA_VALUES_NAME,
+ self.label))
+ except self._BaseError, e:
+ # no single cpv is at fault for a failed bulk select (and cpv is undefined here)
+ raise cache_errors.GeneralCacheCorruption(e)
+
+ oldcpv = None
+ l = []
+ for x, y, v in self.con.fetchall():
+ if oldcpv != x:
+ if oldcpv != None:
+ d = dict(l)
+ if "_eclasses_" in d:
+ d["_eclasses_"] = reconstruct_eclasses(oldcpv, d["_eclasses_"])
+ yield oldcpv, d
+ del l[:] # python 2 lists have no clear() method
+ oldcpv = x
+ l.append((y,v))
+ if oldcpv != None:
+ d = dict(l)
+ if "_eclasses_" in d:
+ d["_eclasses_"] = reconstruct_eclasses(oldcpv, d["_eclasses_"])
+ yield oldcpv, d
+
+ def commit(self):
+ self.db.commit()
+
+ def get_matches(self,match_dict):
+ query_list = []
+ for k,v in match_dict.items():
+ if k not in self._known_keys:
+ raise cache_errors.InvalidRestriction(k, v, "key isn't known to this cache instance")
+ v = v.replace("%","\\%")
+ v = v.replace(".*","%")
+ query_list.append("(key=%s AND value LIKE %s)" % (self._sfilter(k), self._sfilter(v)))
+
+ if len(query_list):
+ query = " AND "+" AND ".join(query_list)
+ else:
+ query = ''
+
+ print "query = SELECT cpv from package_cache natural join values_cache WHERE label=%s %s" % (self.label, query)
+ try: self.con.execute("SELECT cpv from package_cache natural join values_cache WHERE label=%s %s" % \
+ (self.label, query))
+ except self._BaseError, e:
+ raise cache_errors.GeneralCacheCorruption(e)
+
+ return [ row[0] for row in self.con.fetchall() ]
+
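+# Editorial sketch (not part of the original commit) of the minimal surface a
+# derived backend supplies, per the class docstring above. The sqlite3 names
+# are illustrative assumptions; a real backend also has to map its connect()
+# keywords onto the config dict handed to _dbconnect().
+#
+# import sqlite3
+# class database(SQLDatabase):
+#     _BaseError = sqlite3.Error
+#     _dbClass = sqlite3.connect
+#     def _table_exists(self, tbl):
+#         self.con.execute("SELECT name FROM sqlite_master WHERE "
+#             "type='table' AND name=%s" % self._sfilter(tbl))
+#         return len(self.con.fetchall()) == 1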
diff --git a/pym/portage/cache/sqlite.py b/pym/portage/cache/sqlite.py
new file mode 100644
index 00000000..5c1bfa26
--- /dev/null
+++ b/pym/portage/cache/sqlite.py
@@ -0,0 +1,236 @@
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: $
+
+from cache import fs_template
+from cache import cache_errors
+import os
+from cache.template import reconstruct_eclasses
+from portage_util import writemsg, apply_secpass_permissions
+from portage_data import portage_gid
+try:
+ import sqlite3 as db_module # sqlite3 is optional with >=python-2.5
+except ImportError:
+ from pysqlite2 import dbapi2 as db_module
+DBError = db_module.Error
+
+class database(fs_template.FsBased):
+
+ autocommits = False
+ synchronous = False
+ # cache_bytes is used together with page_size (set at sqlite build time)
+ # to calculate the number of pages requested, according to the following
+ # equation: cache_bytes = page_bytes * page_count
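+ # (editorial note: with sqlite's common 1024-byte page size, the 10MB
+ # default below works out to 10240 pages)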
+ cache_bytes = 1024 * 1024 * 10
+ _db_module = db_module
+ _db_error = DBError
+ _db_table = None
+
+ def __init__(self, *args, **config):
+ super(database, self).__init__(*args, **config)
+ self._allowed_keys = ["_mtime_", "_eclasses_"] + self._known_keys
+ self.location = os.path.join(self.location,
+ self.label.lstrip(os.path.sep).rstrip(os.path.sep))
+
+ if not os.path.exists(self.location):
+ self._ensure_dirs()
+
+ config.setdefault("autocommit", self.autocommits)
+ config.setdefault("cache_bytes", self.cache_bytes)
+ config.setdefault("synchronous", self.synchronous)
+ # Timeout for throwing a "database is locked" exception (pysqlite
+ # default is 5.0 seconds).
+ config.setdefault("timeout", 15)
+ self._db_init_connection(config)
+ self._db_init_structures()
+
+ def _db_escape_string(self, s):
+ """meta escaping, returns quoted string for use in sql statements"""
+ return "'%s'" % str(s).replace("\\","\\\\").replace("'","''")
+
+ def _db_init_connection(self, config):
+ self._dbpath = self.location + ".sqlite"
+ #if os.path.exists(self._dbpath):
+ # os.unlink(self._dbpath)
+ connection_kwargs = {}
+ connection_kwargs["timeout"] = config["timeout"]
+ try:
+ self._ensure_dirs()
+ self._db_connection = self._db_module.connect(
+ database=self._dbpath, **connection_kwargs)
+ self._db_cursor = self._db_connection.cursor()
+ self._db_cursor.execute("PRAGMA encoding = %s" % self._db_escape_string("UTF-8"))
+ if not apply_secpass_permissions(self._dbpath, gid=portage_gid, mode=070, mask=02):
+ raise cache_errors.InitializationError(self.__class__, "can't ensure perms on %s" % self._dbpath)
+ self._db_init_cache_size(config["cache_bytes"])
+ self._db_init_synchronous(config["synchronous"])
+ except self._db_error, e:
+ raise cache_errors.InitializationError(self.__class__, e)
+
+ def _db_init_structures(self):
+ self._db_table = {}
+ self._db_table["packages"] = {}
+ mytable = "portage_packages"
+ self._db_table["packages"]["table_name"] = mytable
+ self._db_table["packages"]["package_id"] = "internal_db_package_id"
+ self._db_table["packages"]["package_key"] = "portage_package_key"
+ self._db_table["packages"]["internal_columns"] = \
+ [self._db_table["packages"]["package_id"],
+ self._db_table["packages"]["package_key"]]
+ create_statement = []
+ create_statement.append("CREATE TABLE")
+ create_statement.append(mytable)
+ create_statement.append("(")
+ table_parameters = []
+ table_parameters.append("%s INTEGER PRIMARY KEY AUTOINCREMENT" % self._db_table["packages"]["package_id"])
+ table_parameters.append("%s TEXT" % self._db_table["packages"]["package_key"])
+ for k in self._allowed_keys:
+ table_parameters.append("%s TEXT" % k)
+ table_parameters.append("UNIQUE(%s)" % self._db_table["packages"]["package_key"])
+ create_statement.append(",".join(table_parameters))
+ create_statement.append(")")
+
+ self._db_table["packages"]["create"] = " ".join(create_statement)
+ self._db_table["packages"]["columns"] = \
+ self._db_table["packages"]["internal_columns"] + \
+ self._allowed_keys
+
+ cursor = self._db_cursor
+ for k, v in self._db_table.iteritems():
+ if self._db_table_exists(v["table_name"]):
+ create_statement = self._db_table_get_create(v["table_name"])
+ if create_statement != v["create"]:
+ writemsg("sqlite: dropping old table: %s\n" % v["table_name"])
+ cursor.execute("DROP TABLE %s" % v["table_name"])
+ cursor.execute(v["create"])
+ else:
+ cursor.execute(v["create"])
+
+ def _db_table_exists(self, table_name):
+ """return true/false dependant on a tbl existing"""
+ cursor = self._db_cursor
+ cursor.execute("SELECT name FROM sqlite_master WHERE type=\"table\" AND name=%s" % \
+ self._db_escape_string(table_name))
+ return len(cursor.fetchall()) == 1
+
+ def _db_table_get_create(self, table_name):
+ """return true/false dependant on a tbl existing"""
+ cursor = self._db_cursor
+ cursor.execute("SELECT sql FROM sqlite_master WHERE name=%s" % \
+ self._db_escape_string(table_name))
+ return cursor.fetchall()[0][0]
+
+ def _db_init_cache_size(self, cache_bytes):
+ cursor = self._db_cursor
+ cursor.execute("PRAGMA page_size")
+ page_size=int(cursor.fetchone()[0])
+ # number of pages, sqlite default is 2000
+ cache_size = cache_bytes / page_size
+ cursor.execute("PRAGMA cache_size = %d" % cache_size)
+ cursor.execute("PRAGMA cache_size")
+ actual_cache_size = int(cursor.fetchone()[0])
+ del cursor
+ if actual_cache_size != cache_size:
+ raise cache_errors.InitializationError(self.__class__,"actual cache_size = "+actual_cache_size+" does does not match requested size of "+cache_size)
+
+ def _db_init_synchronous(self, synchronous):
+ cursor = self._db_cursor
+ cursor.execute("PRAGMA synchronous = %d" % synchronous)
+ cursor.execute("PRAGMA synchronous")
+ actual_synchronous=int(cursor.fetchone()[0])
+ del cursor
+ if actual_synchronous!=synchronous:
+ raise cache_errors.InitializationError(self.__class__,"actual synchronous = "+actual_synchronous+" does does not match requested value of "+synchronous)
+
+ def __getitem__(self, cpv):
+ cursor = self._db_cursor
+ cursor.execute("select * from %s where %s=%s" % \
+ (self._db_table["packages"]["table_name"],
+ self._db_table["packages"]["package_key"],
+ self._db_escape_string(cpv)))
+ result = cursor.fetchall()
+ if len(result) == 1:
+ pass
+ elif len(result) == 0:
+ raise KeyError(cpv)
+ else:
+ raise cache_errors.CacheCorruption(cpv, "key is not unique")
+ d = {}
+ internal_columns = self._db_table["packages"]["internal_columns"]
+ column_index = -1
+ for k in self._db_table["packages"]["columns"]:
+ column_index +=1
+ if k not in internal_columns:
+ d[k] = result[0][column_index]
+ # XXX: The resolver chokes on unicode strings so we convert them here.
+ for k in d.keys():
+ try:
+ d[k]=str(d[k]) # convert unicode strings to normal
+ except UnicodeEncodeError, e:
+ pass #writemsg("%s: %s\n" % (cpv, str(e)))
+ if "_eclasses_" in d:
+ d["_eclasses_"] = reconstruct_eclasses(cpv, d["_eclasses_"])
+ for x in self._known_keys:
+ d.setdefault(x,'')
+ return d
+
+ def _setitem(self, cpv, values):
+ update_statement = []
+ update_statement.append("REPLACE INTO %s" % self._db_table["packages"]["table_name"])
+ update_statement.append("(")
+ update_statement.append(','.join([self._db_table["packages"]["package_key"]] + self._allowed_keys))
+ update_statement.append(")")
+ update_statement.append("VALUES")
+ update_statement.append("(")
+ values_parameters = []
+ values_parameters.append(self._db_escape_string(cpv))
+ for k in self._allowed_keys:
+ values_parameters.append(self._db_escape_string(values.get(k, '')))
+ update_statement.append(",".join(values_parameters))
+ update_statement.append(")")
+ cursor = self._db_cursor
+ try:
+ s = " ".join(update_statement)
+ cursor.execute(s)
+ except self._db_error, e:
+ writemsg("%s: %s\n" % (cpv, str(e)))
+ raise
+
+ def commit(self):
+ self._db_connection.commit()
+
+ def _delitem(self, cpv):
+ cursor = self._db_cursor
+ cursor.execute("DELETE FROM %s WHERE %s=%s" % \
+ (self._db_table["packages"]["table_name"],
+ self._db_table["packages"]["package_key"],
+ self._db_escape_string(cpv)))
+
+ def __contains__(self, cpv):
+ cursor = self._db_cursor
+ cursor.execute(" ".join(
+ ["SELECT %s FROM %s" %
+ (self._db_table["packages"]["package_id"],
+ self._db_table["packages"]["table_name"]),
+ "WHERE %s=%s" % (
+ self._db_table["packages"]["package_key"],
+ self._db_escape_string(cpv))]))
+ result = cursor.fetchall()
+ if len(result) == 0:
+ return False
+ elif len(result) == 1:
+ return True
+ else:
+ raise cache_errors.CacheCorruption(cpv, "key is not unique")
+
+ def iterkeys(self):
+ """generator for walking the dir struct"""
+ cursor = self._db_cursor
+ cursor.execute("SELECT %s FROM %s" % \
+ (self._db_table["packages"]["package_key"],
+ self._db_table["packages"]["table_name"]))
+ result = cursor.fetchall()
+ key_list = [x[0] for x in result]
+ del result
+ while key_list:
+ yield key_list.pop()
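+
+# Editorial sketch (not part of the original commit): typical construction,
+# going by the template/fs_template __init__ signatures. The paths, label,
+# gid and the truncated key tuple are all illustrative.
+#
+# >>> from cache.sqlite import database # doctest: +SKIP
+# >>> auxdb = database("/var/cache/edb/dep", "/usr/portage",
+# ...     ("DEPEND", "RDEPEND", "SLOT"), gid=250) # doctest: +SKIP
+# >>> auxdb["sys-apps/portage-2.1.2"]["SLOT"] # doctest: +SKIP
+# '0'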
diff --git a/pym/portage/cache/template.py b/pym/portage/cache/template.py
new file mode 100644
index 00000000..4ffd9b9e
--- /dev/null
+++ b/pym/portage/cache/template.py
@@ -0,0 +1,200 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Id$
+
+from cache import cache_errors
+from cache.cache_errors import InvalidRestriction
+from cache.mappings import ProtectedDict
+
+class database(object):
+ # this is for metadata/cache transfer.
+ # basically flags the cache needs be updated when transfered cache to cache.
+ # leave this.
+
+ complete_eclass_entries = True
+ autocommits = False
+ cleanse_keys = False
+ serialize_eclasses = True
+
+ def __init__(self, location, label, auxdbkeys, readonly=False):
+ """ initialize the derived class; specifically, store label/keys"""
+ self._known_keys = auxdbkeys
+ self.location = location
+ self.label = label
+ self.readonly = readonly
+ self.sync_rate = 0
+ self.updates = 0
+
+ def __getitem__(self, cpv):
+ """set a cpv to values
+ This shouldn't be overriden in derived classes since it handles the __eclasses__ conversion.
+ that said, if the class handles it, they can override it."""
+ if self.updates > self.sync_rate:
+ self.commit()
+ self.updates = 0
+ d=self._getitem(cpv)
+ if self.serialize_eclasses and "_eclasses_" in d:
+ d["_eclasses_"] = reconstruct_eclasses(cpv, d["_eclasses_"])
+ return d
+
+ def _getitem(self, cpv):
+ """get cpv's values.
+ override this in derived classes"""
+ raise NotImplementedError
+
+ def __setitem__(self, cpv, values):
+ """set a cpv to values
+ This shouldn't be overridden in derived classes since it handles the readonly checks"""
+ if self.readonly:
+ raise cache_errors.ReadOnlyRestriction()
+ if self.cleanse_keys:
+ d=ProtectedDict(values)
+ for k in d.keys():
+ if d[k] == '':
+ del d[k]
+ if self.serialize_eclasses and "_eclasses_" in values:
+ d["_eclasses_"] = serialize_eclasses(d["_eclasses_"])
+ elif self.serialize_eclasses and "_eclasses_" in values:
+ d = ProtectedDict(values)
+ d["_eclasses_"] = serialize_eclasses(d["_eclasses_"])
+ else:
+ d = values
+ self._setitem(cpv, d)
+ if not self.autocommits:
+ self.updates += 1
+ if self.updates > self.sync_rate:
+ self.commit()
+ self.updates = 0
+
+ def _setitem(self, name, values):
+ """__setitem__ calls this after readonly checks. override it in derived classes
+ note the _eclasses_ key *must* be handled"""
+ raise NotImplementedError
+
+ def __delitem__(self, cpv):
+ """delete a key from the cache.
+ This shouldn't be overridden in derived classes since it handles the readonly checks"""
+ if self.readonly:
+ raise cache_errors.ReadOnlyRestriction()
+ if not self.autocommits:
+ self.updates += 1
+ self._delitem(cpv)
+ if self.updates > self.sync_rate:
+ self.commit()
+ self.updates = 0
+
+ def _delitem(self,cpv):
+ """__delitem__ calls this after readonly checks. override it in derived classes"""
+ raise NotImplementedError
+
+ def has_key(self, cpv):
+ return cpv in self
+
+ def keys(self):
+ return tuple(self.iterkeys())
+
+ def iterkeys(self):
+ raise NotImplementedError
+
+ def iteritems(self):
+ for x in self.iterkeys():
+ yield (x, self[x])
+
+ def items(self):
+ return list(self.iteritems())
+
+ def sync(self, rate=0):
+ self.sync_rate = rate
+ if rate == 0:
+ self.commit()
+
+ def commit(self):
+ if not self.autocommits:
+ raise NotImplementedError
+
+ def __contains__(self, cpv):
+ """This method should always be overridden. It is provided only for
+ backward compatibility with modules that override has_key instead. It
+ will automatically raise a NotImplementedError if has_key has not been
+ overridden."""
+ if self.has_key.im_func is database.has_key.im_func:
+ # prevent a possible recursive loop (bound methods are fresh objects
+ # on every access, so compare the underlying functions)
+ raise NotImplementedError
+ return self.has_key(cpv)
+
+ def get(self, k, x=None):
+ try:
+ return self[k]
+ except KeyError:
+ return x
+
+ def get_matches(self, match_dict):
+ """generic function for walking the entire cache db, matching restrictions to
+ filter what cpv's are returned. Derived classes should override this if they
+ can implement a faster method than pulling each cpv:values and checking it.
+
+ For example, RDBMS derived classes should push the matching logic down to the
+ actual RDBM."""
+
+ import re
+ restricts = {}
+ for key,match in match_dict.iteritems():
+ # XXX this sucks.
+ try:
+ if isinstance(match, str):
+ restricts[key] = re.compile(match).match
+ else:
+ restricts[key] = re.compile(match[0],match[1]).match
+ except re.error, e:
+ raise InvalidRestriction(key, match, e)
+ if key not in self._known_keys:
+ raise InvalidRestriction(key, match, "Key isn't valid")
+
+ for cpv in self.keys():
+ cont = True
+ vals = self[cpv]
+ for key, match in restricts.iteritems():
+ if not match(vals[key]):
+ cont = False
+ break
+ if cont:
+ yield cpv
+
+
+def serialize_eclasses(eclass_dict):
+ """takes a dict, returns a string representing said dict"""
+ """The "new format", which causes older versions of <portage-2.1.2 to
+ traceback with a ValueError due to failed long() conversion. This format
+ isn't currently written, but the capability to read it is already built
+ in.
+ return "\t".join(["%s\t%s" % (k, str(v)) \
+ for k, v in eclass_dict.iteritems()])
+ """
+ if not eclass_dict:
+ return ""
+ return "\t".join(["%s\t%s\t%s" % (k, v[0], str(v[1])) \
+ for k, v in eclass_dict.iteritems()])
+
+def reconstruct_eclasses(cpv, eclass_string):
+ """returns a dict when handed a string generated by serialize_eclasses"""
+ eclasses = eclass_string.rstrip().lstrip().split("\t")
+ if eclasses == [""]:
+ # occasionally this occurs in the fs backends. they suck.
+ return {}
+
+ if len(eclasses) % 2 != 0 and len(eclasses) % 3 != 0:
+ raise cache_errors.CacheCorruption(cpv, "_eclasses_ was of invalid len %i" % len(eclasses))
+ d={}
+ try:
+ if eclasses[1].isdigit():
+ for x in xrange(0, len(eclasses), 2):
+ d[eclasses[x]] = ("", long(eclasses[x + 1]))
+ else:
+ # The old format contains paths that will be discarded.
+ for x in xrange(0, len(eclasses), 3):
+ d[eclasses[x]] = (eclasses[x + 1], long(eclasses[x + 2]))
+ except ValueError:
+ raise cache_errors.CacheCorruption(cpv, "_eclasses_ mtime conversion to long failed")
+ del eclasses
+ return d
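+
+# Editorial sketch (not part of the original commit): the current on-disk
+# _eclasses_ format is a flat tab-separated list of name/path/mtime triples.
+#
+# >>> s = serialize_eclasses({"eutils": ("/usr/portage/eclass", 1167609600L)})
+# >>> s == "eutils\t/usr/portage/eclass\t1167609600"
+# True
+# >>> reconstruct_eclasses("app-misc/foo-1", s)
+# {'eutils': ('/usr/portage/eclass', 1167609600L)}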
diff --git a/pym/portage/cache/util.py b/pym/portage/cache/util.py
new file mode 100644
index 00000000..6393deef
--- /dev/null
+++ b/pym/portage/cache/util.py
@@ -0,0 +1,129 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Id$
+
+if not hasattr(__builtins__, "set"):
+ from sets import Set as set
+from itertools import chain
+from cache import cache_errors
+
+def mirror_cache(valid_nodes_iterable, src_cache, trg_cache, eclass_cache=None, verbose_instance=None):
+
+ if not src_cache.complete_eclass_entries and not eclass_cache:
+ raise Exception("eclass_cache required for cache's of class %s!" % src_cache.__class__)
+
+ if verbose_instance == None:
+ noise=quiet_mirroring()
+ else:
+ noise=verbose_instance
+
+ dead_nodes = dict.fromkeys(trg_cache.keys())
+ count=0
+
+ if not trg_cache.autocommits:
+ trg_cache.sync(100)
+
+ for x in valid_nodes_iterable:
+# print "processing x=",x
+ count+=1
+ if dead_nodes.has_key(x):
+ del dead_nodes[x]
+ try: entry = src_cache[x]
+ except KeyError, e:
+ noise.missing_entry(x)
+ del e
+ continue
+ write_it = True
+ trg = None
+ try:
+ trg = trg_cache[x]
+ if long(trg["_mtime_"]) == long(entry["_mtime_"]) and eclass_cache.is_eclass_data_valid(trg["_eclasses_"]):
+ write_it = False
+ except (cache_errors.CacheError, KeyError):
+ pass
+
+ if trg and not write_it:
+ """ We don't want to skip the write unless we're really sure that
+ the existing cache is identical, so don't trust _mtime_ and
+ _eclasses_ alone."""
+ for d in (entry, trg):
+ if "EAPI" in d and d["EAPI"] in ("", "0"):
+ del d["EAPI"]
+ for k in set(chain(entry, trg)).difference(
+ ("_mtime_", "_eclasses_")):
+ if trg.get(k, "") != entry.get(k, ""):
+ write_it = True
+ break
+
+ if write_it:
+ try:
+ inherited = entry.get("INHERITED", None)
+ except cache_errors.CacheError, ce:
+ noise.exception(x, ce)
+ del ce
+ continue
+ if inherited:
+ if src_cache.complete_eclass_entries:
+ if not "_eclasses_" in entry:
+ noise.corruption(x,"missing _eclasses_ field")
+ continue
+ if not eclass_cache.is_eclass_data_valid(entry["_eclasses_"]):
+ noise.eclass_stale(x)
+ continue
+ else:
+ entry["_eclasses_"] = eclass_cache.get_eclass_data(entry["INHERITED"].split(), \
+ from_master_only=True)
+ if not entry["_eclasses_"]:
+ noise.eclass_stale(x)
+ continue
+
+ # by this time, if it reaches here, the eclass has been validated, and the entry has
+ # been updated/translated (if needs be, for metadata/cache mainly)
+ try: trg_cache[x] = entry
+ except cache_errors.CacheError, ce:
+ noise.exception(x, ce)
+ del ce
+ continue
+ if count >= noise.call_update_min:
+ noise.update(x)
+ count = 0
+
+ if not trg_cache.autocommits:
+ trg_cache.commit()
+
+ # ok. by this time, the trg_cache is up to date, and we have a dict
+ # with a crapload of cpv's. we now walk the target db, removing stuff if it's in the list.
+ for key in dead_nodes:
+ try:
+ del trg_cache[key]
+ except KeyError:
+ pass
+ except cache_errors.CacheError, ce:
+ noise.exception(ce)
+ del ce
+ dead_nodes.clear()
+ noise.finish()
+
+
+class quiet_mirroring(object):
+ # call_update_every is used by mirror_cache to determine how often to call in.
+ # quiet defaults to 2^24 -1. Don't call update, 'cept once every 16 million or so :)
+ call_update_min = 0xffffff
+ def update(self,key,*arg): pass
+ def exception(self,key,*arg): pass
+ def eclass_stale(self,*arg): pass
+ def missing_entry(self, key): pass
+ def misc(self,key,*arg): pass
+ def corruption(self, key, s): pass
+ def finish(self, *arg): pass
+
+class non_quiet_mirroring(quiet_mirroring):
+ call_update_min=1
+ def update(self,key,*arg): print "processed",key
+ def exception(self, key, *arg): print "exec",key,arg
+ def missing(self,key): print "key %s is missing", key
+ def corruption(self,key,*arg): print "corrupt %s:" % key,arg
+ def eclass_stale(self,key,*arg):print "stale %s:"%key,arg
+
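+# Editorial sketch (not part of the original commit): a typical driver loop.
+# The portdb and eclass-cache object names are illustrative.
+#
+# >>> mirror_cache(portdb.cp_all_cpvs(), src_cache, trg_cache,
+# ...     eclass_cache=ec, verbose_instance=non_quiet_mirroring()) # doctest: +SKIP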
diff --git a/pym/portage/cache/volatile.py b/pym/portage/cache/volatile.py
new file mode 100644
index 00000000..0a204b70
--- /dev/null
+++ b/pym/portage/cache/volatile.py
@@ -0,0 +1,27 @@
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: $
+
+import copy
+if not hasattr(__builtins__, "set"):
+ from sets import Set as set
+from cache import template
+
+class database(template.database):
+
+ autocommits = True
+ serialize_eclasses = False
+
+ def __init__(self, *args, **config):
+ config.pop("gid", None)
+ super(database, self).__init__(*args, **config)
+ self._data = {}
+ self.iterkeys = self._data.iterkeys
+ self._delitem = self._data.__delitem__
+ self.__contains__ = self._data.__contains__
+
+ def _setitem(self, name, values):
+ self._data[name] = copy.deepcopy(values)
+
+ def _getitem(self, cpv):
+ return copy.deepcopy(self._data[cpv])
diff --git a/pym/portage/checksum.py b/pym/portage/checksum.py
new file mode 100644
index 00000000..7f1a89c8
--- /dev/null
+++ b/pym/portage/checksum.py
@@ -0,0 +1,219 @@
+# portage_checksum.py -- core Portage functionality
+# Copyright 1998-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+
+from portage_const import PRIVATE_PATH,PRELINK_BINARY,HASHING_BLOCKSIZE
+import os
+import errno
+import shutil
+import stat
+import portage_exception
+import portage_exec
+import portage_util
+import portage_locks
+import commands
+import sha
+
+
+# actual hash functions first
+
+#dict of all available hash functions
+hashfunc_map = {}
+
+# We _try_ to load this module. If it fails we do the slightly slower fallback.
+try:
+ import fchksum
+
+ def md5hash(filename):
+ return fchksum.fmd5t(filename)
+
+except ImportError:
+ import md5
+ def md5hash(filename):
+ return pyhash(filename, md5)
+hashfunc_map["MD5"] = md5hash
+
+def sha1hash(filename):
+ return pyhash(filename, sha)
+hashfunc_map["SHA1"] = sha1hash
+
+# Keep pycrypto optional for now, there are no internal fallbacks for these
+try:
+ import Crypto.Hash.SHA256
+
+ def sha256hash(filename):
+ return pyhash(filename, Crypto.Hash.SHA256)
+ hashfunc_map["SHA256"] = sha256hash
+except ImportError:
+ pass
+
+try:
+ import Crypto.Hash.RIPEMD
+
+ def rmd160hash(filename):
+ return pyhash(filename, Crypto.Hash.RIPEMD)
+ hashfunc_map["RMD160"] = rmd160hash
+except ImportError:
+ pass
+
+def getsize(filename):
+ size = os.stat(filename).st_size
+ return (size, size)
+hashfunc_map["size"] = getsize
+
+# end actual hash functions
+
+prelink_capable = False
+if os.path.exists(PRELINK_BINARY):
+ results = commands.getstatusoutput(PRELINK_BINARY+" --version > /dev/null 2>&1")
+ if (results[0] >> 8) == 0:
+ prelink_capable=1
+ del results
+
+def perform_md5(x, calc_prelink=0):
+ return perform_checksum(x, "MD5", calc_prelink)[0]
+
+def perform_all(x, calc_prelink=0):
+ mydict = {}
+ for k in hashfunc_map.keys():
+ mydict[k] = perform_checksum(x, k, calc_prelink)[0] # pass the hash name, not the function
+ return mydict
+
+def get_valid_checksum_keys():
+ return hashfunc_map.keys()
+
+def verify_all(filename, mydict, calc_prelink=0, strict=0):
+ """
+ Verify all checksums against a file.
+
+ @param filename: File to run the checksums against
+ @type filename: String
+ @param calc_prelink: Whether or not to reverse prelink before running the checksum
+ @type calc_prelink: Integer
+ @param strict: Enable/Disable strict checking (which stops exactly at a checksum failure and throws an exception)
+ @type strict: Integer
+ @rtype: Tuple
+ @return: Result of the checks and possible message:
+ 1) If size fails, False, and a tuple containing a message, the given size, and the actual size
+ 2) If there is an os error, False, and a tuple containing the system error followed by 2 nulls
+ 3) If a checksum fails, False and a tuple containing a message, the given hash, and the actual hash
+ 4) If all checks succeed, return True and a fake reason
+ """
+ # Dict relates to single file only.
+ # returns: (passed,reason)
+ file_is_ok = True
+ reason = "Reason unknown"
+ try:
+ mysize = os.stat(filename)[stat.ST_SIZE]
+ if mydict["size"] != mysize:
+ return False,("Filesize does not match recorded size", mysize, mydict["size"])
+ except OSError, e:
+ if e.errno == errno.ENOENT:
+ raise portage_exception.FileNotFound(filename)
+ return False, (str(e), None, None)
+ for x in mydict.keys():
+ if x == "size":
+ continue
+ elif x in hashfunc_map.keys():
+ myhash = perform_checksum(filename, x, calc_prelink=calc_prelink)[0]
+ if mydict[x] != myhash:
+ if strict:
+ raise portage_exception.DigestException, "Failed to verify '$(file)s' on checksum type '%(type)s'" % {"file":filename, "type":x}
+ else:
+ file_is_ok = False
+ reason = (("Failed on %s verification" % x), myhash,mydict[x])
+ break
+ return file_is_ok,reason
+
+def pyhash(filename, hashobject):
+ """
+ Run a checksum against a file.
+
+ @param filename: File to run the checksum against
+ @type filename: String
+ @param hashobject: The hash module/object that will execute the checksum on the file
+ @type hashobject: Object
+ @return: The hash and size of the data
+ """
+ f = open(filename, 'rb')
+ blocksize = HASHING_BLOCKSIZE
+ data = f.read(blocksize)
+ size = 0L
+ sum = hashobject.new()
+ while data:
+ sum.update(data)
+ size = size + len(data)
+ data = f.read(blocksize)
+ f.close()
+
+ return (sum.hexdigest(), size)
+
+def perform_checksum(filename, hashname="MD5", calc_prelink=0):
+ """
+ Run a specific checksum against a file.
+
+ @param filename: File to run the checksum against
+ @type filename: String
+ @param hashname: The type of hash function to run
+ @type hashname: String
+ @param calc_prelink: Whether or not to reverse prelink before running the checksum
+ @type calc_prelink: Integer
+ @rtype: Tuple
+ @return: The hash and size of the data
+ """
+ myfilename = filename[:]
+ prelink_tmpfile = os.path.join("/", PRIVATE_PATH, "prelink-checksum.tmp." + str(os.getpid()))
+ mylock = None
+ try:
+ if calc_prelink and prelink_capable:
+ mylock = portage_locks.lockfile(prelink_tmpfile, wantnewlockfile=1)
+ # Create non-prelinked temporary file to checksum.
+ # Files rejected by prelink are summed in place.
+ retval = portage_exec.spawn([PRELINK_BINARY, "--undo", "-o",
+ prelink_tmpfile, filename], fd_pipes={})
+ if retval == os.EX_OK:
+ myfilename = prelink_tmpfile
+ try:
+ if hashname not in hashfunc_map:
+ raise portage_exception.DigestException(hashname + \
+ " hash function not available (needs dev-python/pycrypto)")
+ myhash, mysize = hashfunc_map[hashname](myfilename)
+ except (OSError, IOError), e:
+ if e.errno == errno.ENOENT:
+ raise portage_exception.FileNotFound(myfilename)
+ raise
+ if calc_prelink and prelink_capable:
+ try:
+ os.unlink(prelink_tmpfile)
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ return myhash, mysize
+ finally:
+ if mylock:
+ portage_locks.unlockfile(mylock)
+
+def perform_multiple_checksums(filename, hashes=["MD5"], calc_prelink=0):
+ """
+ Run a group of checksums against a file.
+
+ @param filename: File to run the checksums against
+ @type filename: String
+ @param hashes: A list of checksum functions to run against the file
+ @type hashes: List
+ @param calc_prelink: Whether or not to reverse prelink before running the checksum
+ @type calc_prelink: Integer
+ @rtype: Tuple
+ @return: A dictionary in the form:
+ return_value[hash_name] = (hash_result,size)
+ for each given checksum
+ """
+ rVal = {}
+ for x in hashes:
+ if x not in hashfunc_map:
+ raise portage_exception.DigestException, x+" hash function not available (needs dev-python/pycrypto)"
+ rVal[x] = perform_checksum(filename, x, calc_prelink)[0]
+ return rVal
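+
+# Editorial sketch (not part of the original commit): a Manifest-style digest
+# dict is a "size" entry plus one entry per hash name, which is exactly the
+# shape verify_all() consumes.
+#
+# >>> myhash, mysize = perform_checksum("/etc/make.conf", "MD5") # doctest: +SKIP
+# >>> verify_all("/etc/make.conf", {"size": mysize, "MD5": myhash}) # doctest: +SKIP
+# (True, 'Reason unknown')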
diff --git a/pym/portage/const.py b/pym/portage/const.py
new file mode 100644
index 00000000..e1af7cb4
--- /dev/null
+++ b/pym/portage/const.py
@@ -0,0 +1,65 @@
+# portage: Constants
+# Copyright 1998-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+
+# ===========================================================================
+# START OF CONSTANTS -- START OF CONSTANTS -- START OF CONSTANTS -- START OF
+# ===========================================================================
+
+import os
+
+VDB_PATH = "var/db/pkg"
+PRIVATE_PATH = "var/lib/portage"
+CACHE_PATH = "/var/cache/edb"
+DEPCACHE_PATH = CACHE_PATH+"/dep"
+
+USER_CONFIG_PATH = "/etc/portage"
+MODULES_FILE_PATH = USER_CONFIG_PATH+"/modules"
+CUSTOM_PROFILE_PATH = USER_CONFIG_PATH+"/profile"
+
+#PORTAGE_BASE_PATH = "/usr/lib/portage"
+PORTAGE_BASE_PATH = os.path.join(os.sep, os.sep.join(__file__.split(os.sep)[:-2]))
+PORTAGE_BIN_PATH = PORTAGE_BASE_PATH+"/bin"
+PORTAGE_PYM_PATH = PORTAGE_BASE_PATH+"/pym"
+NEWS_LIB_PATH = "/var/lib/gentoo"
+PROFILE_PATH = "/etc/make.profile"
+LOCALE_DATA_PATH = PORTAGE_BASE_PATH+"/locale"
+
+EBUILD_SH_BINARY = PORTAGE_BIN_PATH+"/ebuild.sh"
+MISC_SH_BINARY = PORTAGE_BIN_PATH + "/misc-functions.sh"
+SANDBOX_BINARY = "/usr/bin/sandbox"
+BASH_BINARY = "/bin/bash"
+MOVE_BINARY = "/bin/mv"
+PRELINK_BINARY = "/usr/sbin/prelink"
+
+WORLD_FILE = PRIVATE_PATH + "/world"
+MAKE_CONF_FILE = "/etc/make.conf"
+MAKE_DEFAULTS_FILE = PROFILE_PATH + "/make.defaults"
+DEPRECATED_PROFILE_FILE = PROFILE_PATH+"/deprecated"
+USER_VIRTUALS_FILE = USER_CONFIG_PATH+"/virtuals"
+EBUILD_SH_ENV_FILE = USER_CONFIG_PATH+"/bashrc"
+INVALID_ENV_FILE = "/etc/spork/is/not/valid/profile.env"
+CUSTOM_MIRRORS_FILE = USER_CONFIG_PATH+"/mirrors"
+CONFIG_MEMORY_FILE = PRIVATE_PATH + "/config"
+COLOR_MAP_FILE = USER_CONFIG_PATH + "/color.map"
+
+REPO_NAME_FILE = "repo_name"
+REPO_NAME_LOC = "profiles" + "/" + REPO_NAME_FILE
+
+INCREMENTALS=["USE","USE_EXPAND","USE_EXPAND_HIDDEN","FEATURES","ACCEPT_KEYWORDS","ACCEPT_LICENSE","CONFIG_PROTECT_MASK","CONFIG_PROTECT","PRELINK_PATH","PRELINK_PATH_MASK"]
+EBUILD_PHASES = ["setup", "unpack", "compile", "test", "install",
+ "preinst", "postinst", "prerm", "postrm", "other"]
+
+EAPI = 0
+
+HASHING_BLOCKSIZE = 32768
+MANIFEST1_HASH_FUNCTIONS = ["MD5","SHA256","RMD160"]
+MANIFEST2_HASH_FUNCTIONS = ["SHA1","SHA256","RMD160"]
+MANIFEST2_REQUIRED_HASH = "SHA1"
+
+MANIFEST2_IDENTIFIERS = ["AUX","MISC","DIST","EBUILD"]
+# ===========================================================================
+# END OF CONSTANTS -- END OF CONSTANTS -- END OF CONSTANTS -- END OF CONSTANT
+# ===========================================================================
diff --git a/pym/portage/cvstree.py b/pym/portage/cvstree.py
new file mode 100644
index 00000000..30f143cd
--- /dev/null
+++ b/pym/portage/cvstree.py
@@ -0,0 +1,295 @@
+# cvstree.py -- cvs tree utilities
+# Copyright 1998-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+
+import os,time,sys,re
+from stat import *
+
+# [D]/Name/Version/Date/Flags/Tags
+
+def pathdata(entries, path):
+ """(entries,path)
+ Returns the data(dict) for a specific file/dir at the path specified."""
+ mysplit=path.split("/")
+ myentries=entries
+ mytarget=mysplit[-1]
+ mysplit=mysplit[:-1]
+ for mys in mysplit:
+ if myentries["dirs"].has_key(mys):
+ myentries=myentries["dirs"][mys]
+ else:
+ return None
+ if myentries["dirs"].has_key(mytarget):
+ return myentries["dirs"][mytarget]
+ elif myentries["files"].has_key(mytarget):
+ return myentries["files"][mytarget]
+ else:
+ return None
+
+def fileat(entries, path):
+ return pathdata(entries,path)
+
+def isadded(entries, path):
+ """(entries,path)
+ Returns true if the path exists and is added to the cvs tree."""
+ mytarget=pathdata(entries, path)
+ if mytarget:
+ if "cvs" in mytarget["status"]:
+ return 1
+
+ basedir=os.path.dirname(path)
+ filename=os.path.basename(path)
+
+ try:
+ myfile=open(basedir+"/CVS/Entries","r")
+ except IOError:
+ return 0
+ mylines=myfile.readlines()
+ myfile.close()
+
+ rep=re.compile("^\/"+re.escape(filename)+"\/")
+ for x in mylines:
+ if rep.search(x):
+ return 1
+
+ return 0
+
+def findnew(entries,recursive=0,basedir=""):
+ """(entries,recursive=0,basedir="")
+ Recurses the entries tree to find all elements that have been added but
+ have not yet been committed. Returns a list of paths, optionally prepended
+ with a basedir."""
+ if basedir and basedir[-1]!="/":
+ basedir=basedir+"/"
+ mylist=[]
+ for myfile in entries["files"].keys():
+ if "cvs" in entries["files"][myfile]["status"]:
+ if "0" == entries["files"][myfile]["revision"]:
+ mylist.append(basedir+myfile)
+ if recursive:
+ for mydir in entries["dirs"].keys():
+ mylist+=findnew(entries["dirs"][mydir],recursive,basedir+mydir)
+ return mylist
+
+def findchanged(entries,recursive=0,basedir=""):
+ """(entries,recursive=0,basedir="")
+ Recurses the entries tree to find all elements that exist in the cvs tree
+ and differ from the committed version. Returns a list of paths, optionally
+ prepended with a basedir."""
+ if basedir and basedir[-1]!="/":
+ basedir=basedir+"/"
+ mylist=[]
+ for myfile in entries["files"].keys():
+ if "cvs" in entries["files"][myfile]["status"]:
+ if "current" not in entries["files"][myfile]["status"]:
+ if "exists" in entries["files"][myfile]["status"]:
+ if entries["files"][myfile]["revision"]!="0":
+ mylist.append(basedir+myfile)
+ if recursive:
+ for mydir in entries["dirs"].keys():
+ mylist+=findchanged(entries["dirs"][mydir],recursive,basedir+mydir)
+ return mylist
+
+def findmissing(entries,recursive=0,basedir=""):
+ """(entries,recursive=0,basedir="")
+ Recurses the entries tree to find all elements that are listed in the cvs
+ tree but do not exist on the filesystem. Returns a list of paths,
+ optionally prepended with a basedir."""
+ if basedir and basedir[-1]!="/":
+ basedir=basedir+"/"
+ mylist=[]
+ for myfile in entries["files"].keys():
+ if "cvs" in entries["files"][myfile]["status"]:
+ if "exists" not in entries["files"][myfile]["status"]:
+ if "removed" not in entries["files"][myfile]["status"]:
+ mylist.append(basedir+myfile)
+ if recursive:
+ for mydir in entries["dirs"].keys():
+ mylist+=findmissing(entries["dirs"][mydir],recursive,basedir+mydir)
+ return mylist
+
+def findunadded(entries,recursive=0,basedir=""):
+ """(entries,recursive=0,basedir="")
+ Recurses the entries tree to find all elements that are in valid cvs
+ directories but are not part of the cvs tree. Returns a list of paths,
+ optionally prepended with a basedir."""
+ if basedir and basedir[-1]!="/":
+ basedir=basedir+"/"
+ mylist=[]
+
+ #ignore what cvs ignores.
+ for myfile in entries["files"].keys():
+ if "cvs" not in entries["files"][myfile]["status"]:
+ mylist.append(basedir+myfile)
+ if recursive:
+ for mydir in entries["dirs"].keys():
+ mylist+=findunadded(entries["dirs"][mydir],recursive,basedir+mydir)
+ return mylist
+
+def findremoved(entries,recursive=0,basedir=""):
+ """(entries,recursive=0,basedir="")
+ Recurses the entries tree to find all elements that are flagged for cvs
+ deletion. Returns a list of paths, optionally prepended with a basedir."""
+ if basedir and basedir[-1]!="/":
+ basedir=basedir+"/"
+ mylist=[]
+ for myfile in entries["files"].keys():
+ if "removed" in entries["files"][myfile]["status"]:
+ mylist.append(basedir+myfile)
+ if recursive:
+ for mydir in entries["dirs"].keys():
+ mylist+=findremoved(entries["dirs"][mydir],recursive,basedir+mydir)
+ return mylist
+
+def findall(entries, recursive=0, basedir=""):
+ """(entries,recursive=0,basedir="")
+ Recurses the entries tree to find all new, changed, missing, unadded, and
+ removed entities. Returns a 5 element list of lists as returned from each find*()."""
+
+ if basedir and basedir[-1]!="/":
+ basedir=basedir+"/"
+ mynew = findnew(entries,recursive,basedir)
+ mychanged = findchanged(entries,recursive,basedir)
+ mymissing = findmissing(entries,recursive,basedir)
+ myunadded = findunadded(entries,recursive,basedir)
+ myremoved = findremoved(entries,recursive,basedir)
+ return [mynew, mychanged, mymissing, myunadded, myremoved]
+
+ignore_list = re.compile("(^|/)(RCS(|LOG)|SCCS|CVS(|\.adm)|cvslog\..*|tags|TAGS|\.(make\.state|nse_depinfo)|.*~|(\.|)#.*|,.*|_$.*|.*\$|\.del-.*|.*\.(old|BAK|bak|orig|rej|a|olb|o|obj|so|exe|Z|elc|ln)|core)$")
+def apply_cvsignore_filter(list):
+ x=0
+ while x < len(list):
+ if ignore_list.match(list[x].split("/")[-1]):
+ list.pop(x)
+ else:
+ x+=1
+ return list
+
+def getentries(mydir,recursive=0):
+ """(basedir,recursive=0)
+ Scans the given directory and returns a data dict of all the entries in
+ the directory, separated into a dirs dict and a files dict."""
+ myfn=mydir+"/CVS/Entries"
+ # entries=[dirs, files]
+ entries={"dirs":{},"files":{}}
+ if not os.path.exists(mydir):
+ return entries
+ try:
+ myfile=open(myfn, "r")
+ mylines=myfile.readlines()
+ myfile.close()
+ except SystemExit, e:
+ raise
+ except:
+ mylines=[]
+ for line in mylines:
+ if line and line[-1]=="\n":
+ line=line[:-1]
+ if not line:
+ continue
+ if line=="D": # End of entries file
+ break
+ mysplit=line.split("/")
+ if len(mysplit)!=6:
+ print "Confused:",mysplit
+ continue
+ if mysplit[0]=="D":
+ entries["dirs"][mysplit[1]]={"dirs":{},"files":{},"status":[]}
+ entries["dirs"][mysplit[1]]["status"]=["cvs"]
+ if os.path.isdir(mydir+"/"+mysplit[1]):
+ entries["dirs"][mysplit[1]]["status"]+=["exists"]
+ entries["dirs"][mysplit[1]]["flags"]=mysplit[2:]
+ if recursive:
+ rentries=getentries(mydir+"/"+mysplit[1],recursive)
+ #print rentries.keys()
+ #print entries["files"].keys()
+ #print entries["files"][mysplit[1]]
+ entries["dirs"][mysplit[1]]["dirs"]=rentries["dirs"]
+ entries["dirs"][mysplit[1]]["files"]=rentries["files"]
+ else:
+ # [D]/Name/revision/Date/Flags/Tags
+ entries["files"][mysplit[1]]={}
+ entries["files"][mysplit[1]]["revision"]=mysplit[2]
+ entries["files"][mysplit[1]]["date"]=mysplit[3]
+ entries["files"][mysplit[1]]["flags"]=mysplit[4]
+ entries["files"][mysplit[1]]["tags"]=mysplit[5]
+ entries["files"][mysplit[1]]["status"]=["cvs"]
+ if entries["files"][mysplit[1]]["revision"][0]=="-":
+ entries["files"][mysplit[1]]["status"]+=["removed"]
+
+ for file in apply_cvsignore_filter(os.listdir(mydir)):
+ if file=="CVS":
+ continue
+ if file=="digest-framerd-2.4.3":
+ print mydir,file
+ if os.path.isdir(mydir+"/"+file):
+ if not entries["dirs"].has_key(file):
+ entries["dirs"][file]={"dirs":{},"files":{}}
+ if entries["dirs"][file].has_key("status"):
+ if "exists" not in entries["dirs"][file]["status"]:
+ entries["dirs"][file]["status"]+=["exists"]
+ else:
+ entries["dirs"][file]["status"]=["exists"]
+ elif os.path.isfile(mydir+"/"+file):
+ if file=="digest-framerd-2.4.3":
+ print "isfile"
+ if not entries["files"].has_key(file):
+ entries["files"][file]={"revision":"","date":"","flags":"","tags":""}
+ if entries["files"][file].has_key("status"):
+ if file=="digest-framerd-2.4.3":
+ print "has status"
+ if "exists" not in entries["files"][file]["status"]:
+ if file=="digest-framerd-2.4.3":
+ print "no exists in status"
+ entries["files"][file]["status"]+=["exists"]
+ else:
+ if file=="digest-framerd-2.4.3":
+ print "no status"
+ entries["files"][file]["status"]=["exists"]
+ try:
+ if file=="digest-framerd-2.4.3":
+ print "stat'ing"
+ mystat=os.stat(mydir+"/"+file)
+ mytime=time.asctime(time.gmtime(mystat[ST_MTIME]))
+ if not entries["files"][file].has_key("status"):
+ if file=="digest-framerd-2.4.3":
+ print "status not set"
+ entries["files"][file]["status"]=[]
+ if file=="digest-framerd-2.4.3":
+ print "date:",entries["files"][file]["date"]
+ print "sdate:",mytime
+ if mytime==entries["files"][file]["date"]:
+ entries["files"][file]["status"]+=["current"]
+ if file=="digest-framerd-2.4.3":
+ print "stat done"
+
+ del mystat
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ print "failed to stat",file
+ print e
+ return
+
+ else:
+ print
+ print "File of unknown type:",mydir+"/"+file
+ print
+ return entries
+
+#class cvstree:
+# def __init__(self,basedir):
+# self.refdir=os.cwd()
+# self.basedir=basedir
+# self.entries={}
+# self.entries["dirs"]={}
+# self.entries["files"]={}
+# self.entries["dirs"][self.basedir]=getentries(self.basedir)
+# self.getrealdirs(self.dirs, self.files)
+# def getrealdirs(self,dirs,files):
+# for mydir in dirs.keys():
+# list = os.listdir(
+
+
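+# Editorial sketch (not part of the original commit): typical traversal of a
+# CVS checkout with this module.
+#
+# >>> entries = getentries("/usr/portage", recursive=1) # doctest: +SKIP
+# >>> new, changed, missing, unadded, removed = findall(entries,
+# ...     recursive=1, basedir="/usr/portage") # doctest: +SKIP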
diff --git a/pym/portage/data.py b/pym/portage/data.py
new file mode 100644
index 00000000..707c76b2
--- /dev/null
+++ b/pym/portage/data.py
@@ -0,0 +1,126 @@
+# portage_data.py -- Calculated/Discovered Data Values
+# Copyright 1998-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+if not hasattr(__builtins__, "set"):
+ from sets import Set as set
+
+import os,pwd,grp,sys
+from portage_util import writemsg
+from output import green,red
+from output import create_color_func
+bad = create_color_func("BAD")
+
+ostype=os.uname()[0]
+
+lchown = None
+if ostype=="Linux" or ostype.lower().endswith("gnu"):
+ userland="GNU"
+ os.environ["XARGS"]="xargs -r"
+elif ostype == "Darwin":
+ userland="Darwin"
+ os.environ["XARGS"]="xargs"
+ def lchown(*pos_args, **key_args):
+ pass
+elif ostype.endswith("BSD") or ostype =="DragonFly":
+ userland="BSD"
+ os.environ["XARGS"]="xargs"
+else:
+ writemsg(red("Operating system")+" \""+ostype+"\" "+red("currently unsupported. Exiting.")+"\n")
+ sys.exit(1)
+
+if not lchown:
+ if "lchown" in dir(os):
+ # Included in python-2.3
+ lchown = os.lchown
+ else:
+ try:
+ import missingos
+ lchown = missingos.lchown
+ except ImportError:
+ def lchown(*pos_args, **key_args):
+ writemsg(red("!!!") + " It seems that os.lchown does not" + \
+ " exist. Please rebuild python.\n", noiselevel=-1)
+ lchown()
+
+os.environ["USERLAND"]=userland
+
+def portage_group_warning():
+ warn_prefix = bad("*** WARNING *** ")
+ mylines = [
+ "For security reasons, only system administrators should be",
+ "allowed in the portage group. Untrusted users or processes",
+ "can potentially exploit the portage group for attacks such as",
+ "local privilege escalation."
+ ]
+ for x in mylines:
+ writemsg(warn_prefix, noiselevel=-1)
+ writemsg(x, noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+
+# Portage has 3 security levels that depend on the uid and gid of the main
+# process and are assigned according to the following table:
+#
+# Privileges secpass uid gid
+# normal 0 any any
+# group 1 any portage_gid
+# super 2 0 any
+#
+# If the "wheel" group does not exist then wheelgid falls back to 0.
+# If the "portage" group does not exist then portage_uid falls back to wheelgid.
+
+secpass=0
+
+uid=os.getuid()
+wheelgid=0
+
+if uid==0:
+ secpass=2
+try:
+ wheelgid=grp.getgrnam("wheel")[2]
+except KeyError:
+ writemsg("portage initialization: your system doesn't have a 'wheel' group.\n")
+ writemsg("Please fix this as it is a normal system requirement. 'wheel' is GID 10\n")
+ writemsg("`emerge baselayout` and a config update with dispatch-conf, etc-update\n")
+ writemsg("or cfg-update should remedy this problem.\n")
+ pass
+
+#Discover the uid and gid of the portage user/group
+try:
+ portage_uid=pwd.getpwnam("portage")[2]
+ portage_gid=grp.getgrnam("portage")[2]
+ if secpass < 1 and portage_gid in os.getgroups():
+ secpass=1
+except KeyError:
+ portage_uid=0
+ portage_gid=0
+ writemsg("\n")
+ writemsg( red("portage: 'portage' user or group missing. Please update baselayout\n"))
+ writemsg( red(" and merge portage user(250) and group(250) into your passwd\n"))
+ writemsg( red(" and group files. Non-root compilation is disabled until then.\n"))
+ writemsg( " Also note that non-root/wheel users will need to be added to\n")
+ writemsg( " the portage group to do portage commands.\n")
+ writemsg("\n")
+ writemsg( " For the defaults, line 1 goes into passwd, and 2 into group.\n")
+ writemsg(green(" portage:x:250:250:portage:/var/tmp/portage:/bin/false\n"))
+ writemsg(green(" portage::250:portage\n"))
+ writemsg("\n")
+ portage_group_warning()
+
+userpriv_groups = [portage_gid]
+if secpass >= 2:
+ # Get a list of group IDs for the portage user. Do not use grp.getgrall()
+ # since it is known to trigger spurious SIGPIPE problems with nss_ldap.
+ from commands import getstatusoutput
+ mystatus, myoutput = getstatusoutput("id -G portage")
+ if mystatus == os.EX_OK:
+ for x in myoutput.split():
+ try:
+ userpriv_groups.append(int(x))
+ except ValueError:
+ pass
+ del x
+ userpriv_groups = list(set(userpriv_groups))
+ del getstatusoutput, mystatus, myoutput
diff --git a/pym/portage/debug.py b/pym/portage/debug.py
new file mode 100644
index 00000000..2ee8bcf2
--- /dev/null
+++ b/pym/portage/debug.py
@@ -0,0 +1,115 @@
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: $
+
+import os, sys, threading
+
+import portage_const
+from portage_util import writemsg
+
+def set_trace(on=True):
+ if on:
+ t = trace_handler()
+ threading.settrace(t.event_handler)
+ sys.settrace(t.event_handler)
+ else:
+ sys.settrace(None)
+ threading.settrace(None)
+
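+# Illustrative usage sketch (the import name is hypothetical; it depends on
+# how this module ends up installed):
+#
+#   from portage_debug import set_trace
+#   set_trace(True)    # install the trace handler in all threads
+#   run_some_code()    # hypothetical function whose execution gets traced
+#   set_trace(False)   # uninstall the handler
+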
+class trace_handler(object):
+
+ def __init__(self):
+ python_system_paths = []
+ for x in sys.path:
+ if os.path.basename(x).startswith("python2."):
+ python_system_paths.append(x)
+
+ self.ignore_prefixes = []
+ for x in python_system_paths:
+ self.ignore_prefixes.append(x + os.sep)
+
+ self.trim_filename = prefix_trimmer(os.path.join(portage_const.PORTAGE_BASE_PATH, "pym") + os.sep).trim
+ self.show_local_lines = False
+ self.max_repr_length = 200
+
+ def event_handler(self, *args):
+ frame, event, arg = args
+ if "line" == event:
+ if self.show_local_lines:
+ self.trace_line(*args)
+ else:
+ if not self.ignore_filename(frame.f_code.co_filename):
+ self.trace_event(*args)
+ return self.event_handler
+
+ def trace_event(self, frame, event, arg):
+ writemsg("%s line=%d name=%s event=%s %slocals=%s\n" % \
+ (self.trim_filename(frame.f_code.co_filename),
+ frame.f_lineno,
+ frame.f_code.co_name,
+ event,
+ self.arg_repr(frame, event, arg),
+ self.locals_repr(frame, event, arg)))
+
+ def arg_repr(self, frame, event, arg):
+ my_repr = None
+ if "return" == event:
+ my_repr = repr(arg)
+ if len(my_repr) > self.max_repr_length:
+ my_repr = "'omitted'"
+ return "value=%s " % my_repr
+ elif "exception" == event:
+ my_repr = repr(arg[1])
+ if len(my_repr) > self.max_repr_length:
+ my_repr = "'omitted'"
+ return "type=%s value=%s " % (arg[0], my_repr)
+
+ return ""
+
+ def trace_line(self, frame, event, arg):
+ writemsg("%s line=%d\n" % (self.trim_filename(frame.f_code.co_filename), frame.f_lineno))
+
+ def ignore_filename(self, filename):
+ if filename:
+ for x in self.ignore_prefixes:
+ if filename.startswith(x):
+ return True
+ return False
+
+ def locals_repr(self, frame, event, arg):
+ """Create a representation of the locals dict that is suitable for
+ tracing output."""
+
+ my_locals = frame.f_locals.copy()
+
+ # prevent unsafe __repr__ call on self when __init__ is called
+ # (method calls aren't safe until after __init__ has completed).
+ if frame.f_code.co_name == "__init__" and "self" in my_locals:
+ my_locals["self"] = "omitted"
+
+ # We omit items that will lead to unreasonable bloat of the trace
+ # output (and resulting log file).
+ for k, v in my_locals.iteritems():
+ my_repr = repr(v)
+ if len(my_repr) > self.max_repr_length:
+ my_locals[k] = "omitted"
+ return my_locals
+
+class prefix_trimmer(object):
+ def __init__(self, prefix):
+ self.prefix = prefix
+ self.cut_index = len(prefix)
+ self.previous = None
+ self.previous_trimmed = None
+
+ def trim(self, s):
+ """Remove a prefix from the string and return the result.
+ The previous result is automatically cached."""
+ if s == self.previous:
+ return self.previous_trimmed
+ else:
+ if s.startswith(self.prefix):
+ self.previous_trimmed = s[self.cut_index:]
+ else:
+ self.previous_trimmed = s
+ return self.previous_trimmed
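+
+# Illustrative example for prefix_trimmer (hypothetical paths):
+#   prefix_trimmer("/usr/lib/portage/pym/").trim("/usr/lib/portage/pym/dep.py")
+#   returns "dep.py", while paths outside the prefix are returned unchanged.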
diff --git a/pym/portage/dep.py b/pym/portage/dep.py
new file mode 100644
index 00000000..bf40452a
--- /dev/null
+++ b/pym/portage/dep.py
@@ -0,0 +1,646 @@
+# deps.py -- Portage dependency resolution functions
+# Copyright 2003-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+
+# DEPEND SYNTAX:
+#
+# 'use?' only affects the immediately following word!
+# Nesting is the only legal way to form multiple '[!]use?' requirements.
+#
+# Where: 'a' and 'b' are use flags, and 'z' is a depend atom.
+#
+# "a? z" -- If 'a' in [use], then b is valid.
+# "a? ( z )" -- Syntax with parenthesis.
+# "a? b? z" -- Deprecated.
+# "a? ( b? z )" -- Valid
+# "a? ( b? ( z ) ) -- Valid
+#
+
+import re, sys, types
+import portage_exception
+from portage_exception import InvalidData
+from portage_versions import catpkgsplit, catsplit, pkgcmp, pkgsplit, ververify
+
+def cpvequal(cpv1, cpv2):
+ split1 = catpkgsplit(cpv1)
+ split2 = catpkgsplit(cpv2)
+
+ if not split1 or not split2:
+ raise portage_exception.PortageException("Invalid data '%s, %s', parameter was not a CPV" % (cpv1, cpv2))
+
+ if split1[0] != split2[0]:
+ return False
+
+ return (pkgcmp(split1[1:], split2[1:]) == 0)
+
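+# Illustrative examples for cpvequal (assuming the usual convention that a
+# missing revision compares equal to -r0):
+#   cpvequal("sys-apps/portage-2.1", "sys-apps/portage-2.1-r0") -> True
+#   cpvequal("sys-apps/portage-2.1", "sys-apps/portage-2.2") -> False
+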
+def strip_empty(myarr):
+ """
+ Strip all empty elements from an array
+
+ @param myarr: The list of elements
+ @type myarr: List
+ @rtype: Array
+ @return: The array with empty elements removed
+ """
+ for x in range(len(myarr)-1, -1, -1):
+ if not myarr[x]:
+ del myarr[x]
+ return myarr
+
+def paren_reduce(mystr,tokenize=1):
+ """
+ Take a string and convert all paren enclosed entities into sublists, optionally
+ further splitting the list elements by spaces.
+
+ Example usage:
+ >>> paren_reduce('foobar foo ( bar baz )',1)
+ ['foobar', 'foo', ['bar', 'baz']]
+ >>> paren_reduce('foobar foo ( bar baz )',0)
+ ['foobar foo ', [' bar baz ']]
+
+ @param mystr: The string to reduce
+ @type mystr: String
+ @param tokenize: Split on spaces to produce a further list breakdown
+ @type tokenize: Integer
+ @rtype: Array
+ @return: The reduced string in an array
+ """
+ mylist = []
+ while mystr:
+ if ("(" not in mystr) and (")" not in mystr):
+ freesec = mystr
+ subsec = None
+ tail = ""
+ elif mystr[0] == ")":
+ return [mylist,mystr[1:]]
+ elif ("(" in mystr) and (mystr.index("(") < mystr.index(")")):
+ freesec,subsec = mystr.split("(",1)
+ subsec,tail = paren_reduce(subsec,tokenize)
+ else:
+ subsec,tail = mystr.split(")",1)
+ if tokenize:
+ subsec = strip_empty(subsec.split(" "))
+ return [mylist+subsec,tail]
+ return mylist+[subsec],tail
+ mystr = tail
+ if freesec:
+ if tokenize:
+ mylist = mylist + strip_empty(freesec.split(" "))
+ else:
+ mylist = mylist + [freesec]
+ if subsec is not None:
+ mylist = mylist + [subsec]
+ return mylist
+
+def paren_enclose(mylist):
+ """
+ Convert a list to a string with sublists enclosed with parens.
+
+ Example usage:
+ >>> test = ['foobar','foo',['bar','baz']]
+ >>> paren_enclose(test)
+ 'foobar foo ( bar baz )'
+
+ @param mylist: The list
+ @type mylist: List
+ @rtype: String
+ @return: The paren enclosed string
+ """
+ mystrparts = []
+ for x in mylist:
+ if isinstance(x, list):
+ mystrparts.append("( "+paren_enclose(x)+" )")
+ else:
+ mystrparts.append(x)
+ return " ".join(mystrparts)
+
+# This is just for use by emerge so that it can enable a backward compatibility
+# mode in order to gracefully deal with installed packages that have invalid
+# atoms or dep syntax.
+_dep_check_strict = True
+
+def use_reduce(deparray, uselist=[], masklist=[], matchall=0, excludeall=[]):
+ """
+ Takes a paren_reduce'd array and reduces the use? conditionals out
+ leaving an array with subarrays
+
+ @param deparray: paren_reduce'd list of deps
+ @type deparray: List
+ @param uselist: List of use flags
+ @type uselist: List
+ @param masklist: List of masked flags
+ @type masklist: List
+ @param matchall: Resolve all conditional deps unconditionally. Used by repoman
+ @type matchall: Integer
+ @rtype: List
+ @return: The use reduced depend array
+ """
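+ # Illustrative behavior (a sketch, not a doctest):
+ #   use_reduce(paren_reduce("a? ( x ) y"), uselist=["a"]) -> [['x'], 'y']
+ #   use_reduce(paren_reduce("a? ( x ) y"), uselist=[]) -> ['y']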
+ # Quick validity checks
+ for x in range(len(deparray)):
+ if deparray[x] in ["||","&&"]:
+ if len(deparray) - 1 == x or not isinstance(deparray[x+1], list):
+ raise portage_exception.InvalidDependString(deparray[x]+" missing atom list in \""+paren_enclose(deparray)+"\"")
+ if deparray and deparray[-1] and deparray[-1][-1] == "?":
+ raise portage_exception.InvalidDependString("Conditional without target in \""+paren_enclose(deparray)+"\"")
+
+ global _dep_check_strict
+
+ mydeparray = deparray[:]
+ rlist = []
+ while mydeparray:
+ head = mydeparray.pop(0)
+
+ if type(head) == types.ListType:
+ additions = use_reduce(head, uselist, masklist, matchall, excludeall)
+ if additions:
+ rlist.append(additions)
+ elif rlist and rlist[-1] == "||":
+ #XXX: Currently some DEPEND strings have || lists without default atoms.
+ # raise portage_exception.InvalidDependString("No default atom(s) in \""+paren_enclose(deparray)+"\"")
+ rlist.append([])
+
+ else:
+ if head[-1] == "?": # Use reduce next group on fail.
+ # Pull any other use conditions and the following atom or list into a separate array
+ newdeparray = [head]
+ while isinstance(newdeparray[-1], str) and newdeparray[-1][-1] == "?":
+ if mydeparray:
+ newdeparray.append(mydeparray.pop(0))
+ else:
+ raise ValueError, "Conditional with no target."
+
+ # Deprecation checks
+ warned = 0
+ if len(newdeparray[-1]) == 0:
+ sys.stderr.write("Note: Empty target in string. (Deprecated)\n")
+ warned = 1
+ if len(newdeparray) != 2:
+ sys.stderr.write("Note: Nested use flags without parenthesis (Deprecated)\n")
+ warned = 1
+ if warned:
+ sys.stderr.write(" --> "+" ".join(map(str,[head]+newdeparray))+"\n")
+
+ # Check that each flag matches
+ ismatch = True
+ for head in newdeparray[:-1]:
+ head = head[:-1]
+ if head[0] == "!":
+ head_key = head[1:]
+ if not matchall and head_key in uselist or \
+ head_key in excludeall:
+ ismatch = False
+ break
+ elif head not in masklist:
+ if not matchall and head not in uselist:
+ ismatch = False
+ break
+ else:
+ ismatch = False
+
+ # If they all match, process the target
+ if ismatch:
+ target = newdeparray[-1]
+ if isinstance(target, list):
+ additions = use_reduce(target, uselist, masklist, matchall, excludeall)
+ if additions:
+ rlist.append(additions)
+ elif not _dep_check_strict:
+ # The old deprecated behavior.
+ rlist.append(target)
+ else:
+ raise portage_exception.InvalidDependString(
+ "Conditional without parenthesis: '%s?'" % head)
+
+ else:
+ rlist += [head]
+
+ return rlist
+
+
+def dep_opconvert(deplist):
+ """
+ Iterate recursively through a list of deps, if the
+ dep is a '||' or '&&' operator, combine it with the
+ list of deps that follows..
+
+ Example usage:
+ >>> test = ["blah", "||", ["foo", "bar", "baz"]]
+ >>> dep_opconvert(test)
+ ['blah', ['||', 'foo', 'bar', 'baz']]
+
+ @param deplist: A list of deps to format
+ @type deplist: List
+ @rtype: List
+ @return:
+ The new list with the new ordering
+ """
+
+ retlist = []
+ x = 0
+ while x != len(deplist):
+ if isinstance(deplist[x], list):
+ retlist.append(dep_opconvert(deplist[x]))
+ elif deplist[x] == "||" or deplist[x] == "&&":
+ retlist.append([deplist[x]] + dep_opconvert(deplist[x+1]))
+ x += 1
+ else:
+ retlist.append(deplist[x])
+ x += 1
+ return retlist
+
+def get_operator(mydep):
+ """
+ Return the operator used in a depstring.
+
+ Example usage:
+ >>> from portage_dep import *
+ >>> get_operator(">=test-1.0")
+ '>='
+
+ @param mydep: The dep string to check
+ @type mydep: String
+ @rtype: String
+ @return: The operator. One of:
+ '~', '=', '>', '<', '=*', '>=', or '<='
+ """
+ if mydep[0] == "~":
+ operator = "~"
+ elif mydep[0] == "=":
+ if mydep[-1] == "*":
+ operator = "=*"
+ else:
+ operator = "="
+ elif mydep[0] in "><":
+ if len(mydep) > 1 and mydep[1] == "=":
+ operator = mydep[0:2]
+ else:
+ operator = mydep[0]
+ else:
+ operator = None
+
+ return operator
+
+_dep_getcpv_cache = {}
+
+def dep_getcpv(mydep):
+ """
+ Return the category-package-version with any operators/slot specifications stripped off
+
+ Example usage:
+ >>> dep_getcpv('>=media-libs/test-3.0')
+ 'media-libs/test-3.0'
+
+ @param mydep: The depstring
+ @type mydep: String
+ @rtype: String
+ @return: The depstring with the operator removed
+ """
+ global _dep_getcpv_cache
+ retval = _dep_getcpv_cache.get(mydep, None)
+ if retval is not None:
+ return retval
+ mydep_orig = mydep
+ if mydep and mydep[0] == "*":
+ mydep = mydep[1:]
+ if mydep and mydep[-1] == "*":
+ mydep = mydep[:-1]
+ if mydep and mydep[0] == "!":
+ mydep = mydep[1:]
+ if mydep[:2] in [">=", "<="]:
+ mydep = mydep[2:]
+ elif mydep[:1] in "=<>~":
+ mydep = mydep[1:]
+ colon = mydep.rfind(":")
+ if colon != -1:
+ mydep = mydep[:colon]
+ _dep_getcpv_cache[mydep_orig] = mydep
+ return mydep
+
+def dep_getslot(mydep):
+ """
+ Retrieve the slot on a depend.
+
+ Example usage:
+ >>> dep_getslot('app-misc/test:3')
+ '3'
+
+ @param mydep: The depstring to retrieve the slot of
+ @type mydep: String
+ @rtype: String
+ @return: The slot
+ """
+ colon = mydep.rfind(":")
+ if colon != -1:
+ return mydep[colon+1:]
+ return None
+
+_invalid_atom_chars_regexp = re.compile("[()|?]")
+
+def isvalidatom(atom, allow_blockers=False):
+ """
+ Check to see if a depend atom is valid
+
+ Example usage:
+ >>> isvalidatom('media-libs/test-3.0')
+ 0
+ >>> isvalidatom('>=media-libs/test-3.0')
+ 1
+
+ @param atom: The depend atom to check against
+ @type atom: String
+ @rtype: Integer
+ @return: One of the following:
+ 1) 0 if the atom is invalid
+ 2) 1 if the atom is valid
+ """
+ global _invalid_atom_chars_regexp
+ if _invalid_atom_chars_regexp.search(atom):
+ return 0
+ if allow_blockers and atom.startswith("!"):
+ atom = atom[1:]
+ try:
+ mycpv_cps = catpkgsplit(dep_getcpv(atom))
+ except InvalidData:
+ return 0
+ operator = get_operator(atom)
+ if operator:
+ if operator[0] in "<>" and atom[-1] == "*":
+ return 0
+ if mycpv_cps and mycpv_cps[0] != "null":
+ # >=cat/pkg-1.0
+ return 1
+ else:
+ # >=cat/pkg or >=pkg-1.0 (no category)
+ return 0
+ if mycpv_cps:
+ # cat/pkg-1.0
+ return 0
+
+ if (len(atom.split('/')) == 2):
+ # cat/pkg
+ return 1
+ else:
+ return 0
+
+def isjustname(mypkg):
+ """
+ Checks to see if the depstring is only the package name (no version parts)
+
+ Example usage:
+ >>> isjustname('media-libs/test-3.0')
+ 0
+ >>> isjustname('test')
+ 1
+ >>> isjustname('media-libs/test')
+ 1
+
+ @param mypkg: The package atom to check
+ @type mypkg: String
+ @rtype: Integer
+ @return: One of the following:
+ 1) 0 if the package string is not just the package name
+ 2) 1 if it is
+ """
+ myparts = mypkg.split('-')
+ for x in myparts:
+ if ververify(x):
+ return 0
+ return 1
+
+iscache = {}
+
+def isspecific(mypkg):
+ """
+ Checks to see if a package is in category/package-version or package-version format,
+ possibly returning a cached result.
+
+ Example usage:
+ >>> isspecific('media-libs/test')
+ 0
+ >>> isspecific('media-libs/test-3.0')
+ 1
+
+ @param mypkg: The package depstring to check against
+ @type mypkg: String
+ @rtype: Integer
+ @return: One of the following:
+ 1) 0 if the package string is not specific
+ 2) 1 if it is
+ """
+ try:
+ return iscache[mypkg]
+ except KeyError:
+ pass
+ mysplit = mypkg.split("/")
+ if not isjustname(mysplit[-1]):
+ iscache[mypkg] = 1
+ return 1
+ iscache[mypkg] = 0
+ return 0
+
+def dep_getkey(mydep):
+ """
+ Return the category/package-name of a depstring.
+
+ Example usage:
+ >>> dep_getkey('media-libs/test-3.0')
+ 'media-libs/test'
+
+ @param mydep: The depstring to retrieve the category/package-name of
+ @type mydep: String
+ @rtype: String
+ @return: The package category/package-version
+ """
+ mydep = dep_getcpv(mydep)
+ if mydep and isspecific(mydep):
+ mysplit = catpkgsplit(mydep)
+ if not mysplit:
+ return mydep
+ return mysplit[0] + "/" + mysplit[1]
+ else:
+ return mydep
+
+def match_to_list(mypkg, mylist):
+ """
+ Searches list for entries that matches the package.
+
+ @param mypkg: The package atom to match
+ @type mypkg: String
+ @param mylist: The list of package atoms to compare against
+ @type mylist: List
+ @rtype: List
+ @return: A unique list of package atoms that match the given package atom
+ """
+ matches = []
+ for x in mylist:
+ if match_from_list(x, [mypkg]):
+ if x not in matches:
+ matches.append(x)
+ return matches
+
+def best_match_to_list(mypkg, mylist):
+ """
+ Returns the most specific entry that matches the package given.
+
+ @param mypkg: The package atom to check
+ @type mypkg: String
+ @param mylist: The list of package atoms to check against
+ @type mylist: List
+ @rtype: String
+ @return: The package atom which best matches given the following ordering:
+ - =cpv 6
+ - ~cpv 5
+ - =cpv* 4
+ - cp:slot 3
+ - >cpv 2
+ - <cpv 2
+ - >=cpv 2
+ - <=cpv 2
+ - cp 1
+ """
+ operator_values = {'=':6, '~':5, '=*':4,
+ '>':2, '<':2, '>=':2, '<=':2, None:1}
+ maxvalue = 0
+ bestm = None
+ for x in match_to_list(mypkg, mylist):
+ if dep_getslot(x) is not None:
+ if maxvalue < 3:
+ maxvalue = 3
+ bestm = x
+ continue
+ op_val = operator_values[get_operator(x)]
+ if op_val > maxvalue:
+ maxvalue = op_val
+ bestm = x
+ return bestm
+
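+# Illustrative example for best_match_to_list (hypothetical atoms):
+#   best_match_to_list("dev-libs/foo-1.0",
+#       ["dev-libs/foo", ">=dev-libs/foo-0.9", "=dev-libs/foo-1.0"])
+#   returns "=dev-libs/foo-1.0", since '=' (weight 6) outranks '>=' (2) and a
+#   bare category/package atom (1).
+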
+_match_from_list_cache = {}
+
+def match_from_list(mydep, candidate_list):
+ """
+ Searches list for entries that matches the package.
+
+ @param mydep: The package atom to match
+ @type mydep: String
+ @param candidate_list: The list of package atoms to compare against
+ @type candidate_list: List
+ @rtype: List
+ @return: A list of package atoms that match the given package atom
+ """
+
+ global _match_from_list_cache
+ cache_key = (mydep, tuple(candidate_list))
+ mylist = _match_from_list_cache.get(cache_key, None)
+ if mylist is not None:
+ return mylist[:]
+
+ from portage_util import writemsg
+ if mydep[0] == "!":
+ mydep = mydep[1:]
+
+ mycpv = dep_getcpv(mydep)
+ mycpv_cps = catpkgsplit(mycpv) # Can be None if not specific
+ slot = None
+
+ if not mycpv_cps:
+ cat, pkg = catsplit(mycpv)
+ ver = None
+ rev = None
+ slot = dep_getslot(mydep)
+ else:
+ cat, pkg, ver, rev = mycpv_cps
+ if mydep == mycpv:
+ raise KeyError("Specific key requires an operator" + \
+ " (%s) (try adding an '=')" % (mydep))
+
+ if ver and rev:
+ operator = get_operator(mydep)
+ if not operator:
+ writemsg("!!! Invalid atom: %s\n" % mydep, noiselevel=-1)
+ return []
+ else:
+ operator = None
+
+ mylist = []
+
+ if operator is None:
+ for x in candidate_list:
+ xs = pkgsplit(x)
+ if xs is None:
+ xcpv = dep_getcpv(x)
+ if slot is not None:
+ xslot = dep_getslot(x)
+ if xslot is not None and xslot != slot:
+ """ This function isn't given enough information to
+ reject atoms based on slot unless *both* compared atoms
+ specify slots."""
+ continue
+ if xcpv != mycpv:
+ continue
+ elif xs[0] != mycpv:
+ continue
+ mylist.append(x)
+
+ elif operator == "=": # Exact match
+ mylist = [cpv for cpv in candidate_list if cpvequal(cpv, mycpv)]
+
+ elif operator == "=*": # glob match
+ # XXX: Nasty special casing for leading zeros
+ # Required as =* is a literal prefix match, so can't
+ # use vercmp
+ mysplit = catpkgsplit(mycpv)
+ myver = mysplit[2].lstrip("0")
+ if not myver or not myver[0].isdigit():
+ myver = "0"+myver
+ mycpv = mysplit[0]+"/"+mysplit[1]+"-"+myver
+ for x in candidate_list:
+ xs = catpkgsplit(x)
+ myver = xs[2].lstrip("0")
+ if not myver or not myver[0].isdigit():
+ myver = "0"+myver
+ xcpv = xs[0]+"/"+xs[1]+"-"+myver
+ if xcpv.startswith(mycpv):
+ mylist.append(x)
+
+ elif operator == "~": # version, any revision, match
+ for x in candidate_list:
+ xs = catpkgsplit(x)
+ if xs is None:
+ raise InvalidData(x)
+ if not cpvequal(xs[0]+"/"+xs[1]+"-"+xs[2], mycpv_cps[0]+"/"+mycpv_cps[1]+"-"+mycpv_cps[2]):
+ continue
+ if xs[2] != ver:
+ continue
+ mylist.append(x)
+
+ elif operator in [">", ">=", "<", "<="]:
+ mysplit = ["%s/%s" % (cat, pkg), ver, rev]
+ for x in candidate_list:
+ try:
+ result = pkgcmp(pkgsplit(x), mysplit)
+ except ValueError: # pkgcmp may return ValueError during int() conversion
+ writemsg("\nInvalid package name: %s\n" % x, noiselevel=-1)
+ raise
+ if result is None:
+ continue
+ elif operator == ">":
+ if result > 0:
+ mylist.append(x)
+ elif operator == ">=":
+ if result >= 0:
+ mylist.append(x)
+ elif operator == "<":
+ if result < 0:
+ mylist.append(x)
+ elif operator == "<=":
+ if result <= 0:
+ mylist.append(x)
+ else:
+ raise KeyError("Unknown operator: %s" % mydep)
+ else:
+ raise KeyError("Unknown operator: %s" % mydep)
+
+ _match_from_list_cache[cache_key] = mylist
+ return mylist
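+
+# Illustrative example for match_from_list (hypothetical candidate list):
+#   match_from_list(">=dev-libs/foo-1.0",
+#       ["dev-libs/foo-0.9", "dev-libs/foo-1.0", "dev-libs/foo-1.1"])
+#   -> ["dev-libs/foo-1.0", "dev-libs/foo-1.1"]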
diff --git a/pym/portage/dispatch_conf.py b/pym/portage/dispatch_conf.py
new file mode 100644
index 00000000..690772bf
--- /dev/null
+++ b/pym/portage/dispatch_conf.py
@@ -0,0 +1,161 @@
+# archive_conf.py -- functionality common to archive-conf and dispatch-conf
+# Copyright 2003-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+
+# Library by Wayne Davison <gentoo@blorf.net>, derived from code
+# written by Jeremy Wohl (http://igmus.org)
+
+from stat import *
+import os, sys, commands, shutil
+
+import portage
+
+RCS_BRANCH = '1.1.1'
+RCS_LOCK = 'rcs -ko -M -l'
+RCS_PUT = 'ci -t-"Archived config file." -m"dispatch-conf update."'
+RCS_GET = 'co'
+RCS_MERGE = 'rcsmerge -p -r' + RCS_BRANCH + ' %s >%s'
+
+DIFF3_MERGE = 'diff3 -mE %s %s %s >%s'
+
+def read_config(mandatory_opts):
+ try:
+ opts = portage.getconfig('/etc/dispatch-conf.conf')
+ except:
+ opts = None
+
+ if not opts:
+ print >> sys.stderr, 'dispatch-conf: Error reading /etc/dispatch-conf.conf; fatal'
+ sys.exit(1)
+
+ for key in mandatory_opts:
+ if not opts.has_key(key):
+ if key == "merge":
+ opts["merge"] = "sdiff --suppress-common-lines --output=%s %s %s"
+ else:
+ print >> sys.stderr, 'dispatch-conf: Missing option "%s" in /etc/dispatch-conf.conf; fatal' % (key,)
+
+ if not os.path.exists(opts['archive-dir']):
+ os.mkdir(opts['archive-dir'])
+ elif not os.path.isdir(opts['archive-dir']):
+ print >> sys.stderr, 'dispatch-conf: Config archive dir [%s] must exist; fatal' % (opts['archive-dir'],)
+ sys.exit(1)
+
+ return opts
+
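+# A minimal /etc/dispatch-conf.conf sketch (the keys are the ones consulted
+# above; the values shown are illustrative, not authoritative defaults):
+#
+#   archive-dir=/etc/config-archive
+#   merge=sdiff --suppress-common-lines --output=%s %s %s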
+
+def rcs_archive(archive, curconf, newconf, mrgconf):
+ """Archive existing config in rcs (on trunk). Then, if mrgconf is
+ specified and an old branch version exists, merge the user's changes
+ and the distributed changes and put the result into mrgconf. Lastly,
+ if newconf was specified, leave it in the archive dir with a .dist.new
+ suffix along with the last 1.1.1 branch version with a .dist suffix."""
+
+ try:
+ os.makedirs(os.path.dirname(archive))
+ except:
+ pass
+
+ try:
+ shutil.copy2(curconf, archive)
+ except(IOError, os.error), why:
+ print >> sys.stderr, 'dispatch-conf: Error copying %s to %s: %s; fatal' % \
+ (curconf, archive, str(why))
+ if os.path.exists(archive + ',v'):
+ os.system(RCS_LOCK + ' ' + archive)
+ os.system(RCS_PUT + ' ' + archive)
+
+ ret = 0
+ if newconf != '':
+ os.system(RCS_GET + ' -r' + RCS_BRANCH + ' ' + archive)
+ has_branch = os.path.exists(archive)
+ if has_branch:
+ os.rename(archive, archive + '.dist')
+
+ try:
+ shutil.copy2(newconf, archive)
+ except(IOError, os.error), why:
+ print >> sys.stderr, 'dispatch-conf: Error copying %s to %s: %s; fatal' % \
+ (newconf, archive, str(why))
+
+ if has_branch:
+ if mrgconf != '':
+ # This puts the results of the merge into mrgconf.
+ ret = os.system(RCS_MERGE % (archive, mrgconf))
+ mystat = os.lstat(newconf)
+ os.chmod(mrgconf, mystat[ST_MODE])
+ os.chown(mrgconf, mystat[ST_UID], mystat[ST_GID])
+ os.rename(archive, archive + '.dist.new')
+ return ret
+
+
+def file_archive(archive, curconf, newconf, mrgconf):
+ """Archive existing config to the archive-dir, bumping old versions
+ out of the way into .# versions (log-rotate style). Then, if mrgconf
+ was specified and there is a .dist version, merge the user's changes
+ and the distributed changes and put the result into mrgconf. Lastly,
+ if newconf was specified, archive it as a .dist.new version (which
+ gets moved to the .dist version at the end of the processing)."""
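+ # Rotation sketch: for an archive path "foo", existing numbered backups
+ # shift foo.1 -> foo.2 and so on, the previous archive copy becomes foo.1,
+ # and at most nine numbered versions are kept (foo.9 is overwritten once
+ # the set is full).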
+
+ try:
+ os.makedirs(os.path.dirname(archive))
+ except:
+ pass
+
+ # Archive the current config file if it isn't already saved
+ if os.path.exists(archive) \
+ and len(commands.getoutput('diff -aq %s %s' % (curconf,archive))) != 0:
+ suf = 1
+ while suf < 9 and os.path.exists(archive + '.' + str(suf)):
+ suf += 1
+
+ while suf > 1:
+ os.rename(archive + '.' + str(suf-1), archive + '.' + str(suf))
+ suf -= 1
+
+ os.rename(archive, archive + '.1')
+
+ try:
+ shutil.copy2(curconf, archive)
+ except(IOError, os.error), why:
+ print >> sys.stderr, 'dispatch-conf: Error copying %s to %s: %s; fatal' % \
+ (curconf, archive, str(why))
+
+ if newconf != '':
+ # Save off new config file in the archive dir with .dist.new suffix
+ try:
+ shutil.copy2(newconf, archive + '.dist.new')
+ except(IOError, os.error), why:
+ print >> sys.stderr, 'dispatch-conf: Error copying %s to %s: %s; fatal' % \
+ (newconf, archive + '.dist.new', str(why))
+
+ ret = 0
+ if mrgconf != '' and os.path.exists(archive + '.dist'):
+ # This puts the results of the merge into mrgconf.
+ ret = os.system(DIFF3_MERGE % (curconf, archive + '.dist', newconf, mrgconf))
+ mystat = os.lstat(newconf)
+ os.chmod(mrgconf, mystat[ST_MODE])
+ os.chown(mrgconf, mystat[ST_UID], mystat[ST_GID])
+
+ return ret
+
+
+def rcs_archive_post_process(archive):
+ """Check in the archive file with the .dist.new suffix on the branch
+ and remove the one with the .dist suffix."""
+ os.rename(archive + '.dist.new', archive)
+ if os.path.exists(archive + '.dist'):
+ # Commit the last-distributed version onto the branch.
+ os.system(RCS_LOCK + RCS_BRANCH + ' ' + archive)
+ os.system(RCS_PUT + ' -r' + RCS_BRANCH + ' ' + archive)
+ os.unlink(archive + '.dist')
+ else:
+ # Forcefully commit the last-distributed version onto the branch.
+ os.system(RCS_PUT + ' -f -r' + RCS_BRANCH + ' ' + archive)
+
+
+def file_archive_post_process(archive):
+ """Rename the archive file with the .dist.new suffix to a .dist suffix"""
+ os.rename(archive + '.dist.new', archive + '.dist')
diff --git a/pym/portage/eclass_cache.py b/pym/portage/eclass_cache.py
new file mode 100644
index 00000000..91b98fec
--- /dev/null
+++ b/pym/portage/eclass_cache.py
@@ -0,0 +1,83 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Nicholas Carpaski (carpaski@gentoo.org), Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Id$
+
+from portage_util import normalize_path, writemsg
+import os, sys
+from portage_data import portage_gid
+
+class cache:
+ """
+ Maintains the cache information about eclasses used by ebuilds.
+ """
+ def __init__(self, porttree_root, overlays=[]):
+ self.porttree_root = porttree_root
+
+ self.eclasses = {} # {"Name": ("location","_mtime_")}
+ self._eclass_locations = {}
+
+ # screw with the porttree ordering, w/out having bash inherit match it, and I'll hurt you.
+ # ~harring
+ self.porttrees = [self.porttree_root]+overlays
+ self.porttrees = tuple(map(normalize_path, self.porttrees))
+ self._master_eclass_root = os.path.join(self.porttrees[0],"eclass")
+ self.update_eclasses()
+
+ def close_caches(self):
+ import traceback
+ traceback.print_stack()
+ print "%s close_cache is deprecated" % self.__class__
+ self.eclasses.clear()
+
+ def flush_cache(self):
+ import traceback
+ traceback.print_stack()
+ print "%s flush_cache is deprecated" % self.__class__
+
+ self.update_eclasses()
+
+ def update_eclasses(self):
+ self.eclasses = {}
+ self._eclass_locations = {}
+ eclass_len = len(".eclass")
+ for x in [normalize_path(os.path.join(y,"eclass")) for y in self.porttrees]:
+ if not os.path.isdir(x):
+ continue
+ for y in [y for y in os.listdir(x) if y.endswith(".eclass")]:
+ try:
+ mtime = long(os.stat(os.path.join(x, y)).st_mtime)
+ except OSError:
+ continue
+ ys=y[:-eclass_len]
+ self.eclasses[ys] = (x, long(mtime))
+ self._eclass_locations[ys] = x
+
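+ # Sketch of the ec_dict shape expected by is_eclass_data_valid below
+ # (values illustrative): {"eutils": ("/usr/portage/eclass", 1169000000L)}.
+ # Only the mtime at index 1 is actually compared during validation.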
+ def is_eclass_data_valid(self, ec_dict):
+ if not isinstance(ec_dict, dict):
+ return False
+ for eclass, tup in ec_dict.iteritems():
+ cached_data = self.eclasses.get(eclass, None)
+ """ Only use the mtime for validation since the probability of a
+ collision is small and, depending on the cache implementation, the
+ path may not be specified (cache from rsync mirrors, for example).
+ """
+ if cached_data is None or tup[1] != cached_data[1]:
+ return False
+
+ return True
+
+ def get_eclass_data(self, inherits, from_master_only=False):
+ ec_dict = {}
+ for x in inherits:
+ try:
+ ec_dict[x] = self.eclasses[x]
+ except KeyError:
+ print "ec=",ec_dict
+ print "inherits=",inherits
+ raise
+ if from_master_only and \
+ self._eclass_locations[x] != self._master_eclass_root:
+ return None
+
+ return ec_dict
diff --git a/pym/portage/elog_modules/__init__.py b/pym/portage/elog_modules/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/pym/portage/elog_modules/__init__.py
diff --git a/pym/portage/elog_modules/mod_custom.py b/pym/portage/elog_modules/mod_custom.py
new file mode 100644
index 00000000..d609e79b
--- /dev/null
+++ b/pym/portage/elog_modules/mod_custom.py
@@ -0,0 +1,16 @@
+import elog_modules.mod_save, portage_exec, portage_exception
+
+def process(mysettings, cpv, logentries, fulltext):
+ elogfilename = elog_modules.mod_save.process(mysettings, cpv, logentries, fulltext)
+
+ if (not "PORTAGE_ELOG_COMMAND" in mysettings.keys()) \
+ or len(mysettings["PORTAGE_ELOG_COMMAND"]) == 0:
+ raise portage_exception.MissingParameter("!!! Custom logging requested but PORTAGE_ELOG_COMMAND is not defined")
+ else:
+ mylogcmd = mysettings["PORTAGE_ELOG_COMMAND"]
+ mylogcmd = mylogcmd.replace("${LOGFILE}", elogfilename)
+ mylogcmd = mylogcmd.replace("${PACKAGE}", cpv)
+ retval = portage_exec.spawn_bash(mylogcmd)
+ if retval != 0:
+ raise portage_exception.PortageException("!!! PORTAGE_ELOG_COMMAND failed with exitcode %d" % retval)
+ return
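+
+# Illustrative make.conf setting (the ${PACKAGE} and ${LOGFILE} tokens are
+# substituted by process() above; the command path is hypothetical):
+#   PORTAGE_ELOG_COMMAND="/usr/local/bin/elog-notify ${PACKAGE} ${LOGFILE}"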
diff --git a/pym/portage/elog_modules/mod_mail.py b/pym/portage/elog_modules/mod_mail.py
new file mode 100644
index 00000000..b8e17a51
--- /dev/null
+++ b/pym/portage/elog_modules/mod_mail.py
@@ -0,0 +1,22 @@
+# portage.py -- core Portage functionality
+# Copyright 1998-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+import portage_mail, socket
+
+def process(mysettings, cpv, logentries, fulltext):
+ if mysettings.has_key("PORTAGE_ELOG_MAILURI"):
+ myrecipient = mysettings["PORTAGE_ELOG_MAILURI"].split()[0]
+ else:
+ myrecipient = "root@localhost"
+
+ myfrom = mysettings["PORTAGE_ELOG_MAILFROM"]
+ mysubject = mysettings["PORTAGE_ELOG_MAILSUBJECT"]
+ mysubject = mysubject.replace("${PACKAGE}", cpv)
+ mysubject = mysubject.replace("${HOST}", socket.getfqdn())
+
+ mymessage = portage_mail.create_message(myfrom, myrecipient, mysubject, fulltext)
+ portage_mail.send_mail(mysettings, mymessage)
+
+ return
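+
+# Note: only the first whitespace-separated token of PORTAGE_ELOG_MAILURI is
+# used as the recipient above, e.g. (illustrative):
+#   PORTAGE_ELOG_MAILURI="root@localhost"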
diff --git a/pym/portage/elog_modules/mod_mail_summary.py b/pym/portage/elog_modules/mod_mail_summary.py
new file mode 100644
index 00000000..5e642f41
--- /dev/null
+++ b/pym/portage/elog_modules/mod_mail_summary.py
@@ -0,0 +1,40 @@
+# portage.py -- core Portage functionality
+# Copyright 1998-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: mod_mail.py 3484 2006-06-10 22:38:44Z genone $
+
+import portage_mail, socket, os, time
+from email.MIMEText import MIMEText as TextMessage
+
+_items = {}
+def process(mysettings, cpv, logentries, fulltext):
+ header = ">>> Messages generated for package %s by process %d on %s:\n\n" % \
+ (cpv, os.getpid(), time.strftime("%Y%m%d-%H%M%S", time.gmtime(time.time())))
+ _items[cpv] = header + fulltext
+
+def finalize(mysettings):
+ if len(_items) == 0:
+ return
+ elif len(_items) == 1:
+ count = "one package"
+ else:
+ count = "multiple packages"
+ if mysettings.has_key("PORTAGE_ELOG_MAILURI"):
+ myrecipient = mysettings["PORTAGE_ELOG_MAILURI"].split()[0]
+ else:
+ myrecipient = "root@localhost"
+
+ myfrom = mysettings["PORTAGE_ELOG_MAILFROM"]
+ mysubject = mysettings["PORTAGE_ELOG_MAILSUBJECT"]
+ mysubject = mysubject.replace("${PACKAGE}", count)
+ mysubject = mysubject.replace("${HOST}", socket.getfqdn())
+
+ mybody = "elog messages for the following packages generated by " + \
+ "process %d on host %s:\n" % (os.getpid(), socket.getfqdn())
+ for cpv in _items.keys():
+ mybody += "- %s\n" % cpv
+
+ mymessage = portage_mail.create_message(myfrom, myrecipient, mysubject, mybody, attachments=_items.values())
+ portage_mail.send_mail(mysettings, mymessage)
+
+ return
diff --git a/pym/portage/elog_modules/mod_save.py b/pym/portage/elog_modules/mod_save.py
new file mode 100644
index 00000000..4e1cd2cf
--- /dev/null
+++ b/pym/portage/elog_modules/mod_save.py
@@ -0,0 +1,21 @@
+import os, time
+from portage_data import portage_uid, portage_gid
+
+def process(mysettings, cpv, logentries, fulltext):
+ cpv_path = cpv.replace("/", ":")
+
+ if mysettings["PORT_LOGDIR"] != "":
+ elogdir = os.path.join(mysettings["PORT_LOGDIR"], "elog")
+ else:
+ elogdir = os.path.join(os.sep, "var", "log", "portage", "elog")
+ if not os.path.exists(elogdir):
+ os.makedirs(elogdir)
+ os.chown(elogdir, portage_uid, portage_gid)
+ os.chmod(elogdir, 02770)
+
+ elogfilename = elogdir+"/"+cpv_path+":"+time.strftime("%Y%m%d-%H%M%S", time.gmtime(time.time()))+".log"
+ elogfile = open(elogfilename, "w")
+ elogfile.write(fulltext)
+ elogfile.close()
+
+ return elogfilename
diff --git a/pym/portage/elog_modules/mod_save_summary.py b/pym/portage/elog_modules/mod_save_summary.py
new file mode 100644
index 00000000..7cb310d9
--- /dev/null
+++ b/pym/portage/elog_modules/mod_save_summary.py
@@ -0,0 +1,23 @@
+import os, time
+from portage_data import portage_uid, portage_gid
+
+def process(mysettings, cpv, logentries, fulltext):
+ if mysettings["PORT_LOGDIR"] != "":
+ elogdir = os.path.join(mysettings["PORT_LOGDIR"], "elog")
+ else:
+ elogdir = os.path.join(os.sep, "var", "log", "portage", "elog")
+ if not os.path.exists(elogdir):
+ os.makedirs(elogdir)
+ os.chown(elogdir, portage_uid, portage_gid)
+ os.chmod(elogdir, 02770)
+
+ # TODO: Locking
+ elogfilename = elogdir+"/summary.log"
+ elogfile = open(elogfilename, "a")
+ elogfile.write(">>> Messages generated by process %d on %s for package %s:\n\n" % \
+ (os.getpid(), time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(time.time())), cpv))
+ elogfile.write(fulltext)
+ elogfile.write("\n")
+ elogfile.close()
+
+ return elogfilename
diff --git a/pym/portage/elog_modules/mod_syslog.py b/pym/portage/elog_modules/mod_syslog.py
new file mode 100644
index 00000000..a95ecb45
--- /dev/null
+++ b/pym/portage/elog_modules/mod_syslog.py
@@ -0,0 +1,17 @@
+import syslog
+from portage_const import EBUILD_PHASES
+
+def process(mysettings, cpv, logentries, fulltext):
+ syslog.openlog("portage", syslog.LOG_ERR | syslog.LOG_WARNING | syslog.LOG_INFO | syslog.LOG_NOTICE, syslog.LOG_LOCAL5)
+ for phase in EBUILD_PHASES:
+ if not phase in logentries:
+ continue
+ for msgtype,msgcontent in logentries[phase]:
+ pri = {"INFO": syslog.LOG_INFO,
+ "WARN": syslog.LOG_WARNING,
+ "ERROR": syslog.LOG_ERR,
+ "LOG": syslog.LOG_NOTICE,
+ "QA": syslog.LOG_WARNING}
+ msgtext = "".join(msgcontent)
+ syslog.syslog(pri[msgtype], "%s: %s: %s" % (cpv, phase, msgtext))
+ syslog.closelog()
diff --git a/pym/portage/emergehelp.py b/pym/portage/emergehelp.py
new file mode 100644
index 00000000..373e0bf4
--- /dev/null
+++ b/pym/portage/emergehelp.py
@@ -0,0 +1,420 @@
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+
+import os,sys
+from output import bold, turquoise, green
+
+def shorthelp():
+ print
+ print
+ print bold("Usage:")
+ print " "+turquoise("emerge")+" [ "+green("options")+" ] [ "+green("action")+" ] [ "+turquoise("ebuildfile")+" | "+turquoise("tbz2file")+" | "+turquoise("dependency")+" ] [ ... ]"
+ print " "+turquoise("emerge")+" [ "+green("options")+" ] [ "+green("action")+" ] < "+turquoise("system")+" | "+turquoise("world")+" >"
+ print " "+turquoise("emerge")+" < "+turquoise("--sync")+" | "+turquoise("--metadata")+" | "+turquoise("--info")+" >"
+ print " "+turquoise("emerge")+" "+turquoise("--resume")+" [ "+green("--pretend")+" | "+green("--ask")+" | "+green("--skipfirst")+" ]"
+ print " "+turquoise("emerge")+" "+turquoise("--help")+" [ "+green("system")+" | "+green("world")+" | "+green("config")+" | "+green("--sync")+" ] "
+ print bold("Options:")+" "+green("-")+"["+green("abBcCdDefgGhikKlnNoOpqPsStuvV")+"] ["+green("--oneshot")+"] ["+green("--newuse")+"] ["+green("--noconfmem")+"]"
+ print " [ " + green("--color")+" < " + turquoise("y") + " | "+ turquoise("n")+" > ] [ "+green("--columns")+" ]"
+ print " ["+green("--nospinner")+"]"
+ print " [ "+green("--deep")+" ] [" + green("--with-bdeps")+" < " + turquoise("y") + " | "+ turquoise("n")+" > ]"
+ print bold("Actions:")+" [ "+green("--clean")+" | "+green("--depclean")+" | "+green("--prune")+" | "+green("--regen")+" | "+green("--search")+" | "+green("--unmerge")+" ]"
+ print
+
+def help(myaction,myopts,havecolor=1):
+ if not myaction and ("--help" not in myopts):
+ shorthelp()
+ print
+ print " For more help try 'emerge --help' or consult the man page."
+ print
+ elif not myaction:
+ shorthelp()
+ print
+ print turquoise("Help (this screen):")
+ print " "+green("--help")+" ("+green("-h")+" short option)"
+ print " Displays this help; an additional argument (see above) will tell"
+ print " emerge to display detailed help."
+ print
+ print turquoise("Actions:")
+ print " "+green("--clean")+" ("+green("-c")+" short option)"
+ print " Cleans the system by removing outdated packages which will not"
+ print " remove functionality or prevent your system from working."
+ print " The arguments can be in several different formats :"
+ print " * world "
+ print " * system or"
+ print " * 'dependency specification' (in single quotes is best.)"
+ print " Here are a few examples of the dependency specification format:"
+ print " "+bold("binutils")+" matches"
+ print " binutils-2.11.90.0.7 and binutils-2.11.92.0.12.3-r1"
+ print " "+bold("sys-devel/binutils")+" matches"
+ print " binutils-2.11.90.0.7 and binutils-2.11.92.0.12.3-r1"
+ print " "+bold(">sys-devel/binutils-2.11.90.0.7")+" matches"
+ print " binutils-2.11.92.0.12.3-r1"
+ print " "+bold(">=sys-devel/binutils-2.11.90.0.7")+" matches"
+ print " binutils-2.11.90.0.7 and binutils-2.11.92.0.12.3-r1"
+ print " "+bold("<=sys-devel/binutils-2.11.92.0.12.3-r1")+" matches"
+ print " binutils-2.11.90.0.7 and binutils-2.11.92.0.12.3-r1"
+ print
+ print " "+green("--config")
+ print " Runs package-specific operations that must be executed after an"
+ print " emerge process has completed. This usually entails configuration"
+ print " file setup or other similar setups that the user may wish to run."
+ print
+ print " "+green("--depclean")
+ print " Cleans the system by removing packages that are not associated"
+ print " with explicitly merged packages. Depclean works by creating the"
+ print " full dependency tree from the system list and the world file,"
+ print " then comparing it to installed packages. Packages installed, but"
+ print " not associated with an explicit merge are listed as candidates"
+ print " for unmerging."+turquoise(" WARNING: This can seriously affect your system by")
+ print " "+turquoise("removing packages that may have been linked against, but due to")
+ print " "+turquoise("changes in USE flags may no longer be part of the dep tree. Use")
+ print " "+turquoise("caution when employing this feature.")
+ print
+ print " "+green("--info")
+ print " Displays important portage variables that will be exported to"
+ print " ebuild.sh when performing merges. This information is useful"
+ print " for bug reports and verification of settings. All settings in"
+ print " make.{conf,globals,defaults} and the environment show up if"
+ print " run with the '--verbose' flag."
+ print
+ print " "+green("--metadata")
+ print " Transfers metadata cache from ${PORTDIR}/metadata/cache/ to"
+ print " /var/cache/edb/dep/ as is normally done on the tail end of an"
+ print " rsync update using " + bold("emerge --sync") + ". This process populates the"
+ print " cache database that portage uses for pre-parsed lookups of"
+ print " package data. It does not populate cache for the overlays"
+ print " listed in PORTDIR_OVERLAY. In order to generate cache for"
+ print " overlays, use " + bold("--regen") + "."
+ print
+ print " "+green("--prune")+" ("+green("-P")+" short option)"
+ print " "+turquoise("WARNING: This action can remove important packages!")
+ print " Removes all but the most recently installed version of a package"
+ print " from your system. This action doesn't verify the possible binary"
+ print " compatibility between versions and can thus remove essential"
+ print " dependencies from your system."
+ print " The argument format is the same as for the "+bold("--clean")+" action."
+ print
+ print " "+green("--regen")
+ print " Causes portage to check and update the dependency cache of all"
+ print " ebuilds in the portage tree. This is not recommended for rsync"
+ print " users as rsync updates the cache using server-side caches."
+ print " Rsync users should simply 'emerge --sync' to regenerate."
+ print
+ print " "+green("--resume")
+ print " Resumes the last merge operation. It can be treated just like a"
+ print " regular emerge: --pretend and other options work alongside it."
+ print " 'emerge --resume' only returns an error on failure. When there is"
+ print " nothing to do, it exits with a message and a success condition."
+ print
+ print " "+green("--search")+" ("+green("-s")+" short option)"
+ print " Searches for matches of the supplied string in the current local"
+ print " portage tree. By default emerge uses a case-insensitive simple "
+ print " search, but you can enable a regular expression search by "
+ print " prefixing the search string with %."
+ print " Prepending the expression with a '@' will cause the category to"
+ print " be included in the search."
+ print " A few examples:"
+ print " "+bold("emerge --search libc")
+ print " list all packages that contain libc in their name"
+ print " "+bold("emerge --search '%^kde'")
+ print " list all packages starting with kde"
+ print " "+bold("emerge --search '%gcc$'")
+ print " list all packages ending with gcc"
+ print " "+bold("emerge --search '%@^dev-java.*jdk'")
+ print " list all available Java JDKs"
+ print
+ print " "+green("--searchdesc")+" ("+green("-S")+" short option)"
+ print " Matches the search string against the description field as well"
+ print " as the package's name. Take caution as the descriptions are also"
+ print " matched as regular expressions."
+ print " emerge -S html"
+ print " emerge -S applet"
+ print " emerge -S 'perl.*module'"
+ print
+ print " "+green("--unmerge")+" ("+green("-C")+" short option)"
+ print " "+turquoise("WARNING: This action can remove important packages!")
+ print " Removes all matching packages "+bold("completely")+" from"
+ print " your system. Specify arguments using the dependency specification"
+ print " format described in the "+bold("--clean")+" action above."
+ print
+ print " "+green("--update")+" ("+green("-u")+" short option)"
+ print " Updates packages to the best version available, which may not"
+ print " always be the highest version number due to masking for testing"
+ print " and development. This will also update direct dependencies which"
+ print " may not be what you want. Package atoms specified on the command line"
+ print " are greedy, meaning that unspecific atoms may match multiple"
+ print " installed versions of slotted packages."
+ print
+ print " "+green("--version")+" ("+green("-V")+" short option)"
+ print " Displays the currently installed version of portage along with"
+ print " other information useful for quick reference on a system. See"
+ print " "+bold("emerge info")+" for more advanced information."
+ print
+ print turquoise("Options:")
+ print " "+green("--alphabetical")
+ print " When displaying USE and other flag output, combines the enabled"
+ print " and disabled flags into a single list and sorts it alphabetically."
+ print " With this option, output such as USE=\"dar -bar -foo\" will instead"
+ print " be displayed as USE=\"-bar dar -foo\""
+ print
+ print " "+green("--ask")+" ("+green("-a")+" short option)"
+ print " before performing the merge, display what ebuilds and tbz2s will"
+ print " be installed, in the same format as when using --pretend; then"
+ print " ask whether to continue with the merge or abort. Using --ask is"
+ print " more efficient than using --pretend and then executing the same"
+ print " command without --pretend, as dependencies will only need to be"
+ print " calculated once. WARNING: If the \"Enter\" key is pressed at the"
+ print " prompt (with no other input), it is interpreted as acceptance of"
+ print " the first choice. Note that the input buffer is not cleared prior"
+ print " to the prompt, so an accidental press of the \"Enter\" key at any"
+ print " time prior to the prompt will be interpreted as a choice!"
+ print
+ print " "+green("--buildpkg")+" ("+green("-b")+" short option)"
+ print " Tell emerge to build binary packages for all ebuilds processed"
+ print " (in addition to actually merging the packages). Useful for"
+ print " maintainers or if you administrate multiple Gentoo Linux"
+ print " systems (build once, emerge tbz2s everywhere) as well as disaster"
+ print " recovery."
+ print
+ print " "+green("--buildpkgonly")+" ("+green("-B")+" short option)"
+ print " Creates a binary package, but does not merge it to the"
+ print " system. This has the restriction that unsatisfied dependencies"
+ print " must not exist for the desired package as they cannot be used if"
+ print " they do not exist on the system."
+ print
+ print " "+green("--changelog")+" ("+green("-l")+" short option)"
+ print " When pretending, also display the ChangeLog entries for packages"
+ print " that will be upgraded."
+ print
+ print " "+green("--color") + " < " + turquoise("y") + " | "+ turquoise("n")+" >"
+ print " Enable or disable color output. This option will override NOCOLOR"
+ print " (see make.conf(5)) and may also be used to force color output when"
+ print " stdout is not a tty (by default, color is disabled unless stdout"
+ print " is a tty)."
+ print
+ print " "+green("--columns")
+ print " Display the pretend output in a tabular form. Versions are"
+ print " aligned vertically."
+ print
+ print " "+green("--debug")+" ("+green("-d")+" short option)"
+ print " Tell emerge to run the ebuild command in --debug mode. In this"
+ print " mode, the bash build environment will run with the -x option,"
+ print " causing it to print verbose debug information to stdout."
+ print " --debug is great for finding bash syntax errors as well as providing"
+ print " very verbose information about the dependency and build process."
+ print
+ print " "+green("--deep")+" ("+green("-D")+" short option)"
+ print " This flag forces emerge to consider the entire dependency tree of"
+ print " packages, instead of checking only the immediate dependencies of"
+ print " the packages. As an example, this catches updates in libraries"
+ print " that are not directly listed in the dependencies of a package."
+ print " Also see --with-bdeps for behavior with respect to build time"
+ print " dependencies that are not strictly required."
+ print
+ print " "+green("--emptytree")+" ("+green("-e")+" short option)"
+ print " Virtually tweaks the tree of installed packages to contain"
+ print " nothing. This is great to use together with --pretend. This makes"
+ print " it possible for developers to get a complete overview of the"
+ print " complete dependency tree of a certain package."
+ print
+ print " "+green("--fetchonly")+" ("+green("-f")+" short option)"
+ print " Instead of doing any package building, just perform fetches for"
+ print " all packages (main package as well as all dependencies.) When"
+ print " used in combination with --pretend all the SRC_URIs will be"
+ print " displayed multiple mirrors per line, one line per file."
+ print
+ print " "+green("--fetch-all-uri")+" ("+green("-F")+" short option)"
+ print " Same as --fetchonly except that all package files, including those"
+ print " not required to build the package, will be processed."
+ print
+ print " "+green("--getbinpkg")+" ("+green("-g")+" short option)"
+ print " Using the server and location defined in PORTAGE_BINHOST, portage"
+ print " will download the information from each binary file there and it"
+ print " will use that information to help build the dependency list. This"
+ print " option implies '-k'. (Use -gK for binary-only merging.)"
+ print
+ print " "+green("--getbinpkgonly")+" ("+green("-G")+" short option)"
+ print " This option is identical to -g, as above, except it will not use"
+ print " ANY information from the local machine. All binaries will be"
+ print " downloaded from the remote server without consulting packages"
+ print " existing in the packages directory."
+ print
+ print " "+green("--newuse")+" ("+green("-N")+" short option)"
+ print " Tells emerge to include installed packages where USE flags have "
+ print " changed since installation."
+ print
+ print " "+green("--noconfmem")
+ print " Portage keeps track of files that have been placed into"
+ print " CONFIG_PROTECT directories, and normally it will not merge the"
+ print " same file more than once, as that would become annoying. This"
+ print " can lead to problems when the user wants the file in the case"
+ print " of accidental deletion. With this option, files will always be"
+ print " merged to the live fs instead of silently dropped."
+ print
+ print " "+green("--nodeps")+" ("+green("-O")+" short option)"
+ print " Merge specified packages, but don't merge any dependencies."
+ print " Note that the build may fail if deps aren't satisfied."
+ print
+ print " "+green("--noreplace")+" ("+green("-n")+" short option)"
+ print " Skip the packages specified on the command-line that have"
+ print " already been installed. Without this option, any packages,"
+ print " ebuilds, or deps you specify on the command-line *will* cause"
+ print " Portage to remerge the package, even if it is already installed."
+ print " Note that Portage won't remerge dependencies by default."
+ print
+ print " "+green("--nospinner")
+ print " Disables the spinner regardless of terminal type."
+ print
+ print " "+green("--oneshot")+" ("+green("-1")+" short option)"
+ print " Emerge as normal, but don't add packages to the world profile."
+ print " This package will only be updated if it is depended upon by"
+ print " another package."
+ print
+ print " "+green("--onlydeps")+" ("+green("-o")+" short option)"
+ print " Only merge (or pretend to merge) the dependencies of the"
+ print " specified packages, not the packages themselves."
+ print
+ print " "+green("--pretend")+" ("+green("-p")+" short option)"
+ print " Instead of actually performing the merge, simply display what"
+ print " ebuilds and tbz2s *would* have been installed if --pretend"
+ print " weren't used. Using --pretend is strongly recommended before"
+ print " installing an unfamiliar package. In the printout, N = new,"
+ print " U = updating, R = replacing, F = fetch restricted, B = blocked"
+ print " by an already installed package, D = possible downgrading,"
+ print " S = slotted install. --verbose causes relevant USE flags to be"
+ print " printed out accompanied by a '+' for enabled and a '-' for"
+ print " disabled USE flags."
+ print
+ print " "+green("--quiet")+" ("+green("-q")+" short option)"
+ print " Effects vary, but the general outcome is a reduced or condensed"
+ print " output from portage's displays."
+ print
+ print " "+green("--skipfirst")
+ print " This option is only valid in a resume situation. It removes the"
+ print " first package in the resume list so that a merge may continue in"
+ print " the presence of an uncorrectable or inconsequential error. This"
+ print " should only be used in cases where skipping the package will not"
+ print " result in failed dependencies."
+ print
+ print " "+green("--tree")+" ("+green("-t")+" short option)"
+ print " Shows the dependency tree using indentation for dependencies."
+ print " The packages are also listed in reverse merge order so that"
+ print " a package's dependencies follow the package. Only really useful"
+ print " in combination with --emptytree, --update or --deep."
+ print
+ print " "+green("--usepkg")+" ("+green("-k")+" short option)"
+ print " Tell emerge to use binary packages (from $PKGDIR) if they are"
+ print " available, thus possibly avoiding some time-consuming compiles."
+ print " This option is useful for CD installs; you can export"
+ print " PKGDIR=/mnt/cdrom/packages and then use this option to have"
+ print " emerge \"pull\" binary packages from the CD in order to satisfy"
+ print " dependencies."
+ print
+ print " "+green("--usepkgonly")+" ("+green("-K")+" short option)"
+ print " Like --usepkg above, except this only allows the use of binary"
+ print " packages, and it will abort the emerge if the package is not"
+ print " available at the time of dependency calculation."
+ print
+ print " "+green("--verbose")+" ("+green("-v")+" short option)"
+ print " Effects vary, but the general outcome is an increased or expanded"
+ print " display of content in portage's displays."
+ print
+ print " "+green("--with-bdeps")+" < " + turquoise("y") + " | "+ turquoise("n")+" >"
+ print " In dependency calculations, pull in build time dependencies that"
+ print " are not strictly required. This defaults to 'n' for installation"
+ print " actions and 'y' for the --depclean action. This setting can be"
+ print " added to EMERGE_DEFAULT_OPTS (see make.conf(5)) and later"
+ print " overridden via the command line."
+ print
+ elif myaction == "sync":
+ print
+ print bold("Usage: ")+turquoise("emerge")+" "+turquoise("--sync")
+ print
+ print " 'emerge --sync' tells emerge to update the Portage tree as specified in"
+ print " the SYNC variable found in /etc/make.conf. By default, SYNC instructs"
+ print " emerge to perform an rsync-style update with rsync.gentoo.org."
+ print
+ print " 'emerge-webrsync' exists as a helper app to emerge --sync, providing a"
+ print " method to receive the entire portage tree as a tarball that can be"
+ print " extracted and used. First time syncs would benefit greatly from this."
+ print
+ print " "+turquoise("WARNING:")
+ print " If using our rsync server, emerge will clean out all files that do not"
+ print " exist on it, including ones that you may have created. The exceptions"
+ print " to this are the distfiles, local and packages directories."
+ print
+ elif myaction=="system":
+ print
+ print bold("Usage: ")+turquoise("emerge")+" [ "+green("options")+" ] "+turquoise("system")
+ print
+ print " \"emerge system\" is the Portage system update command. When run, it"
+ print " will scan the etc/make.profile/packages file and determine what"
+ print " packages need to be installed so that your system meets the minimum"
+ print " requirements of your current system profile. Note that this doesn't"
+ print " necessarily bring your system up-to-date at all; instead, it just"
+ print " ensures that you have no missing parts. For example, if your system"
+ print " profile specifies that you should have sys-apps/iptables installed"
+ print " and you don't, then \"emerge system\" will install it (the most"
+ print " recent version that matches the profile spec) for you. It's always a"
+ print " good idea to do an \"emerge --pretend system\" before an \"emerge"
+ print " system\", just so you know what emerge is planning to do."
+ print
+ elif myaction=="world":
+ print
+ print bold("Usage: ")+turquoise("emerge")+" [ "+green("options")+" ] "+turquoise("world")
+ print
+ print " 'emerge world' is the Portage command for completely updating your"
+ print " system. The normal procedure is to first do an 'emerge --sync' and"
+ print " then an 'emerge --update --deep world'. The first command brings your"
+ print " local Portage tree up-to-date with the latest version information and"
+ print " ebuilds. The second command then rebuilds all packages for which newer"
+ print " versions or newer ebuilds have become available since you last did a"
+ print " sync and update."
+ print
+ elif myaction=="config":
+ outstuff=green("Config file management support (preliminary)")+"""
+
+Portage has a special feature called "config file protection". The purpose of
+this feature is to prevent new package installs from clobbering existing
+configuration files. By default, config file protection is turned on for /etc
+and the KDE configuration dirs; more may be added in the future.
+
+When Portage installs a file into a protected directory tree like /etc, any
+existing files will not be overwritten. If a file of the same name already
+exists, Portage will change the name of the to-be-installed file from 'foo' to
+'._cfg0000_foo'. If '._cfg0000_foo' already exists, this name becomes
+'._cfg0001_foo', etc. In this way, existing files are not overwritten,
+allowing the administrator to manually merge the new config files and avoid any
+unexpected changes.
+
+In addition to protecting overwritten files, Portage will not delete any files
+from a protected directory when a package is unmerged. While this may be a
+little bit untidy, it does prevent potentially valuable config files from being
+deleted, which is of paramount importance.
+
+Protected directories are set using the CONFIG_PROTECT variable, normally
+defined in /etc/make.globals. Directory exceptions to the CONFIG_PROTECTed
+directories can be specified using the CONFIG_PROTECT_MASK variable. To find
+files that need to be updated in /etc, type:
+
+# find /etc -iname '._cfg????_*'
+
+You can disable this feature by setting CONFIG_PROTECT="-*" in /etc/make.conf.
+Then, Portage will mercilessly auto-update your config files. Alternatively,
+you can leave Config File Protection on but tell Portage that it can overwrite
+files in certain specific /etc subdirectories. For example, if you wanted
+Portage to automatically update your rc scripts and your wget configuration,
+but didn't want any other changes made without your explicit approval, you'd
+add this to /etc/make.conf:
+
+CONFIG_PROTECT_MASK="/etc/wget /etc/rc.d"
+
+Tools such as dispatch-conf, cfg-update, and etc-update are also available to
+aid in the merging of these files. They provide interactive merging and can
+auto-merge trivial changes.
+
+"""
+ print outstuff
+
diff --git a/pym/portage/exception.py b/pym/portage/exception.py
new file mode 100644
index 00000000..4be72cf9
--- /dev/null
+++ b/pym/portage/exception.py
@@ -0,0 +1,100 @@
+# Copyright 1998-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+
+class PortageException(Exception):
+ """General superclass for portage exceptions"""
+ def __init__(self,value):
+ self.value = value[:]
+ def __str__(self):
+ if isinstance(self.value, basestring):
+ return self.value
+ else:
+ return repr(self.value)
+
+class CorruptionError(PortageException):
+ """Corruption indication"""
+
+class InvalidDependString(PortageException):
+ """An invalid depend string has been encountered"""
+
+class InvalidVersionString(PortageException):
+ """An invalid version string has been encountered"""
+
+class SecurityViolation(PortageException):
+ """A security check failed, e.g. an unexpected change to a GPG keyring"""
+
+class IncorrectParameter(PortageException):
+ """A parameter of the wrong type was passed"""
+
+class MissingParameter(PortageException):
+ """A parameter is required for the action requested but was not passed"""
+
+class ParseError(PortageException):
+ """An error was generated while attempting to parse the request"""
+
+class InvalidData(PortageException):
+ """An incorrect formatting was passed instead of the expected one"""
+
+class InvalidDataType(PortageException):
+ """An incorrect type was passed instead of the expected one"""
+
+class InvalidLocation(PortageException):
+ """Data was not found when it was expected to exist or was specified incorrectly"""
+
+class FileNotFound(InvalidLocation):
+ """A file was not found when it was expected to exist"""
+
+class DirectoryNotFound(InvalidLocation):
+ """A directory was not found when it was expected to exist"""
+
+class OperationNotPermitted(PortageException):
+ """An operation was not permitted by the operating system"""
+
+class PermissionDenied(PortageException):
+ """Permission denied"""
+
+class ReadOnlyFileSystem(PortageException):
+ """Read-only file system"""
+
+class CommandNotFound(PortageException):
+ """A required binary was not available or executable"""
+
+
+class PortagePackageException(PortageException):
+ """Malformed or missing package data"""
+
+class PackageNotFound(PortagePackageException):
+ """Missing Ebuild or Binary"""
+
+class InvalidPackageName(PortagePackageException):
+ """Malformed package name"""
+
+class InvalidAtom(PortagePackageException):
+ """Malformed atom spec"""
+
+class UnsupportedAPIException(PortagePackageException):
+ """Unsupported API"""
+ def __init__(self, cpv, eapi):
+ self.cpv, self.eapi = cpv, eapi
+ def __str__(self):
+  return "Unable to do any operations on '%s' because its EAPI is higher than this portage version supports. Please upgrade to a portage version that supports EAPI %s" % (self.cpv, self.eapi)
+
+
+
+class SignatureException(PortageException):
+ """Base class for signature related problems"""
+
+class DigestException(SignatureException):
+ """A problem exists in the digest"""
+
+class MissingSignature(SignatureException):
+ """Signature was not present in the checked file"""
+
+class InvalidSignature(SignatureException):
+ """Signature was checked and was not a valid, current, nor trusted signature"""
+
+class UntrustedSignature(SignatureException):
+ """Signature was not certified to the desired security level"""
+
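+# Illustrative usage only (hypothetical call site):
+#
+#   raise FileNotFound("/etc/make.conf")
+#
+# Catching InvalidLocation above would also catch FileNotFound and
+# DirectoryNotFound, since they subclass it.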
diff --git a/pym/portage/exec.py b/pym/portage/exec.py
new file mode 100644
index 00000000..252fed2a
--- /dev/null
+++ b/pym/portage/exec.py
@@ -0,0 +1,336 @@
+# portage.py -- core Portage functionality
+# Copyright 1998-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+
+import os, atexit, signal, sys
+import portage_data
+
+from portage_util import dump_traceback
+from portage_const import BASH_BINARY, SANDBOX_BINARY
+
+
+try:
+ import resource
+ max_fd_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
+except ImportError:
+ max_fd_limit = 256
+
+if os.path.isdir("/proc/%i/fd" % os.getpid()):
+ def get_open_fds():
+ return map(int, [fd for fd in os.listdir("/proc/%i/fd" % os.getpid()) if fd.isdigit()])
+else:
+ def get_open_fds():
+ return xrange(max_fd_limit)
+
+sandbox_capable = (os.path.isfile(SANDBOX_BINARY) and
+ os.access(SANDBOX_BINARY, os.X_OK))
+
+def spawn_bash(mycommand, debug=False, opt_name=None, **keywords):
+ """
+ Spawns a bash shell running a specific command
+
+ @param mycommand: The command for bash to run
+ @type mycommand: String
+ @param debug: Turn bash debugging on (set -x)
+ @type debug: Boolean
+ @param opt_name: Name of the spawned process (defaults to the binary name)
+ @type opt_name: String
+ @param keywords: Extra keyword arguments passed through to spawn
+ @type keywords: Dictionary
+ """
+
+ args = [BASH_BINARY]
+ if not opt_name:
+ opt_name = os.path.basename(mycommand.split()[0])
+ if debug:
+ # Print commands and their arguments as they are executed.
+ args.append("-x")
+ args.append("-c")
+ args.append(mycommand)
+ return spawn(args, opt_name=opt_name, **keywords)
+
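+# Illustrative use of spawn_bash() (hypothetical command string):
+#   rc = spawn_bash("ls /var/db/pkg", debug=True)
+# rc is the exit code of the shell, as returned by spawn() below.
+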
+def spawn_sandbox(mycommand, opt_name=None, **keywords):
+ if not sandbox_capable:
+ return spawn_bash(mycommand, opt_name=opt_name, **keywords)
+ args=[SANDBOX_BINARY]
+ if not opt_name:
+ opt_name = os.path.basename(mycommand.split()[0])
+ args.append(mycommand)
+ return spawn(args, opt_name=opt_name, **keywords)
+
+_exithandlers = []
+def atexit_register(func, *args, **kargs):
+ """Wrapper around atexit.register that is needed in order to track
+ what is registered. For example, when portage restarts itself via
+ os.execv, the atexit module does not work so we have to do it
+ manually by calling the run_exitfuncs() function in this module."""
+ _exithandlers.append((func, args, kargs))
+
+def run_exitfuncs():
+ """This should behave identically to the routine performed by
+ the atexit module at exit time. It's only necessary to call this
+ function when atexit will not work (because of os.execv, for
+ example)."""
+
+ # This function is a copy of the private atexit._run_exitfuncs()
+ # from the python 2.4.2 sources. The only difference from the
+ # original function is in the output to stderr.
+ exc_info = None
+ while _exithandlers:
+ func, targs, kargs = _exithandlers.pop()
+ try:
+ func(*targs, **kargs)
+ except SystemExit:
+ exc_info = sys.exc_info()
+ except: # No idea what they called, so we need this broad except here.
+ dump_traceback("Error in portage_exec.run_exitfuncs", noiselevel=0)
+ exc_info = sys.exc_info()
+
+ if exc_info is not None:
+ raise exc_info[0], exc_info[1], exc_info[2]
+
+atexit.register(run_exitfuncs)
+
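+# Illustrative use only: cleanup that must also survive an os.execv restart
+# should be registered through atexit_register() above, e.g. (hypothetical
+# path):
+#   atexit_register(os.unlink, "/tmp/portage-example.lock")
+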
+# We need to make sure that any processes spawned are killed off when
+# we exit. spawn() takes care of adding and removing pids to this list
+# as it creates and cleans up processes.
+spawned_pids = []
+def cleanup():
+ while spawned_pids:
+ pid = spawned_pids.pop()
+ try:
+ if os.waitpid(pid, os.WNOHANG) == (0, 0):
+ os.kill(pid, signal.SIGTERM)
+ os.waitpid(pid, 0)
+ except OSError:
+ # This pid has been cleaned up outside
+ # of spawn().
+ pass
+
+atexit_register(cleanup)
+
+def spawn(mycommand, env={}, opt_name=None, fd_pipes=None, returnpid=False,
+ uid=None, gid=None, groups=None, umask=None, logfile=None,
+ path_lookup=True):
+ """
+ Spawns a given command.
+
+ @param mycommand: the command to execute
+ @type mycommand: String or List (Popen style list)
+ @param env: A dict of Key=Value pairs for env variables
+ @type env: Dictionary
+ @param opt_name: an optional name for the spawn'd process (defaults to the binary name)
+ @type opt_name: String
+ @param fd_pipes: A dict mapping file descriptors, e.g. { 0: stdin, 1: stdout }
+ @type fd_pipes: Dictionary
+ @param returnpid: Return the Process IDs for a successful spawn.
+ NOTE: This requires the caller to clean up all the PIDs; otherwise spawn will clean them up.
+ @type returnpid: Boolean
+ @param uid: User ID to spawn as; useful for dropping privileges
+ @type uid: Integer
+ @param gid: Group ID to spawn as; useful for dropping privileges
+ @type gid: Integer
+ @param groups: Group IDs to spawn in; useful for having the process run in multiple group contexts.
+ @type groups: List
+ @param umask: An integer representing the umask for the process (see man chmod for umask details)
+ @type umask: Integer
+ @param logfile: name of a file to use for logging purposes
+ @type logfile: String
+ @param path_lookup: If the binary is not fully specified then look for it in PATH
+ @type path_lookup: Boolean
+
+ logfile requires stdout and stderr to be assigned to this process (i.e. not
+ pointed somewhere else).
+
+ """
+
+ # mycommand is either a str or a list
+ if isinstance(mycommand, str):
+ mycommand = mycommand.split()
+
+ # If an absolute path to an executable file isn't given
+ # search for it unless we've been told not to.
+ binary = mycommand[0]
+ if (not os.path.isabs(binary) or not os.path.isfile(binary)
+ or not os.access(binary, os.X_OK)):
+ binary = path_lookup and find_binary(binary) or None
+ if not binary:
+ return -1
+
+ # If we haven't been told what file descriptors to use
+ # default to propagating our stdin, stdout and stderr.
+ if fd_pipes is None:
+ fd_pipes = {0:0, 1:1, 2:2}
+
+ # mypids will hold the pids of all processes created.
+ mypids = []
+
+ if logfile:
+ # Using a log file requires that stdout and stderr
+ # are assigned to the process we're running.
+ if 1 not in fd_pipes or 2 not in fd_pipes:
+ raise ValueError(fd_pipes)
+
+ # Create a pipe
+ (pr, pw) = os.pipe()
+
+ # Create a tee process, giving it our stdout and stderr
+ # as well as the read end of the pipe.
+ mypids.extend(spawn(('tee', '-i', '-a', logfile),
+ returnpid=True, fd_pipes={0:pr,
+ 1:fd_pipes[1], 2:fd_pipes[2]}))
+
+ # We don't need the read end of the pipe, so close it.
+ os.close(pr)
+
+ # Assign the write end of the pipe to our stdout and stderr.
+ fd_pipes[1] = pw
+ fd_pipes[2] = pw
+
+ pid = os.fork()
+
+ if not pid:
+ try:
+ _exec(binary, mycommand, opt_name, fd_pipes,
+ env, gid, groups, uid, umask)
+ except Exception, e:
+ # We need to catch _any_ exception so that it doesn't
+ # propagate out of this function and cause exiting
+ # with anything other than os._exit()
+ sys.stderr.write("%s:\n %s\n" % (e, " ".join(mycommand)))
+ sys.stderr.flush()
+ os._exit(1)
+
+ # Add the pid to our local and the global pid lists.
+ mypids.append(pid)
+ spawned_pids.append(pid)
+
+ # If we started a tee process the write side of the pipe is no
+ # longer needed, so close it.
+ if logfile:
+ os.close(pw)
+
+ # If the caller wants to handle cleaning up the processes, we tell
+ # it about all processes that were created.
+ if returnpid:
+ return mypids
+
+ # Otherwise we clean them up.
+ while mypids:
+
+ # Pull the last reader in the pipe chain. If all processes
+ # in the pipe are well behaved, it will die when the process
+ # it is reading from dies.
+ pid = mypids.pop(0)
+
+ # and wait for it.
+ retval = os.waitpid(pid, 0)[1]
+
+ # When it's done, we can remove it from the
+ # global pid list as well.
+ spawned_pids.remove(pid)
+
+ if retval:
+ # If it failed, kill off anything else that
+ # isn't dead yet.
+ for pid in mypids:
+ if os.waitpid(pid, os.WNOHANG) == (0,0):
+ os.kill(pid, signal.SIGTERM)
+ os.waitpid(pid, 0)
+ spawned_pids.remove(pid)
+
+ # If it got a signal, return the signal that was sent.
+ if (retval & 0xff):
+ return ((retval & 0xff) << 8)
+
+ # Otherwise, return its exit code.
+ return (retval >> 8)
+
+ # Everything succeeded
+ return 0
+
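+# Illustrative spawn() call (hypothetical values): run env(1) with a minimal
+# environment, copying stdout/stderr to a log file via the tee helper above.
+#   rc = spawn(["/usr/bin/env"], env={"HOME": "/root"},
+#       logfile="/var/log/portage-example.log")
+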
+def _exec(binary, mycommand, opt_name, fd_pipes, env, gid, groups, uid, umask):
+
+ """
+ Execute a given binary with options
+
+ @param binary: Name of program to execute
+ @type binary: String
+ @param mycommand: The command's full argument list (argv style)
+ @type mycommand: List
+ @param opt_name: Name of process (defaults to binary)
+ @type opt_name: String
+ @param fd_pipes: Mapping pipes to destination; { 0:0, 1:1, 2:2 }
+ @type fd_pipes: Dictionary
+ @param env: Key,Value mapping for Environmental Variables
+ @type env: Dictionary
+ @param gid: Group ID to run the process under
+ @type gid: Integer
+ @param groups: Groups the process should be in.
+ @type groups: List
+ @param uid: User ID to run the process under
+ @type uid: Integer
+ @param umask: an int representing a unix umask (see man chmod for umask details)
+ @type umask: Integer
+ @rtype: None
+ @returns: Never returns (calls os.execve)
+ """
+
+ # If the process we're creating hasn't been given a name
+ # assign it the name of the executable.
+ if not opt_name:
+ opt_name = os.path.basename(binary)
+
+ # Set up the command's argument list.
+ myargs = [opt_name]
+ myargs.extend(mycommand[1:])
+
+ # Set up the command's pipes.
+ my_fds = {}
+ # To protect from cases where direct assignment could
+ # clobber needed fds ({1:2, 2:1}) we first dupe the fds
+ # into unused fds.
+ for fd in fd_pipes:
+ my_fds[fd] = os.dup(fd_pipes[fd])
+ # Then assign them to what they should be.
+ for fd in my_fds:
+ os.dup2(my_fds[fd], fd)
+ # Then close _all_ fds that haven't been explicitly
+ # requested to be kept open.
+ for fd in get_open_fds():
+ if fd not in my_fds:
+ try:
+ os.close(fd)
+ except OSError:
+ pass
+
+ # Set requested process permissions.
+ if gid:
+ os.setgid(gid)
+ if groups:
+ os.setgroups(groups)
+ if uid:
+ os.setuid(uid)
+ if umask:
+ os.umask(umask)
+
+ # And switch to the new process.
+ os.execve(binary, myargs, env)
+
+def find_binary(binary):
+ """
+ Given a binary name, find the binary in PATH
+
+ @param binary: Name of the binary to find
+ @type binary: String
+ @rtype: None or string
+ @returns: full path to binary or None if the binary could not be located.
+ """
+
+ for path in os.getenv("PATH", "").split(":"):
+ filename = "%s/%s" % (path, binary)
+ if os.access(filename, os.X_OK) and os.path.isfile(filename):
+ return filename
+ return None
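+
+# Illustrative only:
+#   find_binary("bash")  # -> e.g. "/bin/bash", or None if not in PATH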
diff --git a/pym/portage/getbinpkg.py b/pym/portage/getbinpkg.py
new file mode 100644
index 00000000..462da429
--- /dev/null
+++ b/pym/portage/getbinpkg.py
@@ -0,0 +1,572 @@
+# getbinpkg.py -- Portage binary-package helper functions
+# Copyright 2003-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+if not hasattr(__builtins__, "set"):
+ from sets import Set as set
+
+from output import red, yellow, green
+import htmllib,HTMLParser,formatter,sys,os,xpak,time,tempfile,base64,urllib2
+
+try:
+ import cPickle
+except ImportError:
+ import pickle as cPickle
+
+try:
+ import ftplib
+except SystemExit, e:
+ raise
+except Exception, e:
+ sys.stderr.write(red("!!! CANNOT IMPORT FTPLIB: ")+str(e)+"\n")
+
+try:
+ import httplib
+except SystemExit, e:
+ raise
+except Exception, e:
+ sys.stderr.write(red("!!! CANNOT IMPORT HTTPLIB: ")+str(e)+"\n")
+
+def make_metadata_dict(data):
+ myid,myglob = data
+
+ mydict = {}
+ for x in xpak.getindex_mem(myid):
+ mydict[x] = xpak.getitem(data,x)
+
+ return mydict
+
+class ParseLinks(HTMLParser.HTMLParser):
+ """Parser class that overrides HTMLParser to grab all anchors from an html
+ page and provide suffix and prefix limiters"""
+ def __init__(self):
+ self.PL_anchors = []
+ HTMLParser.HTMLParser.__init__(self)
+
+ def get_anchors(self):
+ return self.PL_anchors
+
+ def get_anchors_by_prefix(self,prefix):
+ newlist = []
+ for x in self.PL_anchors:
+ if x.startswith(prefix):
+ if x not in newlist:
+ newlist.append(x[:])
+ return newlist
+
+ def get_anchors_by_suffix(self,suffix):
+ newlist = []
+ for x in self.PL_anchors:
+ if x.endswith(suffix):
+ if x not in newlist:
+ newlist.append(x[:])
+ return newlist
+
+ def handle_endtag(self,tag):
+ pass
+
+ def handle_starttag(self,tag,attrs):
+ if tag == "a":
+ for x in attrs:
+ if x[0] == 'href':
+ if x[1] not in self.PL_anchors:
+ self.PL_anchors.append(urllib2.unquote(x[1]))
+
+
+def create_conn(baseurl,conn=None):
+ """(baseurl,conn) --- Takes a protocol://site:port/address url, and an
+ optional connection. If connection is already active, it is passed on.
+ baseurl is reduced to address and is returned in tuple (conn,address)"""
+ parts = baseurl.split("://",1)
+ if len(parts) != 2:
+ raise ValueError, "Provided URL does not contain protocol identifier. '%s'" % baseurl
+ protocol,url_parts = parts
+ del parts
+ host,address = url_parts.split("/",1)
+ del url_parts
+ address = "/"+address
+
+ userpass_host = host.split("@",1)
+ if len(userpass_host) == 1:
+ host = userpass_host[0]
+ userpass = ["anonymous"]
+ else:
+ host = userpass_host[1]
+ userpass = userpass_host[0].split(":")
+ del userpass_host
+
+ if len(userpass) > 2:
+ raise ValueError, "Unable to interpret username/password provided."
+ elif len(userpass) == 2:
+ username = userpass[0]
+ password = userpass[1]
+ elif len(userpass) == 1:
+ username = userpass[0]
+ password = None
+ del userpass
+
+ http_headers = {}
+ http_params = {}
+ if username and password:
+ http_headers = {
+ "Authorization": "Basic %s" %
+ base64.encodestring("%s:%s" % (username, password)).replace(
+ "\012",
+ ""
+ ),
+ }
+
+ if not conn:
+ if protocol == "https":
+ conn = httplib.HTTPSConnection(host)
+ elif protocol == "http":
+ conn = httplib.HTTPConnection(host)
+ elif protocol == "ftp":
+ passive = 1
+ if(host[-1] == "*"):
+ passive = 0
+ host = host[:-1]
+ conn = ftplib.FTP(host)
+ if password:
+ conn.login(username,password)
+ else:
+ sys.stderr.write(yellow(" * No password provided for username")+" '"+str(username)+"'\n\n")
+ conn.login(username)
+ conn.set_pasv(passive)
+ conn.set_debuglevel(0)
+ else:
+ raise NotImplementedError, "%s is not a supported protocol." % protocol
+
+ return (conn,protocol,address, http_params, http_headers)
+
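+# Illustrative only -- URL shapes accepted above (hypothetical hosts):
+#   create_conn("http://distfiles.example.org/packages/")
+#   create_conn("ftp://user:pass@ftp.example.org/packages/")
+# An ftp host ending in "*" disables passive mode, per the check above.
+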
+def make_ftp_request(conn, address, rest=None, dest=None):
+ """(conn,address,rest) --- uses the conn object to request the data
+ from address and issuing a rest if it is passed."""
+ try:
+
+ if dest:
+ fstart_pos = dest.tell()
+
+ conn.voidcmd("TYPE I")
+ fsize = conn.size(address)
+
+ if (rest != None) and (rest < 0):
+ rest = fsize+int(rest)
+ if rest < 0:
+ rest = 0
+
+ if rest != None:
+ mysocket = conn.transfercmd("RETR "+str(address), rest)
+ else:
+ mysocket = conn.transfercmd("RETR "+str(address))
+
+ mydata = ""
+ while 1:
+ somedata = mysocket.recv(8192)
+ if somedata:
+ if dest:
+ dest.write(somedata)
+ else:
+ mydata = mydata + somedata
+ else:
+ break
+
+ if dest:
+  data_size = dest.tell() - fstart_pos
+ else:
+ data_size = len(mydata)
+
+ mysocket.close()
+ conn.voidresp()
+ conn.voidcmd("TYPE A")
+
+ return mydata,not (fsize==data_size),""
+
+ except ValueError, e:
+ return None,int(str(e)[:4]),str(e)
+
+
+def make_http_request(conn, address, params={}, headers={}, dest=None):
+ """(conn,address,params,headers) --- uses the conn object to request
+ the data from address, performing Location forwarding and using the
+ optional params and headers."""
+
+ rc = 0
+ response = None
+ while (rc == 0) or (rc == 301) or (rc == 302):
+ try:
+ if (rc != 0):
+ conn,ignore,ignore,ignore,ignore = create_conn(address)
+ conn.request("GET", address, params, headers)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ return None,None,"Server request failed: "+str(e)
+ response = conn.getresponse()
+ rc = response.status
+
+  # 301 and 302 mean the page has moved; follow the Location header.
+ if ((rc == 301) or (rc == 302)):
+ ignored_data = response.read()
+ del ignored_data
+ for x in str(response.msg).split("\n"):
+ parts = x.split(": ",1)
+ if parts[0] == "Location":
+ if (rc == 301):
+ sys.stderr.write(red("Location has moved: ")+str(parts[1])+"\n")
+ if (rc == 302):
+ sys.stderr.write(red("Location has temporarily moved: ")+str(parts[1])+"\n")
+ address = parts[1]
+ break
+
+ if (rc != 200) and (rc != 206):
+ sys.stderr.write(str(response.msg)+"\n")
+ sys.stderr.write(response.read()+"\n")
+ sys.stderr.write("address: "+address+"\n")
+ return None,rc,"Server did not respond successfully ("+str(response.status)+": "+str(response.reason)+")"
+
+ if dest:
+ dest.write(response.read())
+ return "",0,""
+
+ return response.read(),0,""
+
+
+def match_in_array(array, prefix="", suffix="", match_both=1, allow_overlap=0):
+ myarray = []
+
+ if not (prefix and suffix):
+ match_both = 0
+
+ for x in array:
+ add_p = 0
+ if prefix and (len(x) >= len(prefix)) and (x[:len(prefix)] == prefix):
+ add_p = 1
+
+ if match_both:
+ if prefix and not add_p: # Require both, but don't have first one.
+ continue
+ else:
+ if add_p: # Only need one, and we have it.
+ myarray.append(x[:])
+ continue
+
+  if not allow_overlap: # Don't allow prefix and suffix to overlap.
+ if len(x) >= (len(prefix)+len(suffix)):
+ y = x[len(prefix):]
+ else:
+ continue # Too short to match.
+ else:
+ y = x # Do whatever... We're overlapping.
+
+ if suffix and (len(x) >= len(suffix)) and (x[-len(suffix):] == suffix):
+ myarray.append(x) # It matches
+ else:
+ continue # Doesn't match.
+
+ return myarray
+
+
+
+def dir_get_list(baseurl,conn=None):
+ """(baseurl[,connection]) -- Takes a base url to connect to and read from.
+ URL should be in the form <proto>://<site>[:port]<path>
+ Connection is used for persistent connection instances."""
+
+ if not conn:
+ keepconnection = 0
+ else:
+ keepconnection = 1
+
+ conn,protocol,address,params,headers = create_conn(baseurl, conn)
+
+ listing = None
+ if protocol in ["http","https"]:
+ page,rc,msg = make_http_request(conn,address,params,headers)
+
+ if page:
+ parser = ParseLinks()
+ parser.feed(page)
+ del page
+ listing = parser.get_anchors()
+ else:
+ raise Exception, "Unable to get listing: %s %s" % (rc,msg)
+ elif protocol in ["ftp"]:
+ if address[-1] == '/':
+ olddir = conn.pwd()
+ conn.cwd(address)
+ listing = conn.nlst()
+ conn.cwd(olddir)
+ del olddir
+ else:
+ listing = conn.nlst(address)
+ else:
+ raise TypeError, "Unknown protocol. '%s'" % protocol
+
+ if not keepconnection:
+ conn.close()
+
+ return listing
+
+def file_get_metadata(baseurl,conn=None, chunk_size=3000):
+ """(baseurl[,connection]) -- Takes a base url to connect to and read from.
+ URL should be in the form <proto>://<site>[:port]<path>
+ Connection is used for persistent connection instances."""
+
+ if not conn:
+ keepconnection = 0
+ else:
+ keepconnection = 1
+
+ conn,protocol,address,params,headers = create_conn(baseurl, conn)
+
+ if protocol in ["http","https"]:
+ headers["Range"] = "bytes=-"+str(chunk_size)
+ data,rc,msg = make_http_request(conn, address, params, headers)
+ elif protocol in ["ftp"]:
+ data,rc,msg = make_ftp_request(conn, address, -chunk_size)
+ else:
+ raise TypeError, "Unknown protocol. '%s'" % protocol
+
+ if data:
+ xpaksize = xpak.decodeint(data[-8:-4])
+ if (xpaksize+8) > chunk_size:
+ myid = file_get_metadata(baseurl, conn, (xpaksize+8))
+ if not keepconnection:
+ conn.close()
+ return myid
+ else:
+ xpak_data = data[len(data)-(xpaksize+8):-8]
+ del data
+
+ myid = xpak.xsplit_mem(xpak_data)
+ if not myid:
+ myid = None,None
+ del xpak_data
+ else:
+ myid = None,None
+
+ if not keepconnection:
+ conn.close()
+
+ return myid
+
+
+def file_get(baseurl,dest,conn=None,fcmd=None):
+ """(baseurl,dest,fcmd=) -- Takes a base url to connect to and read from.
+ URL should be in the form <proto>://[user[:pass]@]<site>[:port]<path>"""
+
+ if not fcmd:
+ return file_get_lib(baseurl,dest,conn)
+
+ fcmd = fcmd.replace("${DISTDIR}",dest)
+ fcmd = fcmd.replace("${URI}", baseurl)
+ fcmd = fcmd.replace("${FILE}", os.path.basename(baseurl))
+ mysplit = fcmd.split()
+ mycmd = mysplit[0]
+ myargs = [os.path.basename(mycmd)]+mysplit[1:]
+ mypid=os.fork()
+ if mypid == 0:
+ try:
+ os.execv(mycmd,myargs)
+ except OSError:
+ pass
+ sys.stderr.write("!!! Failed to spawn fetcher.\n")
+ sys.stderr.flush()
+ os._exit(1)
+ retval=os.waitpid(mypid,0)[1]
+ if (retval & 0xff) == 0:
+ retval = retval >> 8
+ else:
+  sys.stderr.write("Spawned process caught a signal.\n")
+ sys.exit(1)
+ if retval != 0:
+ sys.stderr.write("Fetcher exited with a failure condition.\n")
+ return 0
+ return 1
+
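+# Illustrative fcmd (hypothetical fetcher): the ${} placeholders are
+# substituted literally before the command is split and exec'd.
+#   file_get(url, "/tmp", fcmd="/usr/bin/wget -O ${DISTDIR}/${FILE} ${URI}")
+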
+def file_get_lib(baseurl,dest,conn=None):
+ """(baseurl[,connection]) -- Takes a base url to connect to and read from.
+ URL should be in the form <proto>://<site>[:port]<path>
+ Connection is used for persistent connection instances."""
+
+ if not conn:
+ keepconnection = 0
+ else:
+ keepconnection = 1
+
+ conn,protocol,address,params,headers = create_conn(baseurl, conn)
+
+ sys.stderr.write("Fetching '"+str(os.path.basename(address))+"'\n")
+ if protocol in ["http","https"]:
+ data,rc,msg = make_http_request(conn, address, params, headers, dest=dest)
+ elif protocol in ["ftp"]:
+ data,rc,msg = make_ftp_request(conn, address, dest=dest)
+ else:
+ raise TypeError, "Unknown protocol. '%s'" % protocol
+
+ if not keepconnection:
+ conn.close()
+
+ return rc
+
+
+def dir_get_metadata(baseurl, conn=None, chunk_size=3000, verbose=1, usingcache=1, makepickle=None):
+ """(baseurl,conn,chunk_size,verbose) -- Fetch the metadata index for a
+ remote binary package directory, keeping a local cache of it under
+ /var/cache/edb. Returns a dict mapping tbz2 filenames to metadata."""
+ if not conn:
+ keepconnection = 0
+ else:
+ keepconnection = 1
+
+ if makepickle is None:
+ makepickle = "/var/cache/edb/metadata.idx.most_recent"
+
+ conn,protocol,address,params,headers = create_conn(baseurl, conn)
+
+ filedict = {}
+
+ try:
+ metadatafile = open("/var/cache/edb/remote_metadata.pickle")
+ metadata = cPickle.load(metadatafile)
+ sys.stderr.write("Loaded metadata pickle.\n")
+ metadatafile.close()
+ except (cPickle.UnpicklingError, OSError, IOError, EOFError):
+ metadata = {}
+ if not metadata.has_key(baseurl):
+ metadata[baseurl]={}
+ if not metadata[baseurl].has_key("indexname"):
+ metadata[baseurl]["indexname"]=""
+ if not metadata[baseurl].has_key("timestamp"):
+ metadata[baseurl]["timestamp"]=0
+ if not metadata[baseurl].has_key("unmodified"):
+ metadata[baseurl]["unmodified"]=0
+ if not metadata[baseurl].has_key("data"):
+ metadata[baseurl]["data"]={}
+
+ filelist = dir_get_list(baseurl, conn)
+ tbz2list = match_in_array(filelist, suffix=".tbz2")
+ metalist = match_in_array(filelist, prefix="metadata.idx")
+ del filelist
+
+ # Determine if our metadata file is current.
+ metalist.sort()
+ metalist.reverse() # makes the order new-to-old.
+ havecache=0
+ for mfile in metalist:
+ if usingcache and \
+ ((metadata[baseurl]["indexname"] != mfile) or \
+ (metadata[baseurl]["timestamp"] < int(time.time()-(60*60*24)))):
+ # Try to download new cache until we succeed on one.
+ data=""
+ for trynum in [1,2,3]:
+ mytempfile = tempfile.TemporaryFile()
+ try:
+ file_get(baseurl+"/"+mfile, mytempfile, conn)
+ if mytempfile.tell() > len(data):
+ mytempfile.seek(0)
+ data = mytempfile.read()
+ except ValueError, e:
+ sys.stderr.write("--- "+str(e)+"\n")
+ if trynum < 3:
+ sys.stderr.write("Retrying...\n")
+ mytempfile.close()
+ continue
+ if match_in_array([mfile],suffix=".gz"):
+ sys.stderr.write("gzip'd\n")
+ try:
+ import gzip
+ mytempfile.seek(0)
+ gzindex = gzip.GzipFile(mfile[:-3],'rb',9,mytempfile)
+ data = gzindex.read()
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ mytempfile.close()
+ sys.stderr.write("!!! Failed to use gzip: "+str(e)+"\n")
+ mytempfile.close()
+ try:
+ metadata[baseurl]["data"] = cPickle.loads(data)
+ del data
+ metadata[baseurl]["indexname"] = mfile
+ metadata[baseurl]["timestamp"] = int(time.time())
+ metadata[baseurl]["modified"] = 0 # It's not, right after download.
+ sys.stderr.write("Pickle loaded.\n")
+ break
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ sys.stderr.write("!!! Failed to read data from index: "+str(mfile)+"\n")
+ sys.stderr.write("!!! "+str(e)+"\n")
+ try:
+ metadatafile = open("/var/cache/edb/remote_metadata.pickle", "w+")
+ cPickle.dump(metadata,metadatafile)
+ metadatafile.close()
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ sys.stderr.write("!!! Failed to write binary metadata to disk!\n")
+ sys.stderr.write("!!! "+str(e)+"\n")
+ break
+ # We may have metadata... now we run through the tbz2 list and check.
+ sys.stderr.write(yellow("cache miss: 'x'")+" --- "+green("cache hit: 'o'")+"\n")
+ binpkg_filenames = set()
+ for x in tbz2list:
+ x = os.path.basename(x)
+ binpkg_filenames.add(x)
+  if not metadata[baseurl]["data"].has_key(x):
+ sys.stderr.write(yellow("x"))
+ metadata[baseurl]["modified"] = 1
+ myid = None
+ for retry in xrange(3):
+ try:
+ myid = file_get_metadata(
+ "/".join((baseurl.rstrip("/"), x.lstrip("/"))),
+ conn, chunk_size)
+ break
+ except httplib.BadStatusLine:
+ # Sometimes this error is thrown from conn.getresponse() in
+ # make_http_request(). The docstring for this error in
+ # httplib.py says "Presumably, the server closed the
+ # connection before sending a valid response".
+ conn, protocol, address, params, headers = create_conn(
+ baseurl)
+
+ if myid and myid[0]:
+ metadata[baseurl]["data"][x] = make_metadata_dict(myid)
+ elif verbose:
+ sys.stderr.write(red("!!! Failed to retrieve metadata on: ")+str(x)+"\n")
+ else:
+ sys.stderr.write(green("o"))
+ # Cleanse stale cache for files that don't exist on the server anymore.
+ stale_cache = set(metadata[baseurl]["data"]).difference(binpkg_filenames)
+ if stale_cache:
+ for x in stale_cache:
+ del metadata[baseurl]["data"][x]
+ metadata[baseurl]["modified"] = 1
+ del stale_cache
+ del binpkg_filenames
+ sys.stderr.write("\n")
+
+ try:
+ if metadata[baseurl].has_key("modified") and metadata[baseurl]["modified"]:
+ metadata[baseurl]["timestamp"] = int(time.time())
+ metadatafile = open("/var/cache/edb/remote_metadata.pickle", "w+")
+ cPickle.dump(metadata,metadatafile)
+ metadatafile.close()
+ if makepickle:
+ metadatafile = open(makepickle, "w")
+ cPickle.dump(metadata[baseurl]["data"],metadatafile)
+ metadatafile.close()
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ sys.stderr.write("!!! Failed to write binary metadata to disk!\n")
+ sys.stderr.write("!!! "+str(e)+"\n")
+
+ if not keepconnection:
+ conn.close()
+
+ return metadata[baseurl]["data"]
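+
+# Illustrative only (hypothetical host): fetch and cache the index of a
+# remote PKGDIR, getting back a dict of tbz2 filename -> metadata dict.
+#   remote_meta = dir_get_metadata("http://packages.example.org/All")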
diff --git a/pym/portage/gpg.py b/pym/portage/gpg.py
new file mode 100644
index 00000000..04ed6004
--- /dev/null
+++ b/pym/portage/gpg.py
@@ -0,0 +1,149 @@
+# portage_gpg.py -- core Portage functionality
+# Copyright 2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+
+import os
+import copy
+import types
+import commands
+import portage_exception
+import portage_checksum
+
+GPG_BINARY = "/usr/bin/gpg"
+GPG_OPTIONS = " --lock-never --no-random-seed-file --no-greeting --no-sig-cache "
+GPG_VERIFY_FLAGS = " --verify "
+GPG_KEYDIR = " --homedir '%s' "
+GPG_KEYRING = " --keyring '%s' "
+
+UNTRUSTED = 0
+EXISTS = UNTRUSTED + 1
+MARGINAL = EXISTS + 1
+TRUSTED = MARGINAL + 1
+
+def fileStats(filepath):
+ mya = []
+ for x in os.stat(filepath):
+ mya.append(x)
+ mya.append(portage_checksum.perform_checksum(filepath))
+ return mya
+
+
+class FileChecker:
+ def __init__(self,keydir=None,keyring=None,requireSignedRing=False,minimumTrust=EXISTS):
+  self.minimumTrust = TRUSTED # Default: require trust, while verifying the ring itself.
+ self.keydir = None
+ self.keyring = None
+ self.keyringPath = None
+ self.keyringStats = None
+ self.keyringIsTrusted = False
+
+ if (keydir != None):
+ # Verify that the keydir is valid.
+ if type(keydir) != types.StringType:
+ raise portage_exception.InvalidDataType, "keydir argument: %s" % keydir
+ if not os.path.isdir(keydir):
+ raise portage_exception.DirectoryNotFound, "keydir: %s" % keydir
+ self.keydir = copy.deepcopy(keydir)
+
+ if (keyring != None):
+ # Verify that the keyring is a valid filename and exists.
+ if type(keyring) != types.StringType:
+ raise portage_exception.InvalidDataType, "keyring argument: %s" % keyring
+ if keyring.find("/") != -1:
+ raise portage_exception.InvalidData, "keyring: %s" % keyring
+ pathname = ""
+ if keydir:
+ pathname = keydir + "/" + keyring
+ if not os.path.isfile(pathname):
+ raise portage_exception.FileNotFound, "keyring missing: %s (dev.gentoo.org/~carpaski/gpg/)" % pathname
+
+ keyringPath = keydir+"/"+keyring
+
+  if requireSignedRing and (not keyring or not keyringPath):
+ raise portage_exception.MissingParameter
+
+ self.keyringStats = fileStats(keyringPath)
+ self.minimumTrust = TRUSTED
+ if not self.verify(keyringPath, keyringPath+".asc"):
+ self.keyringIsTrusted = False
+ if requireSignedRing:
+ raise portage_exception.InvalidSignature, "Required keyring verification: "+keyringPath
+ else:
+ self.keyringIsTrusted = True
+
+ self.keyring = copy.deepcopy(keyring)
+ self.keyringPath = self.keydir+"/"+self.keyring
+ self.minimumTrust = minimumTrust
+
+ def _verifyKeyring(self):
+ if self.keyringStats and self.keyringPath:
+ new_stats = fileStats(self.keyringPath)
+ if new_stats != self.keyringStats:
+ raise portage_exception.SecurityViolation, "GPG keyring changed!"
+
+ def verify(self, filename, sigfile=None):
+  """Uses minimumTrust to decide whether the signature is valid (True) or not (False)"""
+ self._verifyKeyring()
+
+ if not os.path.isfile(filename):
+ raise portage_exception.FileNotFound, filename
+
+ if sigfile and not os.path.isfile(sigfile):
+ raise portage_exception.FileNotFound, sigfile
+
+ if self.keydir and not os.path.isdir(self.keydir):
+   raise portage_exception.DirectoryNotFound, self.keydir
+
+ if self.keyringPath:
+ if not os.path.isfile(self.keyringPath):
+ raise portage_exception.FileNotFound, self.keyringPath
+
+ if not os.path.isfile(filename):
+ raise portage_exception.CommandNotFound, filename
+
+ command = GPG_BINARY + GPG_VERIFY_FLAGS + GPG_OPTIONS
+ if self.keydir:
+ command += GPG_KEYDIR % (self.keydir)
+ if self.keyring:
+ command += GPG_KEYRING % (self.keyring)
+
+ if sigfile:
+ command += " '"+sigfile+"'"
+ command += " '"+filename+"'"
+
+ result,output = commands.getstatusoutput(command)
+
+ signal = result & 0xff
+ result = (result >> 8)
+
+ if signal:
+   raise portage_exception.PortageException, "Signal: %d" % (signal)
+
+ trustLevel = UNTRUSTED
+ if result == 0:
+ trustLevel = TRUSTED
+ #if output.find("WARNING") != -1:
+ # trustLevel = MARGINAL
+ if output.find("BAD") != -1:
+ raise portage_exception.InvalidSignature, filename
+ elif result == 1:
+ trustLevel = EXISTS
+ if output.find("BAD") != -1:
+ raise portage_exception.InvalidSignature, filename
+ elif result == 2:
+ trustLevel = UNTRUSTED
+ if output.find("could not be verified") != -1:
+ raise portage_exception.MissingSignature, filename
+ if output.find("public key not found") != -1:
+ if self.keyringIsTrusted: # We trust the ring, but not the key specifically.
+ trustLevel = MARGINAL
+ else:
+ raise portage_exception.InvalidSignature, filename+" (Unknown Signature)"
+ else:
+   raise portage_exception.PortageException, "GPG returned unknown result: %d" % (result)
+
+ if trustLevel >= self.minimumTrust:
+ return True
+ return False
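+
+# Illustrative only (hypothetical paths): verify a detached signature,
+# requiring at least MARGINAL trust.
+#   checker = FileChecker(keydir="/etc/portage/gpg", keyring="gentoo.gpg",
+#       minimumTrust=MARGINAL)
+#   ok = checker.verify("/usr/portage/metadata.dtd",
+#       "/usr/portage/metadata.dtd.asc")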
diff --git a/pym/portage/localization.py b/pym/portage/localization.py
new file mode 100644
index 00000000..59ccea71
--- /dev/null
+++ b/pym/portage/localization.py
@@ -0,0 +1,21 @@
+# portage_localization.py -- Code to manage/help portage localization.
+# Copyright 2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+
+# We define this to make the transition easier for us.
+def _(mystr):
+ return mystr
+
+
+def localization_example():
+ # Dict references allow translators to rearrange word order.
+ print _("You can use this string for translating.")
+ print _("Strings can be formatted with %(mystr)s like this.") % {"mystr": "VALUES"}
+
+ a_value = "value.of.a"
+ b_value = 123
+ c_value = [1,2,3,4]
+ print _("A: %(a)s -- B: %(b)s -- C: %(c)s") % {"a":a_value,"b":b_value,"c":c_value}
+
diff --git a/pym/portage/locks.py b/pym/portage/locks.py
new file mode 100644
index 00000000..28042e2f
--- /dev/null
+++ b/pym/portage/locks.py
@@ -0,0 +1,312 @@
+# portage: Lock management code
+# Copyright 2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+
+import errno, os, stat, time, types
+from portage_exception import InvalidData, DirectoryNotFound, FileNotFound
+from portage_data import portage_gid
+from portage_util import writemsg
+from portage_localization import _
+
+HARDLINK_FD = -2
+
+def lockdir(mydir):
+ return lockfile(mydir,wantnewlockfile=1)
+def unlockdir(mylock):
+ return unlockfile(mylock)
+
+def lockfile(mypath,wantnewlockfile=0,unlinkfile=0):
+ """Creates a lockfile for the given path as the hidden file
+ '.'+basename+'.portage_lockfile' (the parent directory must already exist)."""
+ import fcntl
+
+ if not mypath:
+ raise InvalidData, "Empty path given"
+
+ if type(mypath) == types.StringType and mypath[-1] == '/':
+ mypath = mypath[:-1]
+
+ if type(mypath) == types.FileType:
+ mypath = mypath.fileno()
+ if type(mypath) == types.IntType:
+ lockfilename = mypath
+ wantnewlockfile = 0
+ unlinkfile = 0
+ elif wantnewlockfile:
+ base, tail = os.path.split(mypath)
+ lockfilename = os.path.join(base, "." + tail + ".portage_lockfile")
+ del base, tail
+ unlinkfile = 1
+ else:
+ lockfilename = mypath
+
+ if type(mypath) == types.StringType:
+ if not os.path.exists(os.path.dirname(mypath)):
+ raise DirectoryNotFound, os.path.dirname(mypath)
+ if not os.path.exists(lockfilename):
+ old_mask=os.umask(000)
+ myfd = os.open(lockfilename, os.O_CREAT|os.O_RDWR,0660)
+ try:
+ if os.stat(lockfilename).st_gid != portage_gid:
+ os.chown(lockfilename,os.getuid(),portage_gid)
+ except OSError, e:
+ if e[0] == 2: # No such file or directory
+ return lockfile(mypath,wantnewlockfile,unlinkfile)
+ else:
+    writemsg("Cannot chown a lockfile. This could cause inconvenience later.\n")
+ os.umask(old_mask)
+ else:
+ myfd = os.open(lockfilename, os.O_CREAT|os.O_RDWR,0660)
+
+ elif type(mypath) == types.IntType:
+ myfd = mypath
+
+ else:
+ raise ValueError, "Unknown type passed in '%s': '%s'" % (type(mypath),mypath)
+
+ # try for a non-blocking lock, if it's held, throw a message
+ # we're waiting on lockfile and use a blocking attempt.
+ locking_method = fcntl.lockf
+ try:
+ fcntl.lockf(myfd,fcntl.LOCK_EX|fcntl.LOCK_NB)
+ except IOError, e:
+ if "errno" not in dir(e):
+ raise
+ if e.errno == errno.EAGAIN:
+ # resource temp unavailable; eg, someone beat us to the lock.
+ if type(mypath) == types.IntType:
+ print "waiting for lock on fd %i" % myfd
+ else:
+ print "waiting for lock on %s" % lockfilename
+ # try for the exclusive lock now.
+ fcntl.lockf(myfd,fcntl.LOCK_EX)
+ elif e.errno == errno.ENOLCK:
+ # We're not allowed to lock on this FS.
+ os.close(myfd)
+ link_success = False
+ if lockfilename == str(lockfilename):
+ if wantnewlockfile:
+ try:
+ if os.stat(lockfilename)[stat.ST_NLINK] == 1:
+ os.unlink(lockfilename)
+ except OSError:
+ pass
+ link_success = hardlink_lockfile(lockfilename)
+ if not link_success:
+ raise
+ locking_method = None
+ myfd = HARDLINK_FD
+ else:
+ raise
+
+
+ if type(lockfilename) == types.StringType and \
+ myfd != HARDLINK_FD and os.fstat(myfd).st_nlink == 0:
+ # The file was deleted on us... Keep trying to make one...
+ os.close(myfd)
+ writemsg("lockfile recurse\n",1)
+ lockfilename,myfd,unlinkfile,locking_method = lockfile(mypath,wantnewlockfile,unlinkfile)
+
+ writemsg(str((lockfilename,myfd,unlinkfile))+"\n",1)
+ return (lockfilename,myfd,unlinkfile,locking_method)
+
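+# Illustrative only (hypothetical path): take and release a lock around a
+# critical section.
+#   mylock = lockfile("/var/db/pkg/.example", wantnewlockfile=1)
+#   # ... do work ...
+#   unlockfile(mylock)
+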
+def unlockfile(mytuple):
+ import fcntl
+
+ #XXX: Compatibility hack.
+ if len(mytuple) == 3:
+ lockfilename,myfd,unlinkfile = mytuple
+ locking_method = fcntl.flock
+ elif len(mytuple) == 4:
+ lockfilename,myfd,unlinkfile,locking_method = mytuple
+ else:
+ raise InvalidData
+
+ if(myfd == HARDLINK_FD):
+ unhardlink_lockfile(lockfilename)
+ return True
+
+ # myfd may be None here due to myfd = mypath in lockfile()
+ if type(lockfilename) == types.StringType and not os.path.exists(lockfilename):
+ writemsg("lockfile does not exist '%s'\n" % lockfilename,1)
+ if myfd is not None:
+ os.close(myfd)
+ return False
+
+ try:
+ if myfd is None:
+ myfd = os.open(lockfilename, os.O_WRONLY,0660)
+ unlinkfile = 1
+ locking_method(myfd,fcntl.LOCK_UN)
+ except OSError:
+ if type(lockfilename) == types.StringType:
+ os.close(myfd)
+ raise IOError, "Failed to unlock file '%s'\n" % lockfilename
+
+ try:
+ # This sleep call was added to allow other processes that are
+ # waiting for a lock to be able to grab it before it is deleted.
+ # lockfile() already accounts for this situation, however, and
+ # the sleep here adds more time than is saved overall, so am
+ # commenting until it is proved necessary.
+ #time.sleep(0.0001)
+ if unlinkfile:
+ locking_method(myfd,fcntl.LOCK_EX|fcntl.LOCK_NB)
+ # We won the lock, so there isn't competition for it.
+ # We can safely delete the file.
+ writemsg("Got the lockfile...\n",1)
+ if os.fstat(myfd).st_nlink == 1:
+ os.unlink(lockfilename)
+ writemsg("Unlinked lockfile...\n",1)
+ locking_method(myfd,fcntl.LOCK_UN)
+ else:
+ writemsg("lockfile does not exist '%s'\n" % lockfilename,1)
+ os.close(myfd)
+ return False
+ except Exception, e:
+ writemsg("Failed to get lock... someone took it.\n",1)
+ writemsg(str(e)+"\n",1)
+
+ # why test lockfilename? because we may have been handed an
+ # fd originally, and the caller might not like having their
+ # open fd closed automatically on them.
+ if type(lockfilename) == types.StringType:
+ os.close(myfd)
+
+ return True
+
+
+
+
+def hardlock_name(path):
+ return path+".hardlock-"+os.uname()[1]+"-"+str(os.getpid())
+
+def hardlink_is_mine(link,lock):
+ try:
+ return os.stat(link).st_nlink == 2
+ except OSError:
+ return False
+
+def hardlink_lockfile(lockfilename, max_wait=14400):
+ """Does the NFS, hardlink shuffle to ensure locking on the disk.
+ We create a PRIVATE lockfile, that is just a placeholder on the disk.
+ Then we HARDLINK the real lockfile to that private file.
+ If our file has 2 references, then we have the lock. :)
+ Otherwise we lather, rinse, and repeat.
+ We default to a 4 hour timeout.
+ """
+
+ start_time = time.time()
+ myhardlock = hardlock_name(lockfilename)
+ reported_waiting = False
+
+ while(time.time() < (start_time + max_wait)):
+ # We only need it to exist.
+ myfd = os.open(myhardlock, os.O_CREAT|os.O_RDWR,0660)
+ os.close(myfd)
+
+ if not os.path.exists(myhardlock):
+ raise FileNotFound, _("Created lockfile is missing: %(filename)s") % {"filename":myhardlock}
+
+ try:
+   os.link(myhardlock, lockfilename)
+ except OSError:
+ pass
+
+ if hardlink_is_mine(myhardlock, lockfilename):
+ # We have the lock.
+ if reported_waiting:
+ print
+ return True
+
+ if reported_waiting:
+ writemsg(".")
+ else:
+ reported_waiting = True
+ print
+ print "Waiting on (hardlink) lockfile: (one '.' per 3 seconds)"
+ print "This is a feature to prevent distfiles corruption."
+ print "/usr/lib/portage/bin/clean_locks can fix stuck locks."
+ print "Lockfile: " + lockfilename
+ time.sleep(3)
+
+ os.unlink(myhardlock)
+ return False
+
+def unhardlink_lockfile(lockfilename):
+ myhardlock = hardlock_name(lockfilename)
+ if hardlink_is_mine(myhardlock, lockfilename):
+ # Make sure not to touch lockfilename unless we really have a lock.
+ try:
+ os.unlink(lockfilename)
+ except OSError:
+ pass
+ try:
+ os.unlink(myhardlock)
+ except OSError:
+ pass
+
+def hardlock_cleanup(path, remove_all_locks=False):
+ mypid = str(os.getpid())
+ myhost = os.uname()[1]
+ mydl = os.listdir(path)
+
+ results = []
+ mycount = 0
+
+ mylist = {}
+ for x in mydl:
+ if os.path.isfile(path+"/"+x):
+ parts = x.split(".hardlock-")
+ if len(parts) == 2:
+ filename = parts[0]
+ hostpid = parts[1].split("-")
+ host = "-".join(hostpid[:-1])
+ pid = hostpid[-1]
+
+ if not mylist.has_key(filename):
+ mylist[filename] = {}
+ if not mylist[filename].has_key(host):
+ mylist[filename][host] = []
+ mylist[filename][host].append(pid)
+
+ mycount += 1
+
+
+ results.append("Found %(count)s locks" % {"count":mycount})
+
+ for x in mylist.keys():
+ if mylist[x].has_key(myhost) or remove_all_locks:
+ mylockname = hardlock_name(path+"/"+x)
+ if hardlink_is_mine(mylockname, path+"/"+x) or \
+ not os.path.exists(path+"/"+x) or \
+ remove_all_locks:
+ for y in mylist[x].keys():
+ for z in mylist[x][y]:
+ filename = path+"/"+x+".hardlock-"+y+"-"+z
+ if filename == mylockname:
+ continue
+ try:
+ # We're sweeping through, unlinking everyone's locks.
+ os.unlink(filename)
+ results.append(_("Unlinked: ") + filename)
+ except OSError:
+ pass
+ try:
+ os.unlink(path+"/"+x)
+ results.append(_("Unlinked: ") + path+"/"+x)
+ os.unlink(mylockname)
+ results.append(_("Unlinked: ") + mylockname)
+ except OSError:
+ pass
+ else:
+ try:
+ os.unlink(mylockname)
+ results.append(_("Unlinked: ") + mylockname)
+ except OSError:
+ pass
+
+ return results
+
diff --git a/pym/portage/mail.py b/pym/portage/mail.py
new file mode 100644
index 00000000..99ed77fd
--- /dev/null
+++ b/pym/portage/mail.py
@@ -0,0 +1,89 @@
+# portage.py -- core Portage functionality
+# Copyright 1998-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: portage.py 3483 2006-06-10 21:40:40Z genone $
+
+import portage_exception, socket, smtplib, os, sys, time
+from email.MIMEText import MIMEText as TextMessage
+from email.MIMEMultipart import MIMEMultipart as MultipartMessage
+from email.MIMEBase import MIMEBase as BaseMessage
+
+def create_message(sender, recipient, subject, body, attachments=None):
+ if attachments == None:
+ mymessage = TextMessage(body)
+ else:
+ mymessage = MultipartMessage()
+ mymessage.attach(TextMessage(body))
+ for x in attachments:
+ if isinstance(x, BaseMessage):
+ mymessage.attach(x)
+ elif isinstance(x, str):
+ mymessage.attach(TextMessage(x))
+ else:
+ raise portage_exception.PortageException("Can't handle type of attachment: %s" % type(x))
+
+ mymessage.set_unixfrom(sender)
+ mymessage["To"] = recipient
+ mymessage["From"] = sender
+ mymessage["Subject"] = subject
+ mymessage["Date"] = time.strftime("%a, %d %b %Y %H:%M:%S %z")
+
+ return mymessage
+
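+# Illustrative only (hypothetical addresses):
+#   msg = create_message("portage@localhost", "root@localhost",
+#       "[portage] elog summary", "build log text")
+#   send_mail(mysettings, msg)
+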
+def send_mail(mysettings, message):
+ mymailhost = "localhost"
+ mymailport = 25
+ mymailuser = ""
+ mymailpasswd = ""
+ myrecipient = "root@localhost"
+
+ # Syntax for PORTAGE_ELOG_MAILURI (if defined):
+ # address [[user:passwd@]mailserver[:port]]
+ # where address: recipient address
+ # user: username for smtp auth (defaults to none)
+ # passwd: password for smtp auth (defaults to none)
+ # mailserver: smtp server that should be used to deliver the mail (defaults to localhost)
+ # alternatively this can also be the absolute path to a sendmail binary if you don't want to use smtp
+ # port: port to use on the given smtp server (defaults to 25, values > 100000 indicate that starttls should be used on (port-100000))
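+ # Illustrative values (hypothetical hosts):
+ #   PORTAGE_ELOG_MAILURI="root@localhost"
+ #   PORTAGE_ELOG_MAILURI="me@example.org user:pass@smtp.example.org:100587"
+ #     (port 100587 selects starttls on port 587, per the rule above)
+ #   PORTAGE_ELOG_MAILURI="me@example.org /usr/sbin/sendmail"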
+ if " " in mysettings["PORTAGE_ELOG_MAILURI"]:
+ myrecipient, mymailuri = mysettings["PORTAGE_ELOG_MAILURI"].split()
+ if "@" in mymailuri:
+ myauthdata, myconndata = mymailuri.rsplit("@", 1)
+ try:
+ mymailuser,mymailpasswd = myauthdata.split(":")
+ except ValueError:
+ print "!!! invalid SMTP AUTH configuration, trying unauthenticated ..."
+ else:
+ myconndata = mymailuri
+ if ":" in myconndata:
+ mymailhost,mymailport = myconndata.split(":")
+ else:
+ mymailhost = myconndata
+ else:
+ myrecipient = mysettings["PORTAGE_ELOG_MAILURI"]
+
+ myfrom = message.get("From")
+
+ # user wants to use a sendmail binary instead of smtp
+ if mymailhost[0] == os.sep and os.path.exists(mymailhost):
+ fd = os.popen(mymailhost+" -f "+myfrom+" "+myrecipient, "w")
+ fd.write(message.as_string())
+ if fd.close() != None:
+ sys.stderr.write("!!! %s returned with a non-zero exit code. This generally indicates an error.\n" % mymailhost)
+ else:
+ try:
+ if int(mymailport) > 100000:
+ myconn = smtplib.SMTP(mymailhost, int(mymailport) - 100000)
+ myconn.starttls()
+ else:
+ myconn = smtplib.SMTP(mymailhost, mymailport)
+ if mymailuser != "" and mymailpasswd != "":
+ myconn.login(mymailuser, mymailpasswd)
+ myconn.sendmail(myfrom, myrecipient, message.as_string())
+ myconn.quit()
+ except smtplib.SMTPException, e:
+  raise portage_exception.PortageException("!!! An error occurred while trying to send logmail:\n"+str(e))
+ except socket.error, e:
+  raise portage_exception.PortageException("!!! A network error occurred while trying to send logmail:\n"+str(e)+"\nAre you sure you configured PORTAGE_ELOG_MAILURI correctly?")
+ return
+
diff --git a/pym/portage/manifest.py b/pym/portage/manifest.py
new file mode 100644
index 00000000..e621606c
--- /dev/null
+++ b/pym/portage/manifest.py
@@ -0,0 +1,618 @@
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: $
+
+import errno, os, sets
+if not hasattr(__builtins__, "set"):
+ from sets import Set as set
+
+import portage_exception, portage_versions, portage_const
+from portage_checksum import *
+from portage_exception import *
+from portage_util import write_atomic
+
+class FileNotInManifestException(PortageException):
+ pass
+
+def manifest2AuxfileFilter(filename):
+ filename = filename.strip(os.sep)
+ mysplit = filename.split(os.path.sep)
+ if "CVS" in mysplit:
+ return False
+ for x in mysplit:
+ if x.startswith("."):
+ return False
+ return not filename.startswith("digest-")
+
+def manifest2MiscfileFilter(filename):
+ filename = filename.strip(os.sep)
+ return not (filename in ["CVS", ".svn", "files", "Manifest"] or filename.endswith(".ebuild"))
+
+def guessManifestFileType(filename):
+ """ Perform a best-effort guess of the given filename's type; avoid using this if possible """
+ if filename.startswith("files" + os.sep + "digest-"):
+ return None
+ if filename.startswith("files" + os.sep):
+ return "AUX"
+ elif filename.endswith(".ebuild"):
+ return "EBUILD"
+ elif filename in ["ChangeLog", "metadata.xml"]:
+ return "MISC"
+ else:
+ return "DIST"
+
+def parseManifest2(mysplit):
+ myentry = None
+ if len(mysplit) > 4 and mysplit[0] in portage_const.MANIFEST2_IDENTIFIERS:
+ mytype = mysplit[0]
+ myname = mysplit[1]
+ mysize = int(mysplit[2])
+ myhashes = dict(zip(mysplit[3::2], mysplit[4::2]))
+ myhashes["size"] = mysize
+ myentry = Manifest2Entry(type=mytype, name=myname, hashes=myhashes)
+ return myentry
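+
+# Illustrative Manifest2 line as parsed above (hypothetical hash values):
+#   DIST foo-1.0.tar.gz 90834 MD5 5d41402abc4b2a76b9719d911017c592 RMD160 <hex>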
+
+def parseManifest1(mysplit):
+ myentry = None
+ if len(mysplit) == 4 and mysplit[0] in ["size"] + portage_const.MANIFEST1_HASH_FUNCTIONS:
+ myname = mysplit[2]
+  mytype = guessManifestFileType(myname)
+ if mytype == "AUX":
+ if myname.startswith("files" + os.path.sep):
+ myname = myname[6:]
+ mysize = int(mysplit[3])
+ myhashes = {mysplit[0]: mysplit[1]}
+ myhashes["size"] = mysize
+ myentry = Manifest1Entry(type=mytype, name=myname, hashes=myhashes)
+ return myentry
+
+class ManifestEntry(object):
+ __slots__ = ("type", "name", "hashes")
+ def __init__(self, **kwargs):
+ for k, v in kwargs.iteritems():
+ setattr(self, k, v)
+ def __cmp__(self, other):
+ if str(self) == str(other):
+ return 0
+ return 1
+
+class Manifest1Entry(ManifestEntry):
+ def __str__(self):
+ myhashkeys = self.hashes.keys()
+ for hashkey in myhashkeys:
+ if hashkey != "size":
+ break
+ hashvalue = self.hashes[hashkey]
+ myname = self.name
+ if self.type == "AUX" and not myname.startswith("files" + os.sep):
+ myname = os.path.join("files", myname)
+ return " ".join([hashkey, str(hashvalue), myname, str(self.hashes["size"])])
+
+class Manifest2Entry(ManifestEntry):
+ def __str__(self):
+ myline = " ".join([self.type, self.name, str(self.hashes["size"])])
+ myhashkeys = self.hashes.keys()
+ myhashkeys.remove("size")
+ myhashkeys.sort()
+ for h in myhashkeys:
+ myline += " " + h + " " + str(self.hashes[h])
+ return myline
+
+class Manifest(object):
+ parsers = (parseManifest2, parseManifest1)
+ def __init__(self, pkgdir, distdir, fetchlist_dict=None,
+ manifest1_compat=True, from_scratch=False):
+ """ create new Manifest instance for package in pkgdir
+ and add compatibility entries for old portage versions if manifest1_compat == True.
+ Do not parse Manifest file if from_scratch == True (only for internal use)
+ The fetchlist_dict parameter is required only for generation of
+ a Manifest (not needed for parsing and checking sums)."""
+ self.pkgdir = pkgdir.rstrip(os.sep) + os.sep
+ self.fhashdict = {}
+ self.hashes = portage_const.MANIFEST2_HASH_FUNCTIONS[:]
+ self.hashes.append("size")
+ if manifest1_compat:
+ self.hashes.extend(portage_const.MANIFEST1_HASH_FUNCTIONS)
+ self.hashes = sets.Set(self.hashes)
+ for t in portage_const.MANIFEST2_IDENTIFIERS:
+ self.fhashdict[t] = {}
+ if not from_scratch:
+ self._read()
+ self.compat = manifest1_compat
+ if fetchlist_dict != None:
+ self.fetchlist_dict = fetchlist_dict
+ else:
+ self.fetchlist_dict = {}
+ self.distdir = distdir
+ self.guessType = guessManifestFileType
+
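+ # Illustrative only (hypothetical paths): parse an existing Manifest and
+ # fetch its digests.
+ #   m = Manifest("/usr/portage/app-misc/foo", "/usr/portage/distfiles")
+ #   digests = m.getDigests()
+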
+ def getFullname(self):
+ """ Returns the absolute path to the Manifest file for this instance """
+ return os.path.join(self.pkgdir, "Manifest")
+
+ def getDigests(self):
+ """ Compatibility function for old digest/manifest code, returns dict of filename:{hashfunction:hashvalue} """
+ rval = {}
+ for t in portage_const.MANIFEST2_IDENTIFIERS:
+ rval.update(self.fhashdict[t])
+ return rval
+
+ def getTypeDigests(self, ftype):
+ """ Similar to getDigests(), but restricted to files of the given type. """
+ return self.fhashdict[ftype]
+
+ def _readDigests(self, myhashdict=None):
+ """ Parse old style digest files for this Manifest instance """
+ if myhashdict is None:
+ myhashdict = {}
+ try:
+ for d in os.listdir(os.path.join(self.pkgdir, "files")):
+ if d.startswith("digest-"):
+ self._readManifest(os.path.join(self.pkgdir, "files", d), mytype="DIST",
+ myhashdict=myhashdict)
+ except (IOError, OSError), e:
+ if e.errno == errno.ENOENT:
+ pass
+ else:
+ raise
+ return myhashdict
+
+ def _readManifest(self, file_path, myhashdict=None, **kwargs):
+ """Parse a manifest or an old style digest. If myhashdict is given
+ then data will be added to it. Otherwise, a new dict will be created
+ and returned."""
+ try:
+ fd = open(file_path, "r")
+ if myhashdict is None:
+ myhashdict = {}
+ self._parseDigests(fd, myhashdict=myhashdict, **kwargs)
+ fd.close()
+ return myhashdict
+ except (OSError, IOError), e:
+ if e.errno == errno.ENOENT:
+ raise FileNotFound(file_path)
+ else:
+ raise
+
+ def _read(self):
+ """ Parse Manifest file for this instance """
+ try:
+ self._readManifest(self.getFullname(), myhashdict=self.fhashdict)
+ except FileNotFound:
+ pass
+ self._readDigests(myhashdict=self.fhashdict)
+
+
+ def _parseManifestLines(self, mylines):
+ """Parse manifest lines and return a list of manifest entries."""
+ for myline in mylines:
+ myentry = None
+ mysplit = myline.split()
+ for parser in self.parsers:
+ myentry = parser(mysplit)
+ if myentry is not None:
+ yield myentry
+ break # go to the next line
+
+ def _parseDigests(self, mylines, myhashdict=None, mytype=None):
+ """Parse manifest entries and store the data in myhashdict. If mytype
+ is specified, it will override the type for all parsed entries."""
+ if myhashdict is None:
+ myhashdict = {}
+ for myentry in self._parseManifestLines(mylines):
+ if mytype is None:
+ myentry_type = myentry.type
+ else:
+ myentry_type = mytype
+ myhashdict.setdefault(myentry_type, {})
+ myhashdict[myentry_type].setdefault(myentry.name, {})
+ myhashdict[myentry_type][myentry.name].update(myentry.hashes)
+ return myhashdict
+
+ def _writeDigests(self, force=False):
+ """ Create old style digest files for this Manifest instance """
+ cpvlist = [os.path.join(self._pkgdir_category(), x[:-7]) for x in os.listdir(self.pkgdir) if x.endswith(".ebuild")]
+ rval = []
+ try:
+ os.makedirs(os.path.join(self.pkgdir, "files"))
+ except OSError, oe:
+ if oe.errno == errno.EEXIST:
+ pass
+ else:
+ raise
+ for cpv in cpvlist:
+ dname = os.path.join(self.pkgdir, "files", "digest-%s" % self._catsplit(cpv)[1])
+ distlist = self._getCpvDistfiles(cpv)
+ missing_digests = set()
+ for f in distlist:
+ if f not in self.fhashdict["DIST"] or len(self.fhashdict["DIST"][f]) == 0:
+ missing_digests.add(f)
+ if missing_digests:
+ # This allows us to force removal of stale digests for the
+ # ebuild --force digest option.
+ distlist = [f for f in distlist if f not in missing_digests]
+ update_digest = True
+ if not force:
+ try:
+ f = open(dname, "r")
+ old_data = self._parseDigests(f)
+ f.close()
+ if len(old_data) == 1 and "DIST" in old_data:
+ new_data = self._getDigestData(distlist)
+ if "DIST" in new_data:
+ for myfile in new_data["DIST"]:
+ for hashname in \
+ new_data["DIST"][myfile].keys():
+ if hashname != "size" and hashname not in \
+ portage_const.MANIFEST1_HASH_FUNCTIONS:
+ del new_data["DIST"][myfile][hashname]
+ if new_data["DIST"] == old_data["DIST"]:
+ update_digest = False
+ except (IOError, OSError), e:
+ if errno.ENOENT == e.errno:
+ pass
+ else:
+ raise
+ if update_digest:
+ mylines = self._createDigestLines1(distlist, self.fhashdict)
+ if mylines:
+ mylines = "\n".join(mylines) + "\n"
+ else:
+ mylines = ""
+ write_atomic(dname, mylines)
+ rval.append(dname)
+ return rval
+
+ def _getDigestData(self, distlist):
+ """create a hash dict for a specific list of files"""
+ myhashdict = {}
+ for myname in distlist:
+ for mytype in self.fhashdict:
+ if myname in self.fhashdict[mytype]:
+ myhashdict.setdefault(mytype, {})
+ myhashdict[mytype].setdefault(myname, {})
+ myhashdict[mytype][myname].update(self.fhashdict[mytype][myname])
+ return myhashdict
+
+ def _createDigestLines1(self, distlist, myhashdict):
+ """ Create an old style digest file."""
+ mylines = []
+ myfiles = myhashdict["DIST"].keys()
+ myfiles.sort()
+ for f in myfiles:
+ if f in distlist:
+ myhashkeys = myhashdict["DIST"][f].keys()
+ myhashkeys.sort()
+ for h in myhashkeys:
+ if h not in portage_const.MANIFEST1_HASH_FUNCTIONS:
+ continue
+ myline = " ".join([h, str(myhashdict["DIST"][f][h]), f, str(myhashdict["DIST"][f]["size"])])
+ mylines.append(myline)
+ return mylines
+
+ def _addDigestsToManifest(self, digests, fd):
+ """ Add entries for old style digest files to Manifest file """
+ mylines = []
+ for dname in digests:
+ myhashes = perform_multiple_checksums(dname, portage_const.MANIFEST1_HASH_FUNCTIONS+["size"])
+ for h in myhashes:
+ mylines.append((" ".join([h, str(myhashes[h]), os.path.join("files", os.path.basename(dname)), str(myhashes["size"])])))
+ fd.write("\n".join(mylines))
+ fd.write("\n")
+
+ def _createManifestEntries(self):
+ mytypes = self.fhashdict.keys()
+ mytypes.sort()
+ for t in mytypes:
+ myfiles = self.fhashdict[t].keys()
+ myfiles.sort()
+ for f in myfiles:
+ myentry = Manifest2Entry(
+ type=t, name=f, hashes=self.fhashdict[t][f].copy())
+ myhashkeys = myentry.hashes.keys()
+ for h in myhashkeys:
+ if h not in ["size"] + portage_const.MANIFEST2_HASH_FUNCTIONS:
+ del myentry.hashes[h]
+ yield myentry
+ if self.compat and t != "DIST":
+ mysize = self.fhashdict[t][f]["size"]
+ myhashes = self.fhashdict[t][f]
+ for h in myhashkeys:
+ if h not in portage_const.MANIFEST1_HASH_FUNCTIONS:
+ continue
+ yield Manifest1Entry(
+ type=t, name=f, hashes={"size":mysize, h:myhashes[h]})
+
+ if self.compat:
+ cpv_list = self.fetchlist_dict.keys()
+ cpv_list.sort()
+ for cpv in cpv_list:
+ digest_path = os.path.join("files", "digest-%s" % self._catsplit(cpv)[1])
+ dname = os.path.join(self.pkgdir, digest_path)
+ try:
+ myhashes = perform_multiple_checksums(dname, portage_const.MANIFEST1_HASH_FUNCTIONS+["size"])
+ myhashkeys = myhashes.keys()
+ myhashkeys.sort()
+ for h in myhashkeys:
+ if h in portage_const.MANIFEST1_HASH_FUNCTIONS:
+ yield Manifest1Entry(type="AUX", name=digest_path,
+ hashes={"size":myhashes["size"], h:myhashes[h]})
+ except FileNotFound:
+ pass
+
+ def write(self, sign=False, force=False):
+ """ Write Manifest instance to disk, optionally signing it """
+ try:
+ if self.compat:
+ self._writeDigests()
+ myentries = list(self._createManifestEntries())
+ update_manifest = True
+ if not force:
+ try:
+ f = open(self.getFullname(), "r")
+ oldentries = list(self._parseManifestLines(f))
+ f.close()
+ if len(oldentries) == len(myentries):
+ update_manifest = False
+ for i in xrange(len(oldentries)):
+ if oldentries[i] != myentries[i]:
+ update_manifest = True
+ break
+ except (IOError, OSError), e:
+ if e.errno == errno.ENOENT:
+ pass
+ else:
+ raise
+ if update_manifest:
+ fd = open(self.getFullname(), "w")
+ for myentry in myentries:
+ fd.write("%s\n" % str(myentry))
+ fd.close()
+ if sign:
+ self.sign()
+ except (IOError, OSError), e:
+ if e.errno == errno.EACCES:
+ raise PermissionDenied(str(e))
+ raise
+
+ def sign(self):
+ """ Sign the Manifest """
+ raise NotImplementedError()
+
+ def validateSignature(self):
+ """ Validate signature on Manifest """
+ raise NotImplementedError()
+
+ def addFile(self, ftype, fname, hashdict=None, ignoreMissing=False):
+ """ Add entry to Manifest optionally using hashdict to avoid recalculation of hashes """
+ if ftype == "AUX" and not fname.startswith("files/"):
+ fname = os.path.join("files", fname)
+ if not os.path.exists(self.pkgdir+fname) and not ignoreMissing:
+ raise FileNotFound(fname)
+ if ftype not in portage_const.MANIFEST2_IDENTIFIERS:
+ raise InvalidDataType(ftype)
+ if ftype == "AUX" and fname.startswith("files"):
+ fname = fname[6:]
+ self.fhashdict[ftype][fname] = {}
+ if hashdict is not None:
+ self.fhashdict[ftype][fname].update(hashdict)
+ if portage_const.MANIFEST2_REQUIRED_HASH not in self.fhashdict[ftype][fname]:
+ self.updateFileHashes(ftype, fname, checkExisting=False, ignoreMissing=ignoreMissing)
+
+ def removeFile(self, ftype, fname):
+ """ Remove given entry from Manifest """
+ del self.fhashdict[ftype][fname]
+
+ def hasFile(self, ftype, fname):
+ """ Return wether the Manifest contains an entry for the given type,filename pair """
+ return (fname in self.fhashdict[ftype])
+
+ def findFile(self, fname):
+ """ Return entrytype of the given file if present in Manifest or None if not present """
+ for t in portage_const.MANIFEST2_IDENTIFIERS:
+ if fname in self.fhashdict[t]:
+ return t
+ return None
+
+ def create(self, checkExisting=False, assumeDistHashesSometimes=False,
+ assumeDistHashesAlways=False, requiredDistfiles=[]):
+ """ Recreate this Manifest from scratch. This will not use any
+ existing checksums unless assumeDistHashesSometimes or
+ assumeDistHashesAlways is true (assumeDistHashesSometimes will only
+ cause DIST checksums to be reused if the file doesn't exist in
+ DISTDIR). The requiredDistfiles parameter specifies a list of
+ distfiles to raise a FileNotFound exception for (if no file or existing
+ checksums are available), and defaults to all distfiles when not
+ specified."""
+ if checkExisting:
+ self.checkAllHashes()
+ if assumeDistHashesSometimes or assumeDistHashesAlways:
+ distfilehashes = self.fhashdict["DIST"]
+ else:
+ distfilehashes = {}
+ self.__init__(self.pkgdir, self.distdir,
+ fetchlist_dict=self.fetchlist_dict, from_scratch=True)
+ for pkgdir, pkgdir_dirs, pkgdir_files in os.walk(self.pkgdir):
+ break
+ for f in pkgdir_files:
+ if f.endswith(".ebuild"):
+ mytype = "EBUILD"
+ elif manifest2MiscfileFilter(f):
+ mytype = "MISC"
+ else:
+ continue
+ self.fhashdict[mytype][f] = perform_multiple_checksums(self.pkgdir+f, self.hashes)
+ recursive_files = []
+ cut_len = len(os.path.join(self.pkgdir, "files") + os.sep)
+ for parentdir, dirs, files in os.walk(os.path.join(self.pkgdir, "files")):
+ for f in files:
+ full_path = os.path.join(parentdir, f)
+ recursive_files.append(full_path[cut_len:])
+ for f in recursive_files:
+ if not manifest2AuxfileFilter(f):
+ continue
+ self.fhashdict["AUX"][f] = perform_multiple_checksums(
+ os.path.join(self.pkgdir, "files", f.lstrip(os.sep)), self.hashes)
+ cpvlist = [os.path.join(self._pkgdir_category(), x[:-7]) for x in os.listdir(self.pkgdir) if x.endswith(".ebuild")]
+ distlist = set()
+ for cpv in cpvlist:
+ distlist.update(self._getCpvDistfiles(cpv))
+ if requiredDistfiles is None:
+ # This allows us to force removal of stale digests for the
+ # ebuild --force digest option (no distfiles are required).
+ requiredDistfiles = set()
+ elif len(requiredDistfiles) == 0:
+ # repoman passes in an empty list, which implies that all distfiles
+ # are required.
+ requiredDistfiles = distlist.copy()
+ for f in distlist:
+ fname = os.path.join(self.distdir, f)
+ mystat = None
+ try:
+ mystat = os.stat(fname)
+ except OSError:
+ pass
+ if f in distfilehashes and \
+ ((assumeDistHashesSometimes and mystat is None) or \
+ (assumeDistHashesAlways and mystat is None) or \
+ (assumeDistHashesAlways and mystat is not None and \
+ len(distfilehashes[f]) == len(self.hashes) and \
+ distfilehashes[f]["size"] == mystat.st_size)):
+ self.fhashdict["DIST"][f] = distfilehashes[f]
+ else:
+ try:
+ self.fhashdict["DIST"][f] = perform_multiple_checksums(fname, self.hashes)
+ except FileNotFound:
+ if f in requiredDistfiles:
+ raise
+
+ def _pkgdir_category(self):
+ return self.pkgdir.rstrip(os.sep).split(os.sep)[-2]
+
+ def _getAbsname(self, ftype, fname):
+ if ftype == "DIST":
+ absname = os.path.join(self.distdir, fname)
+ elif ftype == "AUX":
+ absname = os.path.join(self.pkgdir, "files", fname)
+ else:
+ absname = os.path.join(self.pkgdir, fname)
+ return absname
+
+ def checkAllHashes(self, ignoreMissingFiles=False):
+ for t in portage_const.MANIFEST2_IDENTIFIERS:
+ self.checkTypeHashes(t, ignoreMissingFiles=ignoreMissingFiles)
+
+ def checkTypeHashes(self, idtype, ignoreMissingFiles=False):
+ for f in self.fhashdict[idtype]:
+ self.checkFileHashes(idtype, f, ignoreMissing=ignoreMissingFiles)
+
+ def checkFileHashes(self, ftype, fname, ignoreMissing=False):
+ myhashes = self.fhashdict[ftype][fname]
+ try:
+ ok,reason = verify_all(self._getAbsname(ftype, fname), self.fhashdict[ftype][fname])
+ if not ok:
+ raise DigestException(tuple([self._getAbsname(ftype, fname)]+list(reason)))
+ return ok, reason
+ except FileNotFound, e:
+ if not ignoreMissing:
+ raise
+ return False, "File Not Found: '%s'" % str(e)
+
+ def checkCpvHashes(self, cpv, checkDistfiles=True, onlyDistfiles=False, checkMiscfiles=False):
+ """ check the hashes for all files associated to the given cpv, include all
+ AUX files and optionally all MISC files. """
+ if not onlyDistfiles:
+ self.checkTypeHashes("AUX", ignoreMissingFiles=False)
+ if checkMiscfiles:
+ self.checkTypeHashes("MISC", ignoreMissingFiles=False)
+ ebuildname = "%s.ebuild" % self._catsplit(cpv)[1]
+ self.checkFileHashes("EBUILD", ebuildname, ignoreMissing=False)
+ if checkDistfiles or onlyDistfiles:
+ for f in self._getCpvDistfiles(cpv):
+ self.checkFileHashes("DIST", f, ignoreMissing=False)
+
+ def _getCpvDistfiles(self, cpv):
+ """ Get a list of all DIST files associated to the given cpv """
+ return self.fetchlist_dict[cpv]
+
+ def getDistfilesSize(self, fetchlist):
+ total_bytes = 0
+ for f in fetchlist:
+ total_bytes += int(self.fhashdict["DIST"][f]["size"])
+ return total_bytes
+
+ def updateFileHashes(self, ftype, fname, checkExisting=True, ignoreMissing=True, reuseExisting=False):
+ """ Regenerate hashes for the given file """
+ if checkExisting:
+ self.checkFileHashes(ftype, fname, ignoreMissing=ignoreMissing)
+ if not ignoreMissing and not self.fhashdict[ftype].has_key(fname):
+ raise FileNotInManifestException(fname)
+ if not self.fhashdict[ftype].has_key(fname):
+ self.fhashdict[ftype][fname] = {}
+ myhashkeys = list(self.hashes)
+ if reuseExisting:
+ for k in [h for h in self.fhashdict[ftype][fname] if h in myhashkeys]:
+ myhashkeys.remove(k)
+ myhashes = perform_multiple_checksums(self._getAbsname(ftype, fname), myhashkeys)
+ self.fhashdict[ftype][fname].update(myhashes)
+
+ def updateTypeHashes(self, idtype, checkExisting=False, ignoreMissingFiles=True):
+ """ Regenerate all hashes for all files of the given type """
+ for fname in self.fhashdict[idtype]:
+ self.updateFileHashes(idtype, fname, checkExisting, ignoreMissing=ignoreMissingFiles)
+
+ def updateAllHashes(self, checkExisting=False, ignoreMissingFiles=True):
+ """ Regenerate all hashes for all files in this Manifest. """
+ for ftype in portage_const.MANIFEST2_IDENTIFIERS:
+ self.updateTypeHashes(ftype, checkExisting=checkExisting, ignoreMissingFiles=ignoreMissingFiles)
+
+ def updateCpvHashes(self, cpv, ignoreMissingFiles=True):
+ """ Regenerate all hashes associated to the given cpv (includes all AUX and MISC
+ files)."""
+ self.updateTypeHashes("AUX", ignoreMissingFiles=ignoreMissingFiles)
+ self.updateTypeHashes("MISC", ignoreMissingFiles=ignoreMissingFiles)
+ ebuildname = "%s.ebuild" % self._catsplit(cpv)[1]
+ self.updateFileHashes("EBUILD", ebuildname, ignoreMissingFiles=ignoreMissingFiles)
+ for f in self._getCpvDistfiles(cpv):
+ self.updateFileHashes("DIST", f, ignoreMissingFiles=ignoreMissingFiles)
+
+ def updateHashesGuessType(self, fname, *args, **kwargs):
+ """ Regenerate hashes for the given file (guesses the type and then
+ calls updateFileHashes)."""
+ mytype = self.guessType(fname)
+ if mytype == "AUX":
+ fname = fname[len("files" + os.sep):]
+ elif mytype is None:
+ return
+ myrealtype = self.findFile(fname)
+ if myrealtype is not None:
+ mytype = myrealtype
+ return self.updateFileHashes(mytype, fname, *args, **kwargs)
+
+ def getFileData(self, ftype, fname, key):
+ """ Return the value of a specific (type,filename,key) triple, mainly useful
+ to get the size for distfiles."""
+ return self.fhashdict[ftype][fname][key]
+
+ def getVersions(self):
+ """ Returns a list of manifest versions present in the manifest file. """
+ rVal = []
+ mfname = self.getFullname()
+ if not os.path.exists(mfname):
+ return rVal
+ myfile = open(mfname, "r")
+ lines = myfile.readlines()
+ myfile.close()
+ for l in lines:
+ mysplit = l.split()
+ if len(mysplit) == 4 and mysplit[0] in portage_const.MANIFEST1_HASH_FUNCTIONS and not 1 in rVal:
+ rVal.append(1)
+ elif len(mysplit) > 4 and mysplit[0] in portage_const.MANIFEST2_IDENTIFIERS and ((len(mysplit) - 3) % 2) == 0 and not 2 in rVal:
+ rVal.append(2)
+ return rVal
+
+ def _catsplit(self, pkg_key):
+ """Split a category and package, returning a list of [cat, pkg].
+ This is compatible with portage.catsplit()"""
+ return pkg_key.split("/", 1)
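+# Illustrative usage sketch (paths and the cpv key are made-up examples):
+#
+#   >>> m = Manifest("/usr/portage/app-misc/foo", "/usr/portage/distfiles",
+#   ...     fetchlist_dict={"app-misc/foo-1.0": ["foo-1.0.tar.gz"]})
+#   >>> m.create()            # recompute hashes for ebuilds, AUX and DIST files
+#   >>> m.write()             # only rewrites the Manifest file if entries changed
+#   >>> m.checkCpvHashes("app-misc/foo-1.0")   # verify everything for one cpv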
diff --git a/pym/portage/news.py b/pym/portage/news.py
new file mode 100644
index 00000000..b54261d9
--- /dev/null
+++ b/pym/portage/news.py
@@ -0,0 +1,268 @@
+# portage: news management code
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+from portage_const import INCREMENTALS, PROFILE_PATH, NEWS_LIB_PATH
+from portage import config, vartree, vardbapi, portdbapi
+from portage_util import ensure_dirs, apply_permissions
+from portage_data import portage_gid
+from portage_locks import lockfile, unlockfile, lockdir, unlockdir
+from portage_exception import FileNotFound
+import os, re
+
+class NewsManager(object):
+ """
+ This object manages GLEP 42 style news items. It will cache news items
+ that have previously shown up and notify users when there are relevant news
+ items that apply to their packages that the user has not previously read.
+
+ Creating a news manager requires:
+ root - typically ${ROOT} see man make.conf and man emerge for details
+ NEWS_PATH - path to news items; usually $REPODIR/metadata/news
+ UNREAD_PATH - path to the news.repoid.unread file; this helps us track news items
+
+ """
+
+ TIMESTAMP_FILE = "news-timestamp"
+
+ def __init__( self, root, NEWS_PATH, UNREAD_PATH, LANGUAGE_ID='en' ):
+ self.NEWS_PATH = NEWS_PATH
+ self.UNREAD_PATH = UNREAD_PATH
+ self.TIMESTAMP_PATH = os.path.join( root, NEWS_LIB_PATH, NewsManager.TIMESTAMP_FILE )
+ self.target_root = root
+ self.LANGUAGE_ID = LANGUAGE_ID
+ self.config = config( config_root = os.environ.get("PORTAGE_CONFIGROOT", "/"),
+ target_root = root, config_incrementals = INCREMENTALS)
+ self.vdb = vardbapi( settings = self.config, root = root,
+ vartree = vartree( root = root, settings = self.config ) )
+ self.portdb = portdbapi( porttree_root = self.config["PORTDIR"], mysettings = self.config )
+
+ # Ensure that the unread path exists and is writable.
+ dirmode = 02070
+ modemask = 02
+ ensure_dirs(self.UNREAD_PATH, mode=dirmode, mask=modemask, gid=portage_gid)
+
+ def updateItems( self, repoid ):
+ """
+ Figure out which news items from NEWS_PATH are both unread and relevant to
+ the user (according to the GLEP 42 standards of relevancy). Then add these
+ items into the news.repoid.unread file.
+ """
+
+ repos = self.portdb.getRepositories()
+ if repoid not in repos:
+ raise ValueError("Invalid repoID: %s" % repoid)
+
+ timestamp_file = self.TIMESTAMP_PATH + repoid
+ if os.path.exists(timestamp_file):
+ # Make sure the timestamp has correct permissions.
+ apply_permissions( filename=timestamp_file,
+ uid=int(self.config["PORTAGE_INST_UID"]), gid=portage_gid, mode=0664 )
+ timestamp = os.stat(timestamp_file).st_mtime
+ else:
+ timestamp = 0
+
+ path = os.path.join( self.portdb.getRepositoryPath( repoid ), self.NEWS_PATH )
+ newsdir_lock = None
+ try:
+ newsdir_lock = lockdir( self.portdb.getRepositoryPath(repoid) )
+ # Skip reading news for repoid if the news dir does not exist. Requested by
+ # NightMorph :)
+ if not os.path.exists( path ):
+ return None
+ news = os.listdir( path )
+ updates = []
+ for item in news:
+ try:
+ file = os.path.join( path, item, item + "." + self.LANGUAGE_ID + ".txt")
+ tmp = NewsItem( file , timestamp )
+ except TypeError:
+ continue
+
+ if tmp.isRelevant( profile=os.readlink(PROFILE_PATH), config=self.config, vardb=self.vdb):
+ updates.append( tmp )
+ finally:
+ if newsdir_lock:
+ unlockdir(newsdir_lock)
+
+ del path
+
+ path = os.path.join( self.UNREAD_PATH, "news-" + repoid + ".unread" )
+ unread_lock = None
+ try:
+ unread_lock = lockfile( path )
+ if not os.path.exists( path ):
+ #create the file if it does not exist
+ open( path, "w" )
+ # Ensure correct perms on the unread file.
+ apply_permissions( filename=path,
+ uid=int(self.config["PORTAGE_INST_UID"]), gid=portage_gid, mode=0664 )
+ # Make sure we have the correct permissions when created
+ unread_file = open( path, "a" )
+
+ for item in updates:
+ unread_file.write( item.path + "\n" )
+ unread_file.close()
+ finally:
+ if unread_lock:
+ unlockfile(unread_lock)
+
+ # Touch the timestamp file
+ f = open(timestamp_file, "w")
+ f.close()
+
+ def getUnreadItems( self, repoid, update=False ):
+ """
+ Determine if there are unread relevant items in news.repoid.unread.
+ If there are unread items return their number.
+ If update is specified, updateItems( repoid ) will be called to
+ check for new items.
+ """
+
+ if update:
+ self.updateItems( repoid )
+
+ unreadfile = os.path.join( self.UNREAD_PATH, "news-"+ repoid +".unread" )
+ unread_lock = None
+ try:
+ try:
+ unread_lock = lockfile(unreadfile)
+ # Set correct permissions on the news-repoid.unread file
+ apply_permissions( filename=unreadfile,
+ uid=int(self.config["PORTAGE_INST_UID"]), gid=portage_gid, mode=0664 )
+
+ if os.path.exists( unreadfile ):
+ unread = open( unreadfile ).readlines()
+ if len(unread):
+ return len(unread)
+ except FileNotFound:
+ pass # unread file may not exist
+ finally:
+ if unread_lock:
+ unlockfile(unread_lock)
+
+_installedRE = re.compile("Display-If-Installed:(.*)\n")
+_profileRE = re.compile("Display-If-Profile:(.*)\n")
+_keywordRE = re.compile("Display-If-Keyword:(.*)\n")
+
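+# The regular expressions above match GLEP 42 restriction headers such as
+# (illustrative values):
+#
+#   Display-If-Installed: <dev-lang/python-2.5
+#   Display-If-Profile: default-linux/x86/2006.1
+#   Display-If-Keyword: x86
+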
+class NewsItem(object):
+ """
+ This class encapsulates a GLEP 42 style news item.
+ It's purpose is to wrap parsing of these news items such that portage can determine
+ whether a particular item is 'relevant' or not. This requires parsing the item
+ and determining 'relevancy restrictions'; these include "Display if Installed" or
+ "display if arch: x86" and so forth.
+
+ Creation of a news item involves passing in the path to the particular news item.
+
+ """
+
+ def __init__( self, path, cache_mtime = 0 ):
+ """
+ For a given news item, we only want it if its path is a file and its
+ mtime is newer than the cached timestamp.
+ """
+ if not os.path.isfile( path ):
+ raise TypeError
+ if not os.stat( path ).st_mtime > cache_mtime:
+ raise TypeError
+ self.path = path
+ self._parsed = False
+
+ def isRelevant( self, vardb, config, profile ):
+ """
+ This function takes a dict of keyword arguments; one should pass in any
+ objects needed to do lookups (like what keywords we are on, what profile,
+ and a vardb so we can look at installed packages).
+ Each restriction will pluck out the items that are required for it to match
+ or raise a ValueError exception if the required object is not present.
+ """
+
+ if not len(self.restrictions):
+ return True # no restrictions to match means everyone should see it
+
+ kwargs = { 'vardb' : vardb,
+ 'config' : config,
+ 'profile' : profile }
+
+ for restriction in self.restrictions:
+ if restriction.checkRestriction( **kwargs ):
+ return True
+
+ return False # No restrictions were met; thus we aren't relevant :(
+
+ def parse( self ):
+ lines = open(self.path).readlines()
+ self.restrictions = []
+ for line in lines:
+ # Optimization: skip regex matching on lines that
+ # can never match.
+ if not line.startswith("D"):
+ continue
+ restricts = { _installedRE : DisplayInstalledRestriction,
+ _profileRE : DisplayProfileRestriction,
+ _keywordRE : DisplayKeywordRestriction }
+ for regex, restriction in restricts.iteritems():
+ match = regex.match(line)
+ if match:
+ self.restrictions.append( restriction( match.groups()[0].strip() ) )
+ continue
+ self._parsed = True
+
+ def __getattr__( self, attr ):
+ if not self._parsed:
+ self.parse()
+ try:
+ return self.__dict__[attr]
+ except KeyError:
+ raise AttributeError(attr)
+
+class DisplayRestriction(object):
+ """
+ A base restriction object representing a restriction of display.
+ News items may have 'relevancy restrictions' preventing them from
+ being important. In this case we need a manner of figuring out if
+ a particular item is relevant or not. If any of its restrictions
+ are met, then it is displayed.
+ """
+
+ def checkRestriction( self, **kwargs ):
+ raise NotImplementedError("Derived classes should override this method")
+
+class DisplayProfileRestriction(DisplayRestriction):
+ """
+ A profile restriction where a particular item shall only be displayed
+ if the user is running a specific profile.
+ """
+
+ def __init__( self, profile ):
+ self.profile = profile
+
+ def checkRestriction( self, **kwargs ):
+ if self.profile == kwargs['profile']:
+ return True
+ return False
+
+class DisplayKeywordRestriction(DisplayRestriction):
+ """
+ A keyword restriction where a particular item shall only be displayed
+ if the user is running a specific keyword.
+ """
+
+ def __init__( self, keyword ):
+ self.keyword = keyword
+
+ def checkRestriction( self, **kwargs ):
+ if kwargs['config']["ARCH"] == self.keyword:
+ return True
+ return False
+
+class DisplayInstalledRestriction(DisplayRestriction):
+ """
+ An Installation restriction where a particular item shall only be displayed
+ if the user has that item installed.
+ """
+
+ def __init__( self, cpv ):
+ self.cpv = cpv
+
+ def checkRestriction( self, **kwargs ):
+ vdb = kwargs['vardb']
+ if vdb.match( self.cpv ):
+ return True
+ return False
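+# Illustrative usage sketch (paths are examples, not defaults):
+#
+#   >>> manager = NewsManager("/", "metadata/news", "/var/lib/gentoo/news")
+#   >>> manager.updateItems("gentoo")     # record new relevant items as unread
+#   >>> manager.getUnreadItems("gentoo")  # number of unread items, if any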
diff --git a/pym/portage/output.py b/pym/portage/output.py
new file mode 100644
index 00000000..62ec975f
--- /dev/null
+++ b/pym/portage/output.py
@@ -0,0 +1,393 @@
+# Copyright 1998-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+__docformat__ = "epytext"
+
+import commands,errno,os,re,shlex,sys
+from portage_const import COLOR_MAP_FILE
+from portage_util import writemsg
+from portage_exception import PortageException, ParseError, PermissionDenied, FileNotFound
+
+havecolor=1
+dotitles=1
+
+esc_seq = "\x1b["
+
+g_attr = {}
+g_attr["normal"] = 0
+
+g_attr["bold"] = 1
+g_attr["faint"] = 2
+g_attr["standout"] = 3
+g_attr["underline"] = 4
+g_attr["blink"] = 5
+g_attr["overline"] = 6 # Why is overline actually useful?
+g_attr["reverse"] = 7
+g_attr["invisible"] = 8
+
+g_attr["no-attr"] = 22
+g_attr["no-standout"] = 23
+g_attr["no-underline"] = 24
+g_attr["no-blink"] = 25
+g_attr["no-overline"] = 26
+g_attr["no-reverse"] = 27
+# 28 isn't defined?
+# 29 isn't defined?
+g_attr["black"] = 30
+g_attr["red"] = 31
+g_attr["green"] = 32
+g_attr["yellow"] = 33
+g_attr["blue"] = 34
+g_attr["magenta"] = 35
+g_attr["cyan"] = 36
+g_attr["white"] = 37
+# 38 isn't defined?
+g_attr["default"] = 39
+g_attr["bg_black"] = 40
+g_attr["bg_red"] = 41
+g_attr["bg_green"] = 42
+g_attr["bg_yellow"] = 43
+g_attr["bg_blue"] = 44
+g_attr["bg_magenta"] = 45
+g_attr["bg_cyan"] = 46
+g_attr["bg_white"] = 47
+g_attr["bg_default"] = 49
+
+
+# make_seq("blue", "black", "normal")
+def color(fg, bg="default", attr=["normal"]):
+ mystr = esc_seq[:] + "%02d" % g_attr[fg]
+ for x in [bg]+attr:
+ mystr += ";%02d" % g_attr[x]
+ return mystr+"m"
+
+
+
+codes={}
+codes["reset"] = esc_seq + "39;49;00m"
+
+codes["bold"] = esc_seq + "01m"
+codes["faint"] = esc_seq + "02m"
+codes["standout"] = esc_seq + "03m"
+codes["underline"] = esc_seq + "04m"
+codes["blink"] = esc_seq + "05m"
+codes["overline"] = esc_seq + "06m" # Who made this up? Seriously.
+
+ansi_color_codes = []
+for x in xrange(30, 38):
+ ansi_color_codes.append("%im" % x)
+ ansi_color_codes.append("%i;01m" % x)
+
+rgb_ansi_colors = ['0x000000', '0x555555', '0xAA0000', '0xFF5555', '0x00AA00',
+ '0x55FF55', '0xAA5500', '0xFFFF55', '0x0000AA', '0x5555FF', '0xAA00AA',
+ '0xFF55FF', '0x00AAAA', '0x55FFFF', '0xAAAAAA', '0xFFFFFF']
+
+for x in xrange(len(rgb_ansi_colors)):
+ codes[rgb_ansi_colors[x]] = esc_seq + ansi_color_codes[x]
+
+del x
+
+codes["black"] = codes["0x000000"]
+codes["darkgray"] = codes["0x555555"]
+
+codes["red"] = codes["0xFF5555"]
+codes["darkred"] = codes["0xAA0000"]
+
+codes["green"] = codes["0x55FF55"]
+codes["darkgreen"] = codes["0x00AA00"]
+
+codes["yellow"] = codes["0xFFFF55"]
+codes["brown"] = codes["0xAA5500"]
+
+codes["blue"] = codes["0x5555FF"]
+codes["darkblue"] = codes["0x0000AA"]
+
+codes["fuchsia"] = codes["0xFF55FF"]
+codes["purple"] = codes["0xAA00AA"]
+
+codes["turquoise"] = codes["0x55FFFF"]
+codes["teal"] = codes["0x00AAAA"]
+
+codes["white"] = codes["0xFFFFFF"]
+codes["lightgray"] = codes["0xAAAAAA"]
+
+codes["darkteal"] = codes["turquoise"]
+codes["darkyellow"] = codes["brown"]
+codes["fuscia"] = codes["fuchsia"]
+codes["white"] = codes["bold"]
+
+# Colors from /sbin/functions.sh
+codes["GOOD"] = codes["green"]
+codes["WARN"] = codes["yellow"]
+codes["BAD"] = codes["red"]
+codes["HILITE"] = codes["teal"]
+codes["BRACKET"] = codes["blue"]
+
+# Portage functions
+codes["INFORM"] = codes["darkgreen"]
+codes["UNMERGE_WARN"] = codes["red"]
+codes["MERGE_LIST_PROGRESS"] = codes["yellow"]
+
+def parse_color_map():
+ myfile = COLOR_MAP_FILE
+ ansi_code_pattern = re.compile("^[0-9;]*m$")
+ def strip_quotes(token, quotes):
+ if token[0] in quotes and token[0] == token[-1]:
+ token = token[1:-1]
+ return token
+ try:
+ s = shlex.shlex(open(myfile))
+ s.wordchars = s.wordchars + ";" # for ansi codes
+ d = {}
+ while True:
+ k, o, v = s.get_token(), s.get_token(), s.get_token()
+ if k is s.eof:
+ break
+ if o != "=":
+ raise ParseError("%s%s'%s'" % (s.error_leader(myfile, s.lineno), "expected '=' operator: ", o))
+ k = strip_quotes(k, s.quotes)
+ v = strip_quotes(v, s.quotes)
+ if ansi_code_pattern.match(v):
+ codes[k] = esc_seq + v
+ else:
+ if v in codes:
+ codes[k] = codes[v]
+ else:
+ raise ParseError("%s%s'%s'" % (s.error_leader(myfile, s.lineno), "Undefined: ", v))
+ except (IOError, OSError), e:
+ if e.errno == errno.ENOENT:
+ raise FileNotFound(myfile)
+ elif e.errno == errno.EACCES:
+ raise PermissionDenied(myfile)
+ raise
+
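+# A color.map entry maps a color class either to a known color name or to a
+# raw ANSI attribute code; illustrative lines:
+#
+#   HILITE = green
+#   WARN = 33;01m
+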
+try:
+ parse_color_map()
+except FileNotFound, e:
+ pass
+except PortageException, e:
+ writemsg("%s\n" % str(e))
+
+def nc_len(mystr):
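+ # Note: esc_seq ends with "[", which here opens a regex character class,
+ # so the pattern matches ESC, one or more non-"m" characters, then "m".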
+ tmp = re.sub(esc_seq + "^m]+m", "", mystr)
+ return len(tmp)
+
+def xtermTitle(mystr, raw=False):
+ if havecolor and dotitles and os.environ.has_key("TERM") and sys.stderr.isatty():
+ myt=os.environ["TERM"]
+ legal_terms = ["xterm","Eterm","aterm","rxvt","screen","kterm","rxvt-unicode","gnome"]
+ for term in legal_terms:
+ if myt.startswith(term):
+ if not raw:
+ mystr = "\x1b]0;%s\x07" % mystr
+ sys.stderr.write(mystr)
+ sys.stderr.flush()
+ break
+
+default_xterm_title = None
+
+def xtermTitleReset():
+ global default_xterm_title
+ if default_xterm_title is None:
+ prompt_command = os.getenv('PROMPT_COMMAND')
+ if prompt_command == "":
+ default_xterm_title = ""
+ elif prompt_command is not None:
+ default_xterm_title = commands.getoutput(prompt_command)
+ else:
+ pwd = os.getenv('PWD','')
+ home = os.getenv('HOME', '')
+ if home != '' and pwd.startswith(home):
+ pwd = '~' + pwd[len(home):]
+ default_xterm_title = '\x1b]0;%s@%s:%s\x07' % (
+ os.getenv('LOGNAME', ''), os.getenv('HOSTNAME', '').split('.', 1)[0], pwd)
+ xtermTitle(default_xterm_title, raw=True)
+
+def notitles():
+ "turn off title setting"
+ global dotitles
+ dotitles=0
+
+def nocolor():
+ "turn off colorization"
+ global havecolor
+ havecolor=0
+
+def resetColor():
+ return codes["reset"]
+
+def colorize(color_key, text):
+ global havecolor
+ if havecolor:
+ return codes[color_key] + text + codes["reset"]
+ else:
+ return text
+
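+# Example: colorize("GOOD", "done") wraps the text in the "GOOD" escape
+# sequence (green by default) followed by the reset code; after nocolor()
+# it returns the text unchanged.
+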
+compat_functions_colors = ["bold","white","teal","turquoise","darkteal",
+ "fuscia","fuchsia","purple","blue","darkblue","green","darkgreen","yellow",
+ "brown","darkyellow","red","darkred"]
+
+def create_color_func(color_key):
+ def derived_func(*args):
+ newargs = list(args)
+ newargs.insert(0, color_key)
+ return colorize(*newargs)
+ return derived_func
+
+for c in compat_functions_colors:
+ setattr(sys.modules[__name__], c, create_color_func(c))
+
+class EOutput:
+ """
+ Performs fancy terminal formatting for status and informational messages.
+
+ The provided methods produce identical terminal output to the eponymous
+ functions in the shell script C{/sbin/functions.sh} and also accept
+ identical parameters.
+
+ This is not currently a drop-in replacement however, as the output-related
+ functions in C{/sbin/functions.sh} are oriented for use mainly by system
+ init scripts and ebuilds and their output can be customized via certain
+ C{RC_*} environment variables (see C{/etc/conf.d/rc}). B{EOutput} is not
+ customizable in this manner since it's intended for more general uses.
+ Likewise, no logging is provided.
+
+ @ivar quiet: Specifies if output should be silenced.
+ @type quiet: BooleanType
+ @ivar term_columns: Width of terminal in characters. Defaults to the value
+ specified by the shell's C{COLUMNS} variable, else to the queried tty
+ size, else to C{80}.
+ @type term_columns: IntType
+ """
+
+ def __init__(self):
+ self.__last_e_cmd = ""
+ self.__last_e_len = 0
+ self.quiet = False
+ columns = 0
+ try:
+ columns = int(os.getenv("COLUMNS", 0))
+ except ValueError:
+ pass
+ if columns <= 0:
+ try:
+ columns = int(commands.getoutput(
+ 'set -- `stty size 2>/dev/null` ; echo "$2"'))
+ except ValueError:
+ pass
+ if columns <= 0:
+ columns = 80
+ self.term_columns = columns
+
+ def __eend(self, caller, errno, msg):
+ if errno == 0:
+ status_brackets = colorize("BRACKET", "[ ") + colorize("GOOD", "ok") + colorize("BRACKET", " ]")
+ else:
+ status_brackets = colorize("BRACKET", "[ ") + colorize("BAD", "!!") + colorize("BRACKET", " ]")
+ if msg:
+ if caller == "eend":
+ self.eerror(msg[0])
+ elif caller == "ewend":
+ self.ewarn(msg[0])
+ if self.__last_e_cmd != "ebegin":
+ self.__last_e_len = 0
+ print "%*s%s" % ((self.term_columns - self.__last_e_len - 6), "", status_brackets)
+ sys.stdout.flush()
+
+ def ebegin(self, msg):
+ """
+ Shows a message indicating the start of a process.
+
+ @param msg: A very brief (shorter than one line) description of the
+ starting process.
+ @type msg: StringType
+ """
+ msg += " ..."
+ if not self.quiet:
+ self.einfon(msg)
+ self.__last_e_len = len(msg) + 4
+ self.__last_e_cmd = "ebegin"
+
+ def eend(self, errno, *msg):
+ """
+ Indicates the completion of a process, optionally displaying a message
+ via L{eerror} if the process's exit status isn't C{0}.
+
+ @param errno: A standard UNIX C{errno} code returned by processes upon
+ exit.
+ @type errno: IntType
+ @param msg: I{(optional)} An error message, typically a standard UNIX
+ error string corresponding to C{errno}.
+ @type msg: StringType
+ """
+ if not self.quiet:
+ self.__eend("eend", errno, msg)
+ self.__last_e_cmd = "eend"
+
+ def eerror(self, msg):
+ """
+ Shows an error message.
+
+ @param msg: A very brief (shorter than one line) error message.
+ @type msg: StringType
+ """
+ if not self.quiet:
+ if self.__last_e_cmd == "ebegin": print
+ print colorize("BAD", " * ") + msg
+ sys.stdout.flush()
+ self.__last_e_cmd = "eerror"
+
+ def einfo(self, msg):
+ """
+ Shows an informative message terminated with a newline.
+
+ @param msg: A very brief (shorter than one line) informative message.
+ @type msg: StringType
+ """
+ if not self.quiet:
+ if self.__last_e_cmd == "ebegin": print
+ print colorize("GOOD", " * ") + msg
+ sys.stdout.flush()
+ self.__last_e_cmd = "einfo"
+
+ def einfon(self, msg):
+ """
+ Shows an informative message terminated without a newline.
+
+ @param msg: A very brief (shorter than one line) informative message.
+ @type msg: StringType
+ """
+ if not self.quiet:
+ if self.__last_e_cmd == "ebegin": print
+ print colorize("GOOD", " * ") + msg ,
+ sys.stdout.flush()
+ self.__last_e_cmd = "einfon"
+
+ def ewarn(self, msg):
+ """
+ Shows a warning message.
+
+ @param msg: A very brief (shorter than one line) warning message.
+ @type msg: StringType
+ """
+ if not self.quiet:
+ if self.__last_e_cmd == "ebegin": print
+ print colorize("WARN", " * ") + msg
+ sys.stdout.flush()
+ self.__last_e_cmd = "ewarn"
+
+ def ewend(self, errno, *msg):
+ """
+ Indicates the completion of a process, optionally displaying a message
+ via L{ewarn} if the process's exit status isn't C{0}.
+
+ @param errno: A standard UNIX C{errno} code returned by processes upon
+ exit.
+ @type errno: IntType
+ @param msg: I{(optional)} A warning message, typically a standard UNIX
+ error string corresponding to C{errno}.
+ @type msg: StringType
+ """
+ if not self.quiet:
+ self.__eend("ewend", errno, msg)
+ self.__last_e_cmd = "ewend"
diff --git a/pym/portage/selinux.py b/pym/portage/selinux.py
new file mode 100644
index 00000000..e4d80fa1
--- /dev/null
+++ b/pym/portage/selinux.py
@@ -0,0 +1,8 @@
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: $
+
+import selinux
+from selinux import is_selinux_enabled
+from selinux_aux import setexec, secure_symlink, secure_rename, \
+ secure_copy, secure_mkdir, getcontext, get_sid, get_lsid
diff --git a/pym/portage/update.py b/pym/portage/update.py
new file mode 100644
index 00000000..1a2a1d88
--- /dev/null
+++ b/pym/portage/update.py
@@ -0,0 +1,224 @@
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: $
+
+import errno, os, re, sys
+
+from portage_util import ConfigProtect, grabfile, new_protect_filename, \
+ normalize_path, write_atomic, writemsg
+from portage_exception import DirectoryNotFound, PortageException
+from portage_versions import ververify
+from portage_dep import dep_getkey, get_operator, isvalidatom, isjustname
+from portage_const import USER_CONFIG_PATH, WORLD_FILE
+
+ignored_dbentries = ("CONTENTS", "environment.bz2")
+
+def update_dbentry(update_cmd, mycontent):
+ if update_cmd[0] == "move":
+ old_value, new_value = update_cmd[1], update_cmd[2]
+ if mycontent.count(old_value):
+ old_value = re.escape(old_value)
+ mycontent = re.sub(old_value+"(:|$|\\s)", new_value+"\\1", mycontent)
+ def myreplace(matchobj):
+ if ververify(matchobj.group(2)):
+ return "%s-%s" % (new_value, matchobj.group(2))
+ else:
+ return "".join(matchobj.groups())
+ mycontent = re.sub("(%s-)(\\S*)" % old_value, myreplace, mycontent)
+ elif update_cmd[0] == "slotmove" and get_operator(update_cmd[1]) is None:
+ pkg, origslot, newslot = update_cmd[1:]
+ old_value = "%s:%s" % (pkg, origslot)
+ if mycontent.count(old_value):
+ old_value = re.escape(old_value)
+ new_value = "%s:%s" % (pkg, newslot)
+ mycontent = re.sub(old_value+"($|\\s)", new_value+"\\1", mycontent)
+ return mycontent
+
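+# Example (illustrative values): a "move" command rewrites both plain and
+# versioned occurrences of the old package key:
+#
+#   >>> update_dbentry(["move", "app-misc/foo", "app-misc/bar"],
+#   ...     "app-misc/foo app-misc/foo-1.0")
+#   'app-misc/bar app-misc/bar-1.0'
+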
+def update_dbentries(update_iter, mydata):
+ """Performs update commands and returns a
+ dict containing only the updated items."""
+ updated_items = {}
+ for k, mycontent in mydata.iteritems():
+ if k not in ignored_dbentries:
+ orig_content = mycontent
+ for update_cmd in update_iter:
+ mycontent = update_dbentry(update_cmd, mycontent)
+ if mycontent != orig_content:
+ updated_items[k] = mycontent
+ return updated_items
+
+def fixdbentries(update_iter, dbdir):
+ """Performs update commands which result in search and replace operations
+ for each of the files in dbdir (excluding CONTENTS and environment.bz2).
+ Returns True when actual modifications are necessary and False otherwise."""
+ mydata = {}
+ for myfile in [f for f in os.listdir(dbdir) if f not in ignored_dbentries]:
+ file_path = os.path.join(dbdir, myfile)
+ f = open(file_path, "r")
+ mydata[myfile] = f.read()
+ f.close()
+ updated_items = update_dbentries(update_iter, mydata)
+ for myfile, mycontent in updated_items.iteritems():
+ file_path = os.path.join(dbdir, myfile)
+ write_atomic(file_path, mycontent)
+ return len(updated_items) > 0
+
+def grab_updates(updpath, prev_mtimes=None):
+ """Returns all the updates from the given directory as a sorted list of
+ tuples, each containing (file_path, statobj, content). If prev_mtimes is
+ given then only updates with differing mtimes are considered."""
+ try:
+ mylist = os.listdir(updpath)
+ except OSError, oe:
+ if oe.errno == errno.ENOENT:
+ raise DirectoryNotFound(updpath)
+ raise
+ if prev_mtimes is None:
+ prev_mtimes = {}
+ # validate the file name (filter out CVS directory, etc...)
+ mylist = [myfile for myfile in mylist if len(myfile) == 7 and myfile[1:3] == "Q-"]
+ if len(mylist) == 0:
+ return []
+
+ # update names are mangled to make them sort properly
+ mylist = [myfile[3:]+"-"+myfile[:2] for myfile in mylist]
+ mylist.sort()
+ mylist = [myfile[5:]+"-"+myfile[:4] for myfile in mylist]
+
+ update_data = []
+ for myfile in mylist:
+ file_path = os.path.join(updpath, myfile)
+ mystat = os.stat(file_path)
+ if file_path not in prev_mtimes or \
+ long(prev_mtimes[file_path]) != long(mystat.st_mtime):
+ f = open(file_path)
+ content = f.read()
+ f.close()
+ update_data.append((file_path, mystat, content))
+ return update_data
+
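+# The mangling above makes update file names sort chronologically; e.g. an
+# (illustrative) name "1Q-2007" becomes "2007-1Q" for sorting and is then
+# rewritten back to "1Q-2007" before the files are read.
+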
+def parse_updates(mycontent):
+ """Valid updates are returned as a list of split update commands."""
+ myupd = []
+ errors = []
+ mylines = mycontent.splitlines()
+ for myline in mylines:
+ mysplit = myline.split()
+ if len(mysplit) == 0:
+ continue
+ if mysplit[0] not in ("move", "slotmove"):
+ errors.append("ERROR: Update type not recognized '%s'" % myline)
+ continue
+ if mysplit[0] == "move":
+ if len(mysplit) != 3:
+ errors.append("ERROR: Update command invalid '%s'" % myline)
+ continue
+ orig_value, new_value = mysplit[1], mysplit[2]
+ invalid_atom = False
+ for cp in (orig_value, new_value):
+ if not (isvalidatom(cp) and isjustname(cp)):
+ errors.append(
+ "ERROR: Malformed update entry '%s'" % myline)
+ invalid_atom = True
+ break
+ if invalid_atom:
+ continue
+ if mysplit[0] == "slotmove":
+ if len(mysplit)!=4:
+ errors.append("ERROR: Update command invalid '%s'" % myline)
+ continue
+ pkg, origslot, newslot = mysplit[1], mysplit[2], mysplit[3]
+ if not isvalidatom(pkg):
+ errors.append("ERROR: Malformed update entry '%s'" % myline)
+ continue
+
+ # The list of valid updates is filtered by continue statements above.
+ myupd.append(mysplit)
+ return myupd, errors
+
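+# Example (illustrative): an update file containing
+#
+#   move app-misc/foo app-misc/bar
+#   slotmove =app-misc/baz-1* 0 1
+#
+# parses to ([['move', 'app-misc/foo', 'app-misc/bar'],
+#             ['slotmove', '=app-misc/baz-1*', '0', '1']], []).
+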
+def update_config_files(config_root, protect, protect_mask, update_iter):
+ """Perform global updates on /etc/portage/package.* and the world file.
+ config_root - location of files to update
+ protect - list of paths from CONFIG_PROTECT
+ protect_mask - list of paths from CONFIG_PROTECT_MASK
+ update_iter - list of update commands as returned from parse_updates()"""
+ config_root = normalize_path(config_root)
+ update_files = {}
+ file_contents = {}
+ myxfiles = ["package.mask", "package.unmask", \
+ "package.keywords", "package.use"]
+ myxfiles += [os.path.join("profile", x) for x in myxfiles]
+ abs_user_config = os.path.join(config_root,
+ USER_CONFIG_PATH.lstrip(os.path.sep))
+ recursivefiles = []
+ for x in myxfiles:
+ config_file = os.path.join(abs_user_config, x)
+ if os.path.isdir(config_file):
+ for parent, dirs, files in os.walk(config_file):
+ for y in dirs:
+ if y.startswith("."):
+ dirs.remove(y)
+ for y in files:
+ if y.startswith("."):
+ continue
+ recursivefiles.append(
+ os.path.join(parent, y)[len(abs_user_config) + 1:])
+ else:
+ recursivefiles.append(x)
+ myxfiles = recursivefiles
+ for x in myxfiles:
+ try:
+ myfile = open(os.path.join(abs_user_config, x),"r")
+ file_contents[x] = myfile.readlines()
+ myfile.close()
+ except IOError:
+ if file_contents.has_key(x):
+ del file_contents[x]
+ continue
+ worldlist = grabfile(os.path.join(config_root, WORLD_FILE))
+
+ for update_cmd in update_iter:
+ if update_cmd[0] == "move":
+ old_value, new_value = update_cmd[1], update_cmd[2]
+ #update world entries, if any
+ for x in range(0,len(worldlist)):
+ worldlist[x] = \
+ dep_transform(worldlist[x], old_value, new_value)
+
+ #update /etc/portage/package.* files
+ for x in file_contents:
+ for mypos in range(0,len(file_contents[x])):
+ line = file_contents[x][mypos]
+ if line[0] == "#" or not line.strip():
+ continue
+ myatom = line.split()[0]
+ if myatom.startswith("-"):
+ # package.mask supports incrementals
+ myatom = myatom[1:]
+ if not isvalidatom(myatom):
+ continue
+ key = dep_getkey(myatom)
+ if key == old_value:
+ file_contents[x][mypos] = \
+ line.replace(old_value, new_value)
+ update_files[x] = 1
+ sys.stdout.write("p")
+ sys.stdout.flush()
+
+ write_atomic(os.path.join(config_root, WORLD_FILE), "\n".join(worldlist))
+
+ protect_obj = ConfigProtect(
+ config_root, protect, protect_mask)
+ for x in update_files:
+ updating_file = os.path.join(abs_user_config, x)
+ if protect_obj.isprotected(updating_file):
+ updating_file = new_protect_filename(updating_file)
+ try:
+ write_atomic(updating_file, "".join(file_contents[x]))
+ except PortageException, e:
+ writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
+ writemsg("!!! An error occured while updating a config file:" + \
+ " '%s'\n" % updating_file, noiselevel=-1)
+ continue
+
+def dep_transform(mydep, oldkey, newkey):
+ if dep_getkey(mydep) == oldkey:
+ return mydep.replace(oldkey, newkey, 1)
+ return mydep
diff --git a/pym/portage/util.py b/pym/portage/util.py
new file mode 100644
index 00000000..cc5a566b
--- /dev/null
+++ b/pym/portage/util.py
@@ -0,0 +1,1037 @@
+# Copyright 2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+from portage_exception import PortageException, FileNotFound, \
+ OperationNotPermitted, PermissionDenied, ReadOnlyFileSystem
+import portage_exception
+from portage_dep import isvalidatom
+
+import os, errno, shlex, stat, string, sys
+try:
+ import cPickle
+except ImportError:
+ import pickle as cPickle
+
+if not hasattr(__builtins__, "set"):
+ from sets import Set as set
+
+noiselimit = 0
+
+def writemsg(mystr,noiselevel=0,fd=None):
+ """Prints out warning and debug messages based on the noiselimit setting"""
+ global noiselimit
+ if fd is None:
+ fd = sys.stderr
+ if noiselevel <= noiselimit:
+ fd.write(mystr)
+ fd.flush()
+
+def writemsg_stdout(mystr,noiselevel=0):
+ """Prints messages stdout based on the noiselimit setting"""
+ writemsg(mystr, noiselevel=noiselevel, fd=sys.stdout)
+
+def normalize_path(mypath):
+ """
+ os.path.normpath("//foo") returns "//foo" instead of "/foo"
+ We dislike this behavior so we create our own normpath func
+ to fix it.
+ """
+ if mypath.startswith(os.path.sep):
+ # posixpath.normpath collapses 3 or more leading slashes to just 1.
+ return os.path.normpath(2*os.path.sep + mypath)
+ else:
+ return os.path.normpath(mypath)
+
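+# Examples:
+#
+#   >>> normalize_path("//foo//bar")
+#   '/foo/bar'
+#   >>> normalize_path("foo/../bar/")
+#   'bar'
+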
+def grabfile(myfilename, compat_level=0, recursive=0):
+ """This function grabs the lines in a file, normalizes whitespace and returns lines in a list; if a line
+ begins with a #, it is ignored, as are empty lines"""
+
+ mylines=grablines(myfilename, recursive)
+ newlines=[]
+ for x in mylines:
+ #the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
+ #into single spaces.
+ myline=" ".join(x.split())
+ if not len(myline):
+ continue
+ if myline[0]=="#":
+ # Check if we have a compat-level string. BC-integration data.
+ # '##COMPAT==>N<==' 'some string attached to it'
+ mylinetest = myline.split("<==",1)
+ if len(mylinetest) == 2:
+ myline_potential = mylinetest[1]
+ mylinetest = mylinetest[0].split("##COMPAT==>")
+ if len(mylinetest) == 2:
+ if compat_level >= int(mylinetest[1]):
+ # It's a compat line, and the key matches.
+ newlines.append(myline_potential)
+ continue
+ else:
+ continue
+ newlines.append(myline)
+ return newlines
+
+def map_dictlist_vals(func,myDict):
+ """Performs a function on each value of each key in a dictlist.
+ Returns a new dictlist."""
+ new_dl = {}
+ for key in myDict.keys():
+ new_dl[key] = []
+ new_dl[key] = map(func,myDict[key])
+ return new_dl
+
+def stack_dictlist(original_dicts, incremental=0, incrementals=[], ignore_none=0):
+ """
+ Stacks a list of dicts into a single dict, optionally merging or
+ overwriting matching key/value pairs for the dict[key]->list.
+ Returns a single dict. Higher indices in the list take precedence.
+
+ Example usage:
+ >>> from portage_util import stack_dictlist
+ >>> print stack_dictlist( [{'a':['b']},{'x':['y']}] )
+ {'a': ['b'], 'x': ['y']}
+ >>> print stack_dictlist( [{'a':['b']},{'a':['c']}], incremental = True )
+ {'a': ['b', 'c']}
+ >>> a = {'KEYWORDS':['x86','alpha']}
+ >>> b = {'KEYWORDS':['-x86']}
+ >>> print stack_dictlist( [a,b] )
+ {'KEYWORDS': ['x86', 'alpha', '-x86']}
+ >>> print stack_dictlist( [a,b], incremental=True )
+ {'KEYWORDS': ['alpha']}
+ >>> print stack_dictlist( [a,b], incrementals=['KEYWORDS'] )
+ {'KEYWORDS': ['alpha']}
+
+ @param original_dicts: a list of (dictionary objects or None)
+ @type original_dicts: list
+ @param incremental: True or False depending on whether new keys should overwrite
+ keys which already exist.
+ @type incremental: boolean
+ @param incrementals: A list of items that should be incremental (-foo removes foo from
+ the returned dict).
+ @type incrementals: list
+ @param ignore_none: Appears to be ignored, but probably was used long long ago.
+ @type ignore_none: boolean
+
+ """
+ final_dict = {}
+ for mydict in original_dicts:
+ if mydict is None:
+ continue
+ for y in mydict.keys():
+ if not y in final_dict:
+ final_dict[y] = []
+
+ for thing in mydict[y]:
+ if thing:
+ if incremental or y in incrementals:
+ if thing == "-*":
+ final_dict[y] = []
+ continue
+ elif thing.startswith("-"):
+ try:
+ final_dict[y].remove(thing[1:])
+ except ValueError:
+ pass
+ continue
+ if thing not in final_dict[y]:
+ final_dict[y].append(thing)
+ if y in final_dict and not final_dict[y]:
+ del final_dict[y]
+ return final_dict
+
+def stack_dicts(dicts, incremental=0, incrementals=[], ignore_none=0):
+ """Stacks an array of dict-types into one array. Optionally merging or
+ overwriting matching key/value pairs for the dict[key]->string.
+ Returns a single dict."""
+ final_dict = None
+ for mydict in dicts:
+ if mydict is None:
+ if ignore_none:
+ continue
+ else:
+ return None
+ if final_dict is None:
+ final_dict = {}
+ for y in mydict.keys():
+ if mydict[y]:
+ if final_dict.has_key(y) and (incremental or (y in incrementals)):
+ final_dict[y] += " "+mydict[y][:]
+ else:
+ final_dict[y] = mydict[y][:]
+ mydict[y] = " ".join(mydict[y].split()) # Remove extra spaces.
+ return final_dict
+
+def stack_lists(lists, incremental=1):
+ """Stacks an array of list-types into one array. Optionally removing
+ distinct values using '-value' notation. Higher index is preferenced.
+
+ all elements must be hashable."""
+
+ new_list = {}
+ for x in lists:
+ for y in filter(None, x):
+ if incremental:
+ if y == "-*":
+ new_list.clear()
+ elif y.startswith("-"):
+ new_list.pop(y[1:], None)
+ else:
+ new_list[y] = True
+ else:
+ new_list[y] = True
+ return new_list.keys()
+
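+# Example (result order is arbitrary, since a dict is used as a set):
+#
+#   >>> sorted(stack_lists([["a", "b"], ["-a", "c"]]))
+#   ['b', 'c']
+#   >>> stack_lists([["a", "b"], ["-*", "c"]])
+#   ['c']
+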
+def grabdict(myfilename, juststrings=0, empty=0, recursive=0, incremental=1):
+ """
+ This function grabs the lines in a file, normalizes whitespace and returns lines in a dictionary
+
+ @param myfilename: file to process
+ @type myfilename: string (path)
+ @param juststrings: only return strings
+ @type juststrings: Boolean (integer)
+ @param empty: Allow lines that consist of only a key with no values
+ @type empty: Boolean (integer)
+ @param recursive: Recursively grab ( support for /etc/portage/package.keywords/* and friends )
+ @type recursive: Boolean (integer)
+ @param incremental: Append to the return list, don't overwrite
+ @type incremental: Boolean (integer)
+ @rtype: Dictionary
+ @returns:
+ 1. Returns the lines in a file in a dictionary, for example:
+ 'sys-apps/portage x86 amd64 ppc'
+ would return
+ { "sys-apps/portage" : [ 'x86', 'amd64', 'ppc' ] }
+ i.e. the line syntax is: key followed by a list of values
+ """
+ newdict={}
+ for x in grablines(myfilename, recursive):
+ #the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
+ #into single spaces.
+ if x[0] == "#":
+ continue
+ myline=x.split()
+ if len(myline) < 2 and empty == 0:
+ continue
+ if len(myline) < 1 and empty == 1:
+ continue
+ if incremental:
+ newdict.setdefault(myline[0], []).extend(myline[1:])
+ else:
+ newdict[myline[0]] = myline[1:]
+ if juststrings:
+ for k, v in newdict.iteritems():
+ newdict[k] = " ".join(v)
+ return newdict
+
+def grabdict_package(myfilename, juststrings=0, recursive=0):
+ pkgs=grabdict(myfilename, juststrings, empty=1, recursive=recursive)
+ # We need to call keys() here in order to avoid the possibility of
+ # "RuntimeError: dictionary changed size during iteration"
+ # when an invalid atom is deleted.
+ for x in pkgs.keys():
+ if not isvalidatom(x):
+ del(pkgs[x])
+ writemsg("--- Invalid atom in %s: %s\n" % (myfilename, x),
+ noiselevel=-1)
+ return pkgs
+
+def grabfile_package(myfilename, compatlevel=0, recursive=0):
+ pkgs=grabfile(myfilename, compatlevel, recursive=recursive)
+ for x in range(len(pkgs)-1, -1, -1):
+ pkg = pkgs[x]
+ if pkg[0] == "-":
+ pkg = pkg[1:]
+ if pkg[0] == "*": # Kill this so we can deal the "packages" file too
+ pkg = pkg[1:]
+ if not isvalidatom(pkg):
+ writemsg("--- Invalid atom in %s: %s\n" % (myfilename, pkgs[x]),
+ noiselevel=-1)
+ del(pkgs[x])
+ return pkgs
+
+def grablines(myfilename,recursive=0):
+ mylines=[]
+ if recursive and os.path.isdir(myfilename):
+ if os.path.basename(myfilename) in ["RCS", "CVS", "SCCS"]:
+ return mylines
+ dirlist = os.listdir(myfilename)
+ dirlist.sort()
+ for f in dirlist:
+ if not f.startswith(".") and not f.endswith("~"):
+ mylines.extend(grablines(
+ os.path.join(myfilename, f), recursive))
+ else:
+ try:
+ myfile = open(myfilename, "r")
+ mylines = myfile.readlines()
+ myfile.close()
+ except IOError:
+ pass
+ return mylines
+
+def writedict(mydict,myfilename,writekey=True):
+ """Writes out a dict to a file; writekey=0 mode doesn't write out
+ the key and assumes all values are strings, not lists."""
+ myfile = None
+ try:
+ myfile = atomic_ofstream(myfilename)
+ if not writekey:
+ for x in mydict.values():
+ myfile.write(x+"\n")
+ else:
+ for x in mydict.keys():
+ myfile.write("%s %s\n" % (x, " ".join(mydict[x])))
+ myfile.close()
+ except IOError:
+ if myfile is not None:
+ myfile.abort()
+ return 0
+ return 1
+
+def getconfig(mycfg, tolerant=0, allow_sourcing=False, expand=True):
+ mykeys={}
+ try:
+ f=open(mycfg,'r')
+ except IOError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ return None
+ try:
+ lex = shlex.shlex(f, posix=True)
+ lex.wordchars=string.digits+string.letters+"~!@#$%*_\:;?,./-+{}"
+ lex.quotes="\"'"
+ if allow_sourcing:
+ lex.source="source"
+ while 1:
+ key=lex.get_token()
+ if key == "export":
+ key = lex.get_token()
+ if key is None:
+ #normal end of file
+ break
+ equ=lex.get_token()
+ if (equ==''):
+ #unexpected end of file
+ #lex.error_leader(self.filename,lex.lineno)
+ if not tolerant:
+ writemsg("!!! Unexpected end of config file: variable "+str(key)+"\n",
+ noiselevel=-1)
+ raise Exception("ParseError: Unexpected EOF: "+str(mycfg)+": on/before line "+str(lex.lineno))
+ else:
+ return mykeys
+ elif (equ!='='):
+ #invalid token
+ #lex.error_leader(self.filename,lex.lineno)
+ if not tolerant:
+ writemsg("!!! Invalid token (not \"=\") "+str(equ)+"\n",
+ noiselevel=-1)
+ raise Exception("ParseError: Invalid token (not '='): "+str(mycfg)+": line "+str(lex.lineno))
+ else:
+ return mykeys
+ val=lex.get_token()
+ if val is None:
+ #unexpected end of file
+ #lex.error_leader(self.filename,lex.lineno)
+ if not tolerant:
+ writemsg("!!! Unexpected end of config file: variable "+str(key)+"\n",
+ noiselevel=-1)
+ raise portage_exception.CorruptionError("ParseError: Unexpected EOF: "+str(mycfg)+": line "+str(lex.lineno))
+ else:
+ return mykeys
+ if expand:
+ mykeys[key] = varexpand(val, mykeys)
+ else:
+ mykeys[key] = val
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ raise portage_exception.ParseError(str(e)+" in "+mycfg)
+ return mykeys
+
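+# Illustrative sketch: for a file containing shell-style assignments such as
+#
+#   FOO="/usr"
+#   BAR="${FOO}/bin"
+#
+# getconfig() returns {"FOO": "/usr", "BAR": "/usr/bin"}, expanding each
+# value against the keys parsed so far (when expand=True).
+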
+#cache expansions of constant strings
+cexpand={}
+def varexpand(mystring,mydict={}):
+ """
+ New variable expansion code. Removes quotes, handles \n, etc.
+ This code is used by the configfile code, as well as others (parser).
+ This would be a good bunch of code to port to C.
+ """
+ newstring = cexpand.get(" "+mystring, None)
+ if newstring is not None:
+ return newstring
+
+ numvars=0
+ mystring=" "+mystring
+ #in single, double quotes
+ insing=0
+ indoub=0
+ pos=1
+ newstring=" "
+ while (pos<len(mystring)):
+ if (mystring[pos]=="'") and (mystring[pos-1]!="\\"):
+ if (indoub):
+ newstring=newstring+"'"
+ else:
+ insing=not insing
+ pos=pos+1
+ continue
+ elif (mystring[pos]=='"') and (mystring[pos-1]!="\\"):
+ if (insing):
+ newstring=newstring+'"'
+ else:
+ indoub=not indoub
+ pos=pos+1
+ continue
+ if (not insing):
+ #expansion time
+ if (mystring[pos]=="\n"):
+ #convert newlines to spaces
+ newstring=newstring+" "
+ pos=pos+1
+ elif (mystring[pos]=="\\"):
+ #backslash expansion time
+ if (pos+1>=len(mystring)):
+ newstring=newstring+mystring[pos]
+ break
+ else:
+ a=mystring[pos+1]
+ pos=pos+2
+ if a=='a':
+ newstring=newstring+chr(007)
+ elif a=='b':
+ newstring=newstring+chr(010)
+ elif a=='e':
+ newstring=newstring+chr(033)
+ elif (a=='f') or (a=='n'):
+ newstring=newstring+chr(012)
+ elif a=='r':
+ newstring=newstring+chr(015)
+ elif a=='t':
+ newstring=newstring+chr(011)
+ elif a=='v':
+ newstring=newstring+chr(013)
+ elif a!='\n':
+ #remove backslash only, as bash does: this takes care of \\ and \' and \" as well
+ newstring=newstring+mystring[pos-1:pos]
+ continue
+ elif (mystring[pos]=="$") and (mystring[pos-1]!="\\"):
+ pos=pos+1
+ if mystring[pos]=="{":
+ pos=pos+1
+ braced=True
+ else:
+ braced=False
+ myvstart=pos
+ validchars=string.ascii_letters+string.digits+"_"
+ while mystring[pos] in validchars:
+ if (pos+1)>=len(mystring):
+ if braced:
+ cexpand[mystring]=""
+ return ""
+ else:
+ pos=pos+1
+ break
+ pos=pos+1
+ myvarname=mystring[myvstart:pos]
+ if braced:
+ if mystring[pos]!="}":
+ cexpand[mystring]=""
+ return ""
+ else:
+ pos=pos+1
+ if len(myvarname)==0:
+ cexpand[mystring]=""
+ return ""
+ numvars=numvars+1
+ if mydict.has_key(myvarname):
+ newstring=newstring+mydict[myvarname]
+ else:
+ newstring=newstring+mystring[pos]
+ pos=pos+1
+ else:
+ newstring=newstring+mystring[pos]
+ pos=pos+1
+ if numvars==0:
+ cexpand[mystring]=newstring[1:]
+ return newstring[1:]
+
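+# Example:
+#
+#   >>> varexpand("${ROOT}usr/lib", {"ROOT": "/"})
+#   '/usr/lib'
+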
+def pickle_write(data,filename,debug=0):
+ import os
+ try:
+ myf=open(filename,"w")
+ cPickle.dump(data,myf,-1)
+ myf.flush()
+ myf.close()
+ writemsg("Wrote pickle: "+str(filename)+"\n",1)
+		os.chown(filename,uid,portage_gid)
+		os.chmod(filename,0664)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ return 0
+ return 1
+
+def pickle_read(filename,default=None,debug=0):
+ import os
+ if not os.access(filename, os.R_OK):
+ writemsg("pickle_read(): File not readable. '"+filename+"'\n",1)
+ return default
+ data = None
+ try:
+ myf = open(filename)
+ mypickle = cPickle.Unpickler(myf)
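+		# find_global=None keeps the unpickler from resolving GLOBAL
+		# opcodes, so a tampered cache file cannot instantiate
+		# arbitrary classes or functions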
+ mypickle.find_global = None
+ data = mypickle.load()
+ myf.close()
+ del mypickle,myf
+ writemsg("pickle_read(): Loaded pickle. '"+filename+"'\n",1)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ writemsg("!!! Failed to load pickle: "+str(e)+"\n",1)
+ data = default
+ return data
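+
+# Round-trip sketch (path is illustrative):
+#   pickle_write({"answer": 42}, "/tmp/state.pickle")  # 1 on success, else 0
+#   pickle_read("/tmp/state.pickle", default={})       # {"answer": 42}
+# pickle_write swallows every exception and returns 0; pickle_read falls
+# back to `default` when the file is unreadable or unpickling fails.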
+
+def dump_traceback(msg, noiselevel=1):
+ import sys, traceback
+ info = sys.exc_info()
+ if not info[2]:
+ stack = traceback.extract_stack()[:-1]
+ error = None
+ else:
+ stack = traceback.extract_tb(info[2])
+ error = str(info[1])
+ writemsg("\n====================================\n", noiselevel=noiselevel)
+ writemsg("%s\n\n" % msg, noiselevel=noiselevel)
+ for line in traceback.format_list(stack):
+ writemsg(line, noiselevel=noiselevel)
+ if error:
+ writemsg(error+"\n", noiselevel=noiselevel)
+ writemsg("====================================\n\n", noiselevel=noiselevel)
+
+def unique_array(s):
+ """lifted from python cookbook, credit: Tim Peters
+ Return a list of the elements in s in arbitrary order, sans duplicates"""
+ n = len(s)
+ # assume all elements are hashable, if so, it's linear
+ try:
+ return list(set(s))
+ except TypeError:
+ pass
+
+ # so much for linear. abuse sort.
+ try:
+ t = list(s)
+ t.sort()
+ except TypeError:
+ pass
+ else:
+ assert n > 0
+ last = t[0]
+ lasti = i = 1
+ while i < n:
+ if t[i] != last:
+ t[lasti] = last = t[i]
+ lasti += 1
+ i += 1
+ return t[:lasti]
+
+ # blah. back to original portage.unique_array
+ u = []
+ for x in s:
+ if x not in u:
+ u.append(x)
+ return u
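+
+# e.g. (result order is arbitrary):
+#   >>> sorted(unique_array([3, 1, 3, 2, 1]))
+#   [1, 2, 3]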
+
+def apply_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1,
+ stat_cached=None, follow_links=True):
+ """Apply user, group, and mode bits to a file if the existing bits do not
+ already match. The default behavior is to force an exact match of mode
+ bits. When mask=0 is specified, mode bits on the target file are allowed
+	to be a superset of the mode argument (via logical OR). When mask>0, any
+	mode bits that are set in mask are cleared from the target file's mode,
+	restricting which bits it is allowed to have.
+ Returns True if the permissions were modified and False otherwise."""
+
+ modified = False
+
+ if stat_cached is None:
+ try:
+ if follow_links:
+ stat_cached = os.stat(filename)
+ else:
+ stat_cached = os.lstat(filename)
+ except OSError, oe:
+ func_call = "stat('%s')" % filename
+ if oe.errno == errno.EPERM:
+ raise OperationNotPermitted(func_call)
+ elif oe.errno == errno.EACCES:
+ raise PermissionDenied(func_call)
+ elif oe.errno == errno.ENOENT:
+ raise FileNotFound(filename)
+ else:
+ raise
+
+ if (uid != -1 and uid != stat_cached.st_uid) or \
+ (gid != -1 and gid != stat_cached.st_gid):
+ try:
+ if follow_links:
+ os.chown(filename, uid, gid)
+ else:
+ import portage_data
+ portage_data.lchown(filename, uid, gid)
+ modified = True
+ except OSError, oe:
+ func_call = "chown('%s', %i, %i)" % (filename, uid, gid)
+ if oe.errno == errno.EPERM:
+ raise OperationNotPermitted(func_call)
+ elif oe.errno == errno.EACCES:
+ raise PermissionDenied(func_call)
+ elif oe.errno == errno.EROFS:
+ raise ReadOnlyFileSystem(func_call)
+ elif oe.errno == errno.ENOENT:
+ raise FileNotFound(filename)
+ else:
+ raise
+
+ new_mode = -1
+ st_mode = stat_cached.st_mode & 07777 # protect from unwanted bits
+ if mask >= 0:
+ if mode == -1:
+ mode = 0 # Don't add any mode bits when mode is unspecified.
+ else:
+ mode = mode & 07777
+ if (mode & st_mode != mode) or \
+ ((mask ^ st_mode) & st_mode != st_mode):
+ new_mode = mode | st_mode
+ new_mode = (mask ^ new_mode) & new_mode
+ elif mode != -1:
+ mode = mode & 07777 # protect from unwanted bits
+ if mode != st_mode:
+ new_mode = mode
+
+ # The chown system call may clear S_ISUID and S_ISGID
+ # bits, so those bits are restored if necessary.
+ if modified and new_mode == -1 and \
+ (st_mode & stat.S_ISUID or st_mode & stat.S_ISGID):
+ if mode == -1:
+ new_mode = st_mode
+ else:
+ mode = mode & 07777
+ if mask >= 0:
+ new_mode = mode | st_mode
+ new_mode = (mask ^ new_mode) & new_mode
+ else:
+ new_mode = mode
+ if not (new_mode & stat.S_ISUID or new_mode & stat.S_ISGID):
+ new_mode = -1
+
+ if not follow_links and stat.S_ISLNK(stat_cached.st_mode):
+ # Mode doesn't matter for symlinks.
+ new_mode = -1
+
+ if new_mode != -1:
+ try:
+ os.chmod(filename, new_mode)
+ modified = True
+ except OSError, oe:
+ func_call = "chmod('%s', %s)" % (filename, oct(new_mode))
+ if oe.errno == errno.EPERM:
+ raise OperationNotPermitted(func_call)
+ elif oe.errno == errno.EACCES:
+ raise PermissionDenied(func_call)
+ elif oe.errno == errno.EROFS:
+ raise ReadOnlyFileSystem(func_call)
+			elif oe.errno == errno.ENOENT:
+				raise FileNotFound(filename)
+			else:
+				raise
+ return modified
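+
+# Mode/mask semantics in brief ("/some/file" is illustrative):
+#   apply_permissions("/some/file", mode=0644)          # force exactly 0644
+#   apply_permissions("/some/file", mode=0644, mask=0)  # an existing 0755 is
+#                                                       # a superset, so kept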
+
+def apply_stat_permissions(filename, newstat, **kwargs):
+ """A wrapper around apply_secpass_permissions that gets
+ uid, gid, and mode from a stat object"""
+ return apply_secpass_permissions(filename, uid=newstat.st_uid, gid=newstat.st_gid,
+ mode=newstat.st_mode, **kwargs)
+
+def apply_recursive_permissions(top, uid=-1, gid=-1,
+ dirmode=-1, dirmask=-1, filemode=-1, filemask=-1, onerror=None):
+ """A wrapper around apply_secpass_permissions that applies permissions
+ recursively. If optional argument onerror is specified, it should be a
+ function; it will be called with one argument, a PortageException instance.
+ Returns True if all permissions are applied and False if some are left
+ unapplied."""
+
+ if onerror is None:
+ # Default behavior is to dump errors to stderr so they won't
+ # go unnoticed. Callers can pass in a quiet instance.
+ def onerror(e):
+ if isinstance(e, OperationNotPermitted):
+ writemsg("Operation Not Permitted: %s\n" % str(e),
+ noiselevel=-1)
+ elif isinstance(e, FileNotFound):
+ writemsg("File Not Found: '%s'\n" % str(e), noiselevel=-1)
+ else:
+ raise
+
+ all_applied = True
+ for dirpath, dirnames, filenames in os.walk(top):
+ try:
+ applied = apply_secpass_permissions(dirpath,
+ uid=uid, gid=gid, mode=dirmode, mask=dirmask)
+ if not applied:
+ all_applied = False
+ except PortageException, e:
+ all_applied = False
+ onerror(e)
+
+ for name in filenames:
+ try:
+ applied = apply_secpass_permissions(os.path.join(dirpath, name),
+ uid=uid, gid=gid, mode=filemode, mask=filemask)
+ if not applied:
+ all_applied = False
+ except PortageException, e:
+ all_applied = False
+ onerror(e)
+ return all_applied
+
+def apply_secpass_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1,
+ stat_cached=None, follow_links=True):
+ """A wrapper around apply_permissions that uses secpass and simple
+ logic to apply as much of the permissions as possible without
+ generating an obviously avoidable permission exception. Despite
+ attempts to avoid an exception, it's possible that one will be raised
+ anyway, so be prepared.
+ Returns True if all permissions are applied and False if some are left
+ unapplied."""
+
+ if stat_cached is None:
+ try:
+ if follow_links:
+ stat_cached = os.stat(filename)
+ else:
+ stat_cached = os.lstat(filename)
+ except OSError, oe:
+ func_call = "stat('%s')" % filename
+ if oe.errno == errno.EPERM:
+ raise OperationNotPermitted(func_call)
+ elif oe.errno == errno.EACCES:
+ raise PermissionDenied(func_call)
+ elif oe.errno == errno.ENOENT:
+ raise FileNotFound(filename)
+ else:
+ raise
+
+ all_applied = True
+
+ import portage_data # not imported globally because of circular dep
+ if portage_data.secpass < 2:
+
+ if uid != -1 and \
+ uid != stat_cached.st_uid:
+ all_applied = False
+ uid = -1
+
+ if gid != -1 and \
+ gid != stat_cached.st_gid and \
+ gid not in os.getgroups():
+ all_applied = False
+ gid = -1
+
+ apply_permissions(filename, uid=uid, gid=gid, mode=mode, mask=mask,
+ stat_cached=stat_cached, follow_links=follow_links)
+ return all_applied
+
+class atomic_ofstream(file):
+ """Write a file atomically via os.rename(). Atomic replacement prevents
+ interprocess interference and prevents corruption of the target
+ file when the write is interrupted (for example, when an 'out of space'
+ error occurs)."""
+
+ def __init__(self, filename, mode='w', follow_links=True, **kargs):
+ """Opens a temporary filename.pid in the same directory as filename."""
+ self._aborted = False
+
+ if follow_links:
+ canonical_path = os.path.realpath(filename)
+ self._real_name = canonical_path
+ tmp_name = "%s.%i" % (canonical_path, os.getpid())
+ try:
+ super(atomic_ofstream, self).__init__(tmp_name, mode=mode, **kargs)
+ return
+ except (OSError, IOError), e:
+ if canonical_path == filename:
+ raise
+ writemsg("!!! Failed to open file: '%s'\n" % tmp_name,
+ noiselevel=-1)
+ writemsg("!!! %s\n" % str(e), noiselevel=-1)
+
+ self._real_name = filename
+ tmp_name = "%s.%i" % (filename, os.getpid())
+ super(atomic_ofstream, self).__init__(tmp_name, mode=mode, **kargs)
+
+ def close(self):
+ """Closes the temporary file, copies permissions (if possible),
+ and performs the atomic replacement via os.rename(). If the abort()
+ method has been called, then the temp file is closed and removed."""
+ if not self.closed:
+ try:
+ super(atomic_ofstream, self).close()
+ if not self._aborted:
+ try:
+ apply_stat_permissions(self.name, os.stat(self._real_name))
+ except OperationNotPermitted:
+ pass
+ except FileNotFound:
+ pass
+ except OSError, oe: # from the above os.stat call
+ if oe.errno in (errno.ENOENT, errno.EPERM):
+ pass
+ else:
+ raise
+ os.rename(self.name, self._real_name)
+ finally:
+ # Make sure we cleanup the temp file
+ # even if an exception is raised.
+ try:
+ os.unlink(self.name)
+ except OSError, oe:
+ pass
+
+ def abort(self):
+ """If an error occurs while writing the file, the user should
+ call this method in order to leave the target file unchanged.
+ This will call close() automatically."""
+ if not self._aborted:
+ self._aborted = True
+ self.close()
+
+ def __del__(self):
+		"""If the user does not explicitly call close(), it is
+ assumed that an error has occurred, so we abort()."""
+ if not self.closed:
+ self.abort()
+ # ensure destructor from the base class is called
+ base_destructor = getattr(super(atomic_ofstream, self), '__del__', None)
+ if base_destructor is not None:
+ base_destructor()
+
+def write_atomic(file_path, content):
+ f = None
+ try:
+ f = atomic_ofstream(file_path)
+ f.write(content)
+ f.close()
+ except (IOError, OSError), e:
+ if f:
+ f.abort()
+ func_call = "write_atomic('%s')" % file_path
+ if e.errno == errno.EPERM:
+ raise OperationNotPermitted(func_call)
+ elif e.errno == errno.EACCES:
+ raise PermissionDenied(func_call)
+ elif e.errno == errno.EROFS:
+ raise ReadOnlyFileSystem(func_call)
+ elif e.errno == errno.ENOENT:
+ raise FileNotFound(file_path)
+ else:
+ raise
+
+def ensure_dirs(dir_path, *args, **kwargs):
+ """Create a directory and call apply_permissions.
+ Returns True if a directory is created or the permissions needed to be
+ modified, and False otherwise."""
+
+ created_dir = False
+
+ try:
+ os.makedirs(dir_path)
+ created_dir = True
+ except OSError, oe:
+ func_call = "makedirs('%s')" % dir_path
+ if errno.EEXIST == oe.errno:
+ pass
+ elif oe.errno == errno.EPERM:
+ raise OperationNotPermitted(func_call)
+ elif oe.errno == errno.EACCES:
+ raise PermissionDenied(func_call)
+ elif oe.errno == errno.EROFS:
+ raise ReadOnlyFileSystem(func_call)
+ else:
+ raise
+ perms_modified = apply_permissions(dir_path, *args, **kwargs)
+ return created_dir or perms_modified
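+
+# e.g. (path is illustrative): create the directory if missing and force its
+# mode to exactly 0775; returns True if anything had to change:
+#   ensure_dirs("/var/tmp/foo", mode=0775)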
+
+class LazyItemsDict(dict):
+ """A mapping object that behaves like a standard dict except that it allows
+ for lazy initialization of values via callable objects. Lazy items can be
+ overwritten and deleted just as normal items."""
+ def __init__(self, initial_items=None):
+ dict.__init__(self)
+ self.lazy_items = {}
+ if initial_items is not None:
+ self.update(initial_items)
+ def addLazyItem(self, item_key, value_callable, *pargs, **kwargs):
+ """Add a lazy item for the given key. When the item is requested,
+ value_callable will be called with *pargs and **kwargs arguments."""
+ self.lazy_items[item_key] = (value_callable, pargs, kwargs)
+ # make it show up in self.keys(), etc...
+ dict.__setitem__(self, item_key, None)
+ def addLazySingleton(self, item_key, value_callable, *pargs, **kwargs):
+ """This is like addLazyItem except value_callable will only be called
+ a maximum of 1 time and the result will be cached for future requests."""
+ class SingletonItem(object):
+ def __init__(self, value_callable, *pargs, **kwargs):
+ self._callable = value_callable
+ self._pargs = pargs
+ self._kwargs = kwargs
+ self._called = False
+ def __call__(self):
+ if not self._called:
+ self._called = True
+ self._value = self._callable(*self._pargs, **self._kwargs)
+ return self._value
+ self.addLazyItem(item_key, SingletonItem(value_callable, *pargs, **kwargs))
+ def update(self, map_obj):
+ if isinstance(map_obj, LazyItemsDict):
+ for k in map_obj:
+ if k in map_obj.lazy_items:
+ dict.__setitem__(self, k, None)
+ else:
+ dict.__setitem__(self, k, map_obj[k])
+ self.lazy_items.update(map_obj.lazy_items)
+ else:
+ dict.update(self, map_obj)
+ def __getitem__(self, item_key):
+ if item_key in self.lazy_items:
+ value_callable, pargs, kwargs = self.lazy_items[item_key]
+ return value_callable(*pargs, **kwargs)
+ else:
+ return dict.__getitem__(self, item_key)
+ def __setitem__(self, item_key, value):
+ if item_key in self.lazy_items:
+ del self.lazy_items[item_key]
+ dict.__setitem__(self, item_key, value)
+ def __delitem__(self, item_key):
+ if item_key in self.lazy_items:
+ del self.lazy_items[item_key]
+ dict.__delitem__(self, item_key)
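+
+# Usage sketch:
+#   >>> import os, time
+#   >>> d = LazyItemsDict()
+#   >>> d.addLazyItem("now", time.time)       # evaluated on every access
+#   >>> d.addLazySingleton("pid", os.getpid)  # evaluated once, then cached
+#   >>> "now" in d                            # lazy keys show up normally
+#   True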
+
+class ConfigProtect(object):
+ def __init__(self, myroot, protect_list, mask_list):
+ self.myroot = myroot
+ self.protect_list = protect_list
+ self.mask_list = mask_list
+ self.updateprotect()
+
+ def updateprotect(self):
+ """Update internal state for isprotected() calls. Nonexistent paths
+ are ignored."""
+ self.protect = []
+ self._dirs = set()
+ for x in self.protect_list:
+ ppath = normalize_path(
+ os.path.join(self.myroot, x.lstrip(os.path.sep)))
+ mystat = None
+ try:
+ if stat.S_ISDIR(os.stat(ppath).st_mode):
+ self._dirs.add(ppath)
+ self.protect.append(ppath)
+ except OSError:
+ # If it doesn't exist, there's no need to protect it.
+ pass
+
+ self.protectmask = []
+ for x in self.mask_list:
+ ppath = normalize_path(
+ os.path.join(self.myroot, x.lstrip(os.path.sep)))
+ mystat = None
+ try:
+ """Use lstat so that anything, even a broken symlink can be
+ protected."""
+ if stat.S_ISDIR(os.lstat(ppath).st_mode):
+ self._dirs.add(ppath)
+ self.protectmask.append(ppath)
+ """Now use stat in case this is a symlink to a directory."""
+ if stat.S_ISDIR(os.stat(ppath).st_mode):
+ self._dirs.add(ppath)
+ except OSError:
+ # If it doesn't exist, there's no need to mask it.
+ pass
+
+ def isprotected(self, obj):
+ """Returns True if obj is protected, False otherwise. The caller must
+ ensure that obj is normalized with a single leading slash. A trailing
+ slash is optional for directories."""
+ masked = 0
+ protected = 0
+ sep = os.path.sep
+ for ppath in self.protect:
+ if len(ppath) > masked and obj.startswith(ppath):
+ if ppath in self._dirs:
+ if obj != ppath and not obj.startswith(ppath + sep):
+ # /etc/foo does not match /etc/foobaz
+ continue
+ elif obj != ppath:
+ # force exact match when CONFIG_PROTECT lists a
+ # non-directory
+ continue
+ protected = len(ppath)
+ #config file management
+ for pmpath in self.protectmask:
+ if len(pmpath) >= protected and obj.startswith(pmpath):
+ if pmpath in self._dirs:
+ if obj != pmpath and \
+ not obj.startswith(pmpath + sep):
+ # /etc/foo does not match /etc/foobaz
+ continue
+ elif obj != pmpath:
+ # force exact match when CONFIG_PROTECT_MASK lists
+ # a non-directory
+ continue
+ #skip, it's in the mask
+ masked = len(pmpath)
+ return protected > masked
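+
+# Sketch (assumes /etc and /etc/env.d exist as directories):
+#   >>> cp = ConfigProtect("/", ["/etc"], ["/etc/env.d"])
+#   >>> cp.isprotected("/etc/fstab")
+#   True
+#   >>> cp.isprotected("/etc/env.d/00basic")
+#   False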
+
+def new_protect_filename(mydest, newmd5=None):
+	"""Resolves a config-protect filename for merging. Returns mydest
+	itself if it does not exist yet. Otherwise returns the next free
+	._cfg####_<name> path in the same directory, or the most recent
+	existing ._cfg file when its md5 matches newmd5 (so an identical
+	update reuses that file instead of creating a new one)."""
+
+ # config protection filename format:
+ # ._cfg0000_foo
+ # 0123456789012
+ prot_num = -1
+ last_pfile = ""
+
+ if not os.path.exists(mydest):
+ return mydest
+
+ real_filename = os.path.basename(mydest)
+ real_dirname = os.path.dirname(mydest)
+ for pfile in os.listdir(real_dirname):
+ if pfile[0:5] != "._cfg":
+ continue
+ if pfile[10:] != real_filename:
+ continue
+ try:
+ new_prot_num = int(pfile[5:9])
+ if new_prot_num > prot_num:
+ prot_num = new_prot_num
+ last_pfile = pfile
+ except ValueError:
+ continue
+ prot_num = prot_num + 1
+
+ new_pfile = normalize_path(os.path.join(real_dirname,
+ "._cfg" + str(prot_num).zfill(4) + "_" + real_filename))
+ old_pfile = normalize_path(os.path.join(real_dirname, last_pfile))
+ if last_pfile and newmd5:
+ import portage_checksum
+ if portage_checksum.perform_md5(
+ os.path.join(real_dirname, last_pfile)) == newmd5:
+ return old_pfile
+ return new_pfile
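+
+# e.g. if /etc/foo already exists and has no ._cfg entries yet:
+#   new_protect_filename("/etc/foo")  ->  "/etc/._cfg0000_foo"
+# later merges yield ._cfg0001_foo, ._cfg0002_foo, and so on, unless the
+# newest entry's md5 already matches newmd5.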
diff --git a/pym/portage/versions.py b/pym/portage/versions.py
new file mode 100644
index 00000000..63d69bac
--- /dev/null
+++ b/pym/portage/versions.py
@@ -0,0 +1,314 @@
+# portage_versions.py -- core Portage functionality
+# Copyright 1998-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+import re
+
+ver_regexp = re.compile("^(cvs\\.)?(\\d+)((\\.\\d+)*)([a-z]?)((_(pre|p|beta|alpha|rc)\\d*)*)(-r(\\d+))?$")
+suffix_regexp = re.compile("^(alpha|beta|rc|pre|p)(\\d*)$")
+suffix_value = {"pre": -2, "p": 0, "alpha": -4, "beta": -3, "rc": -1}
+endversion_keys = ["pre", "p", "alpha", "beta", "rc"]
+
+from portage_exception import InvalidData
+
+def ververify(myver, silent=1):
+ if ver_regexp.match(myver):
+ return 1
+ else:
+ if not silent:
+ print "!!! syntax error in version: %s" % myver
+ return 0
+
+vercmp_cache = {}
+def vercmp(ver1, ver2, silent=1):
+ """
+ Compare two versions
+ Example usage:
+ >>> from portage_versions import vercmp
+ >>> vercmp('1.0-r1','1.2-r3')
+ negative number
+ >>> vercmp('1.3','1.2-r3')
+ positive number
+ >>> vercmp('1.0_p3','1.0_p3')
+ 0
+
+	@param ver1: version to compare with (see ver_regexp in portage_versions.py)
+	@type ver1: string (example: "2.1.2-r3")
+	@param ver2: version to compare against (see ver_regexp in portage_versions.py)
+	@type ver2: string (example: "2.1.2_rc5")
+	@rtype: None, int, or float
+ @return:
+ 1. positive if ver1 is greater than ver2
+ 2. negative if ver1 is less than ver2
+ 3. 0 if ver1 equals ver2
+ 4. None if ver1 or ver2 are invalid (see ver_regexp in portage_versions.py)
+ """
+
+ if ver1 == ver2:
+ return 0
+ mykey=ver1+":"+ver2
+ try:
+ return vercmp_cache[mykey]
+ except KeyError:
+ pass
+ match1 = ver_regexp.match(ver1)
+ match2 = ver_regexp.match(ver2)
+
+ # checking that the versions are valid
+ if not match1 or not match1.groups():
+ if not silent:
+ print "!!! syntax error in version: %s" % ver1
+ return None
+ if not match2 or not match2.groups():
+ if not silent:
+ print "!!! syntax error in version: %s" % ver2
+ return None
+
+ # shortcut for cvs ebuilds (new style)
+ if match1.group(1) and not match2.group(1):
+ vercmp_cache[mykey] = 1
+ return 1
+ elif match2.group(1) and not match1.group(1):
+ vercmp_cache[mykey] = -1
+ return -1
+
+ # building lists of the version parts before the suffix
+ # first part is simple
+ list1 = [int(match1.group(2))]
+ list2 = [int(match2.group(2))]
+
+ # this part would greatly benefit from a fixed-length version pattern
+ if len(match1.group(3)) or len(match2.group(3)):
+ vlist1 = match1.group(3)[1:].split(".")
+ vlist2 = match2.group(3)[1:].split(".")
+ for i in range(0, max(len(vlist1), len(vlist2))):
+			# Implicit .0 is given a value of -1, so that 1.0.0 > 1.0, since it
+ # would be ambiguous if two versions that aren't literally equal
+ # are given the same value (in sorting, for example).
+ if len(vlist1) <= i or len(vlist1[i]) == 0:
+ list1.append(-1)
+ list2.append(int(vlist2[i]))
+ elif len(vlist2) <= i or len(vlist2[i]) == 0:
+ list1.append(int(vlist1[i]))
+ list2.append(-1)
+ # Let's make life easy and use integers unless we're forced to use floats
+ elif (vlist1[i][0] != "0" and vlist2[i][0] != "0"):
+ list1.append(int(vlist1[i]))
+ list2.append(int(vlist2[i]))
+ # now we have to use floats so 1.02 compares correctly against 1.1
+ else:
+ list1.append(float("0."+vlist1[i]))
+ list2.append(float("0."+vlist2[i]))
+
+ # and now the final letter
+ if len(match1.group(5)):
+ list1.append(ord(match1.group(5)))
+ if len(match2.group(5)):
+ list2.append(ord(match2.group(5)))
+
+ for i in range(0, max(len(list1), len(list2))):
+ if len(list1) <= i:
+ vercmp_cache[mykey] = -1
+ return -1
+ elif len(list2) <= i:
+ vercmp_cache[mykey] = 1
+ return 1
+ elif list1[i] != list2[i]:
+ vercmp_cache[mykey] = list1[i] - list2[i]
+ return list1[i] - list2[i]
+
+ # main version is equal, so now compare the _suffix part
+ list1 = match1.group(6).split("_")[1:]
+ list2 = match2.group(6).split("_")[1:]
+
+ for i in range(0, max(len(list1), len(list2))):
+ if len(list1) <= i:
+ s1 = ("p","0")
+ else:
+ s1 = suffix_regexp.match(list1[i]).groups()
+ if len(list2) <= i:
+ s2 = ("p","0")
+ else:
+ s2 = suffix_regexp.match(list2[i]).groups()
+ if s1[0] != s2[0]:
+ return suffix_value[s1[0]] - suffix_value[s2[0]]
+ if s1[1] != s2[1]:
+ # it's possible that the s(1|2)[1] == ''
+ # in such a case, fudge it.
+ try: r1 = int(s1[1])
+ except ValueError: r1 = 0
+ try: r2 = int(s2[1])
+ except ValueError: r2 = 0
+ return r1 - r2
+
+ # the suffix part is equal to, so finally check the revision
+ if match1.group(10):
+ r1 = int(match1.group(10))
+ else:
+ r1 = 0
+ if match2.group(10):
+ r2 = int(match2.group(10))
+ else:
+ r2 = 0
+ vercmp_cache[mykey] = r1 - r2
+ return r1 - r2
+
+def pkgcmp(pkg1, pkg2):
+ """
+ Compare 2 package versions created in pkgsplit format.
+
+ Example usage:
+ >>> from portage_versions import *
+ >>> pkgcmp(pkgsplit('test-1.0-r1'),pkgsplit('test-1.2-r3'))
+ -1
+ >>> pkgcmp(pkgsplit('test-1.3'),pkgsplit('test-1.2-r3'))
+ 1
+
+ @param pkg1: package to compare with
+ @type pkg1: list (example: ['test', '1.0', 'r1'])
+	@param pkg2: package to compare against
+ @type pkg2: list (example: ['test', '1.0', 'r1'])
+ @rtype: None or integer
+ @return:
+ 1. None if package names are not the same
+ 2. 1 if pkg1 is greater than pkg2
+ 3. -1 if pkg1 is less than pkg2
+ 4. 0 if pkg1 equals pkg2
+ """
+ if pkg1[0] != pkg2[0]:
+ return None
+ mycmp=vercmp(pkg1[1],pkg2[1])
+ if mycmp>0:
+ return 1
+ if mycmp<0:
+ return -1
+ r1=float(pkg1[2][1:])
+ r2=float(pkg2[2][1:])
+ if r1>r2:
+ return 1
+ if r2>r1:
+ return -1
+ return 0
+
+
+pkgcache={}
+
+def pkgsplit(mypkg,silent=1):
+ try:
+ if not pkgcache[mypkg]:
+ return None
+ return pkgcache[mypkg][:]
+ except KeyError:
+ pass
+ myparts=mypkg.split("-")
+
+ if len(myparts)<2:
+ if not silent:
+ print "!!! Name error in",mypkg+": missing a version or name part."
+ pkgcache[mypkg]=None
+ return None
+ for x in myparts:
+ if len(x)==0:
+ if not silent:
+ print "!!! Name error in",mypkg+": empty \"-\" part."
+ pkgcache[mypkg]=None
+ return None
+
+ #verify rev
+ revok=0
+ myrev=myparts[-1]
+ if len(myrev) and myrev[0]=="r":
+ try:
+ int(myrev[1:])
+ revok=1
+ except ValueError: # from int()
+ pass
+ if revok:
+ verPos = -2
+ revision = myparts[-1]
+ else:
+ verPos = -1
+ revision = "r0"
+
+ if ververify(myparts[verPos]):
+ if len(myparts)== (-1*verPos):
+ pkgcache[mypkg]=None
+ return None
+ else:
+ for x in myparts[:verPos]:
+ if ververify(x):
+ pkgcache[mypkg]=None
+ return None
+ #names can't have versiony looking parts
+ myval=["-".join(myparts[:verPos]),myparts[verPos],revision]
+ pkgcache[mypkg]=myval
+ return myval
+ else:
+ pkgcache[mypkg]=None
+ return None
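+
+# e.g.:
+#   >>> pkgsplit("mypkg-1.0-r1")
+#   ['mypkg', '1.0', 'r1']
+#   >>> pkgsplit("mypkg-1.0")      # missing revision defaults to r0
+#   ['mypkg', '1.0', 'r0']
+#   >>> pkgsplit("mypkg") is None  # no version part
+#   True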
+
+_valid_category = re.compile("^\w[\w-]*")
+
+catcache={}
+def catpkgsplit(mydata,silent=1):
+ """
+ Takes a Category/Package-Version-Rev and returns a list of each.
+
+ @param mydata: Data to split
+ @type mydata: string
+ @param silent: suppress error messages
+ @type silent: Boolean (integer)
+	@rtype: list
+ @return:
+ 1. If each exists, it returns [cat, pkgname, version, rev]
+	2. If cat is not specified in mydata, cat will be "null"
+	3. If rev does not exist it will be 'r0'
+ 4. If cat is invalid (specified but has incorrect syntax)
+ an InvalidData Exception will be thrown
+ """
+
+	# Categories may contain a-zA-Z0-9+_- but cannot start with -
+ global _valid_category
+ import portage_dep
+ try:
+ if not catcache[mydata]:
+ return None
+ return catcache[mydata][:]
+ except KeyError:
+ pass
+ mysplit=mydata.split("/")
+ p_split=None
+ if len(mysplit)==1:
+ retval=["null"]
+ p_split=pkgsplit(mydata,silent=silent)
+ elif len(mysplit)==2:
+ if portage_dep._dep_check_strict and \
+ not _valid_category.match(mysplit[0]):
+ raise InvalidData("Invalid category in %s" %mydata )
+ retval=[mysplit[0]]
+ p_split=pkgsplit(mysplit[1],silent=silent)
+ if not p_split:
+ catcache[mydata]=None
+ return None
+ retval.extend(p_split)
+ catcache[mydata]=retval
+ return retval
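+
+# e.g.:
+#   >>> catpkgsplit("app-misc/foo-1.0-r1")
+#   ['app-misc', 'foo', '1.0', 'r1']
+#   >>> catpkgsplit("foo-1.0")  # no category given
+#   ['null', 'foo', '1.0', 'r0']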
+
+def catsplit(mydep):
+ return mydep.split("/", 1)
+
+def best(mymatches):
+ """Accepts None arguments; assumes matches are valid."""
+ if mymatches is None:
+ return ""
+ if not len(mymatches):
+ return ""
+ bestmatch = mymatches[0]
+ p2 = catpkgsplit(bestmatch)[1:]
+ for x in mymatches[1:]:
+ p1 = catpkgsplit(x)[1:]
+ if pkgcmp(p1, p2) > 0:
+ bestmatch = x
+ p2 = catpkgsplit(bestmatch)[1:]
+ return bestmatch
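+
+# e.g.:
+#   >>> best(["app-misc/foo-1.0", "app-misc/foo-1.2-r1"])
+#   'app-misc/foo-1.2-r1'
+#   >>> best([])
+#   ''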
diff --git a/pym/portage/xpak.py b/pym/portage/xpak.py
new file mode 100644
index 00000000..b7ef582e
--- /dev/null
+++ b/pym/portage/xpak.py
@@ -0,0 +1,421 @@
+# Copyright 2001-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+
+# The format for a tbz2/xpak:
+#
+# tbz2: tar.bz2 + xpak + (xpak_offset) + "STOP"
+# xpak: "XPAKPACK" + (index_len) + (data_len) + index + data + "XPAKSTOP"
+# index: (pathname_len) + pathname + (data_offset) + (data_len)
+# index entries are concatenated end-to-end.
+# data: concatenated data chunks, end-to-end.
+#
+# [tarball]XPAKPACKIIIIDDDD[index][data]XPAKSTOPOOOOSTOP
+#
+# (integer) == encodeint(integer) ===> 4 characters (big-endian copy)
+# '+' means concatenate the fields ===> All chunks are strings
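+#
+# Worked example (sketch; xpak_mem() is defined below):
+#   xpak_mem({"CATEGORY": "app-misc\n"}) produces
+#     "XPAKPACK" + encodeint(20) + encodeint(9)                  # header
+#     + encodeint(8) + "CATEGORY" + encodeint(0) + encodeint(9)  # 20-byte index
+#     + "app-misc\n"                                             # 9-byte data
+#     + "XPAKSTOP"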
+
+import sys,os,shutil,errno
+from stat import *
+
+def addtolist(mylist,curdir):
+	"""(mylist, curdir) --- Recursively walks the current working directory and
+	appends each file path, prefixed with curdir, to mylist. Returns nothing;
+	mylist is modified in place."""
+ for x in os.listdir("."):
+ if os.path.isdir(x):
+ os.chdir(x)
+ addtolist(mylist,curdir+x+"/")
+ os.chdir("..")
+ else:
+ if curdir+x not in mylist:
+ mylist.append(curdir+x)
+
+def encodeint(myint):
+ """Takes a 4 byte integer and converts it into a string of 4 characters.
+ Returns the characters in a string."""
+ part1=chr((myint >> 24 ) & 0x000000ff)
+ part2=chr((myint >> 16 ) & 0x000000ff)
+ part3=chr((myint >> 8 ) & 0x000000ff)
+ part4=chr(myint & 0x000000ff)
+ return part1+part2+part3+part4
+
+def decodeint(mystring):
+ """Takes a 4 byte string and converts it into a 4 byte integer.
+ Returns an integer."""
+ myint=0
+ myint=myint+ord(mystring[3])
+ myint=myint+(ord(mystring[2]) << 8)
+ myint=myint+(ord(mystring[1]) << 16)
+ myint=myint+(ord(mystring[0]) << 24)
+ return myint
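+
+# e.g.:
+#   >>> encodeint(1)
+#   '\x00\x00\x00\x01'
+#   >>> decodeint(encodeint(131338))
+#   131338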
+
+def xpak(rootdir,outfile=None):
+	"""(rootdir,outfile) -- creates an xpak segment from the directory 'rootdir'
+	and writes it to 'outfile' if that is specified. Otherwise the xpak
+	segment is returned as a string."""
+ try:
+ origdir=os.getcwd()
+ except SystemExit, e:
+ raise
+ except:
+ os.chdir("/")
+ origdir="/"
+ os.chdir(rootdir)
+ mylist=[]
+
+ addtolist(mylist,"")
+ mylist.sort()
+ mydata = {}
+ for x in mylist:
+ a = open(x, "r")
+ mydata[x] = a.read()
+ a.close()
+ os.chdir(origdir)
+
+ xpak_segment = xpak_mem(mydata)
+ if outfile:
+ outf = open(outfile, "w")
+ outf.write(xpak_segment)
+ outf.close()
+ else:
+ return xpak_segment
+
+def xpak_mem(mydata):
+	"""Create an xpak segment from a map object."""
+ indexglob=""
+ indexpos=0
+ dataglob=""
+ datapos=0
+ for x, newglob in mydata.iteritems():
+ mydatasize=len(newglob)
+ indexglob=indexglob+encodeint(len(x))+x+encodeint(datapos)+encodeint(mydatasize)
+ indexpos=indexpos+4+len(x)+4+4
+ dataglob=dataglob+newglob
+ datapos=datapos+mydatasize
+ return "XPAKPACK" \
+ + encodeint(len(indexglob)) \
+ + encodeint(len(dataglob)) \
+ + indexglob \
+ + dataglob \
+ + "XPAKSTOP"
+
+def xsplit(infile):
+ """(infile) -- Splits the infile into two files.
+ 'infile.index' contains the index segment.
+	'infile.dat' contains the data segment."""
+ myfile=open(infile,"r")
+ mydat=myfile.read()
+ myfile.close()
+
+ splits = xsplit_mem(mydat)
+ if not splits:
+ return False
+
+ myfile=open(infile+".index","w")
+ myfile.write(splits[0])
+ myfile.close()
+ myfile=open(infile+".dat","w")
+ myfile.write(splits[1])
+ myfile.close()
+ return True
+
+def xsplit_mem(mydat):
+ if mydat[0:8]!="XPAKPACK":
+ return None
+ if mydat[-8:]!="XPAKSTOP":
+ return None
+ indexsize=decodeint(mydat[8:12])
+ datasize=decodeint(mydat[12:16])
+ return (mydat[16:indexsize+16], mydat[indexsize+16:-8])
+
+def getindex(infile):
+ """(infile) -- grabs the index segment from the infile and returns it."""
+ myfile=open(infile,"r")
+ myheader=myfile.read(16)
+ if myheader[0:8]!="XPAKPACK":
+ myfile.close()
+ return
+ indexsize=decodeint(myheader[8:12])
+ myindex=myfile.read(indexsize)
+ myfile.close()
+ return myindex
+
+def getboth(infile):
+ """(infile) -- grabs the index and data segments from the infile.
+ Returns an array [indexSegment,dataSegment]"""
+ myfile=open(infile,"r")
+ myheader=myfile.read(16)
+ if myheader[0:8]!="XPAKPACK":
+ myfile.close()
+ return
+ indexsize=decodeint(myheader[8:12])
+ datasize=decodeint(myheader[12:16])
+ myindex=myfile.read(indexsize)
+ mydata=myfile.read(datasize)
+ myfile.close()
+ return myindex, mydata
+
+def listindex(myindex):
+ """Print to the terminal the filenames listed in the indexglob passed in."""
+ for x in getindex_mem(myindex):
+ print x
+
+def getindex_mem(myindex):
+ """Returns the filenames listed in the indexglob passed in."""
+ myindexlen=len(myindex)
+ startpos=0
+ myret=[]
+ while ((startpos+8)<myindexlen):
+ mytestlen=decodeint(myindex[startpos:startpos+4])
+ myret=myret+[myindex[startpos+4:startpos+4+mytestlen]]
+ startpos=startpos+mytestlen+12
+ return myret
+
+def searchindex(myindex,myitem):
+ """(index,item) -- Finds the offset and length of the file 'item' in the
+ datasegment via the index 'index' provided."""
+ mylen=len(myitem)
+ myindexlen=len(myindex)
+ startpos=0
+ while ((startpos+8)<myindexlen):
+ mytestlen=decodeint(myindex[startpos:startpos+4])
+ if mytestlen==mylen:
+ if myitem==myindex[startpos+4:startpos+4+mytestlen]:
+ #found
+ datapos=decodeint(myindex[startpos+4+mytestlen:startpos+8+mytestlen]);
+ datalen=decodeint(myindex[startpos+8+mytestlen:startpos+12+mytestlen]);
+ return datapos, datalen
+ startpos=startpos+mytestlen+12
+
+def getitem(myid,myitem):
+ myindex=myid[0]
+ mydata=myid[1]
+ myloc=searchindex(myindex,myitem)
+ if not myloc:
+ return None
+ return mydata[myloc[0]:myloc[0]+myloc[1]]
+
+def xpand(myid,mydest):
+ myindex=myid[0]
+ mydata=myid[1]
+ try:
+ origdir=os.getcwd()
+ except SystemExit, e:
+ raise
+ except:
+ os.chdir("/")
+ origdir="/"
+ os.chdir(mydest)
+ myindexlen=len(myindex)
+ startpos=0
+ while ((startpos+8)<myindexlen):
+ namelen=decodeint(myindex[startpos:startpos+4])
+ datapos=decodeint(myindex[startpos+4+namelen:startpos+8+namelen]);
+ datalen=decodeint(myindex[startpos+8+namelen:startpos+12+namelen]);
+ myname=myindex[startpos+4:startpos+4+namelen]
+ dirname=os.path.dirname(myname)
+ if dirname:
+ if not os.path.exists(dirname):
+ os.makedirs(dirname)
+ mydat=open(myname,"w")
+ mydat.write(mydata[datapos:datapos+datalen])
+ mydat.close()
+ startpos=startpos+namelen+12
+ os.chdir(origdir)
+
+class tbz2:
+ def __init__(self,myfile):
+ self.file=myfile
+ self.filestat=None
+ self.index=""
+ self.infosize=0
+ self.xpaksize=0
+ self.indexsize=None
+ self.datasize=None
+ self.indexpos=None
+ self.datapos=None
+ self.scan()
+
+ def decompose(self,datadir,cleanup=1):
+ """Alias for unpackinfo() --- Complement to recompose() but optionally
+ deletes the destination directory. Extracts the xpak from the tbz2 into
+ the directory provided. Raises IOError if scan() fails.
+		Returns the result of unpackinfo()."""
+ if not self.scan():
+ raise IOError
+ if cleanup:
+ self.cleanup(datadir)
+ if not os.path.exists(datadir):
+ os.makedirs(datadir)
+ return self.unpackinfo(datadir)
+ def compose(self,datadir,cleanup=0):
+ """Alias for recompose()."""
+		return self.recompose(datadir,cleanup)
+ def recompose(self,datadir,cleanup=0):
+ """Creates an xpak segment from the datadir provided, truncates the tbz2
+ to the end of regular data if an xpak segment already exists, and adds
+ the new segment to the file with terminating info."""
+ xpdata = xpak(datadir)
+ self.recompose_mem(xpdata)
+ if cleanup:
+ self.cleanup(datadir)
+
+ def recompose_mem(self, xpdata):
+ self.scan() # Don't care about condition... We'll rewrite the data anyway.
+ myfile=open(self.file,"a+")
+ if not myfile:
+ raise IOError
+ myfile.seek(-self.xpaksize,2) # 0,2 or -0,2 just mean EOF.
+ myfile.truncate()
+ myfile.write(xpdata+encodeint(len(xpdata))+"STOP")
+ myfile.flush()
+ myfile.close()
+ return 1
+
+ def cleanup(self, datadir):
+ datadir_split = os.path.split(datadir)
+ if len(datadir_split) >= 2 and len(datadir_split[1]) > 0:
+ # This is potentially dangerous,
+ # thus the above sanity check.
+ try:
+ shutil.rmtree(datadir)
+ except OSError, oe:
+ if oe.errno == errno.ENOENT:
+ pass
+ else:
+ raise oe
+
+ def scan(self):
+ """Scans the tbz2 to locate the xpak segment and setup internal values.
+ This function is called by relevant functions already."""
+ try:
+ mystat=os.stat(self.file)
+ if self.filestat:
+ changed=0
+ for x in [ST_SIZE, ST_MTIME, ST_CTIME]:
+ if mystat[x] != self.filestat[x]:
+ changed=1
+ if not changed:
+ return 1
+ self.filestat=mystat
+ a=open(self.file,"r")
+ a.seek(-16,2)
+ trailer=a.read()
+ self.infosize=0
+ self.xpaksize=0
+ if trailer[-4:]!="STOP":
+ a.close()
+ return 0
+ if trailer[0:8]!="XPAKSTOP":
+ a.close()
+ return 0
+ self.infosize=decodeint(trailer[8:12])
+ self.xpaksize=self.infosize+8
+ a.seek(-(self.xpaksize),2)
+ header=a.read(16)
+ if header[0:8]!="XPAKPACK":
+ a.close()
+ return 0
+ self.indexsize=decodeint(header[8:12])
+ self.datasize=decodeint(header[12:16])
+ self.indexpos=a.tell()
+ self.index=a.read(self.indexsize)
+ self.datapos=a.tell()
+ a.close()
+ return 2
+ except SystemExit, e:
+ raise
+ except:
+ return 0
+
+ def filelist(self):
+ """Return an array of each file listed in the index."""
+ if not self.scan():
+ return None
+ return getindex_mem(self.index)
+
+ def getfile(self,myfile,mydefault=None):
+ """Finds 'myfile' in the data segment and returns it."""
+ if not self.scan():
+ return None
+ myresult=searchindex(self.index,myfile)
+ if not myresult:
+ return mydefault
+ a=open(self.file,"r")
+ a.seek(self.datapos+myresult[0],0)
+ myreturn=a.read(myresult[1])
+ a.close()
+ return myreturn
+
+ def getelements(self,myfile):
+ """A split/array representation of tbz2.getfile()"""
+ mydat=self.getfile(myfile)
+ if not mydat:
+ return []
+ return mydat.split()
+
+ def unpackinfo(self,mydest):
+ """Unpacks all the files from the dataSegment into 'mydest'."""
+ if not self.scan():
+ return 0
+ try:
+ origdir=os.getcwd()
+ except SystemExit, e:
+ raise
+ except:
+ os.chdir("/")
+ origdir="/"
+ a=open(self.file,"r")
+ if not os.path.exists(mydest):
+ os.makedirs(mydest)
+ os.chdir(mydest)
+ startpos=0
+ while ((startpos+8)<self.indexsize):
+ namelen=decodeint(self.index[startpos:startpos+4])
+ datapos=decodeint(self.index[startpos+4+namelen:startpos+8+namelen]);
+ datalen=decodeint(self.index[startpos+8+namelen:startpos+12+namelen]);
+ myname=self.index[startpos+4:startpos+4+namelen]
+ dirname=os.path.dirname(myname)
+ if dirname:
+ if not os.path.exists(dirname):
+ os.makedirs(dirname)
+ mydat=open(myname,"w")
+ a.seek(self.datapos+datapos)
+ mydat.write(a.read(datalen))
+ mydat.close()
+ startpos=startpos+namelen+12
+ a.close()
+ os.chdir(origdir)
+ return 1
+
+ def get_data(self):
+ """Returns all the files from the dataSegment as a map object."""
+ if not self.scan():
+ return 0
+ a = open(self.file, "r")
+ mydata = {}
+ startpos=0
+ while ((startpos+8)<self.indexsize):
+ namelen=decodeint(self.index[startpos:startpos+4])
+ datapos=decodeint(self.index[startpos+4+namelen:startpos+8+namelen]);
+ datalen=decodeint(self.index[startpos+8+namelen:startpos+12+namelen]);
+ myname=self.index[startpos+4:startpos+4+namelen]
+ a.seek(self.datapos+datapos)
+ mydata[myname] = a.read(datalen)
+ startpos=startpos+namelen+12
+ a.close()
+ return mydata
+
+ def getboth(self):
+ """Returns an array [indexSegment,dataSegment]"""
+ if not self.scan():
+ return None
+
+ a = open(self.file,"r")
+ a.seek(self.datapos)
+ mydata =a.read(self.datasize)
+ a.close()
+
+ return self.index, mydata
+
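+
+# Usage sketch (path is illustrative):
+#   t = tbz2("/usr/portage/packages/All/foo-1.0.tbz2")
+#   t.getfile("CATEGORY")      # one metadata file's contents, or None
+#   t.decompose("/tmp/xpak")   # extract the whole xpak into a directory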