author     Marius Mauch <genone@gentoo.org>   2007-01-25 15:49:26 +0000
committer  Marius Mauch <genone@gentoo.org>   2007-01-25 15:49:26 +0000
commit     3b08c21101b0801d7c5d6c145a27bef5cd42078c (patch)
tree       2eea73b311d67b567410670630335796bf0a272c
parent     demote KEYWORDS.missing to a warning to make KEYWORDS='' valid (diff)
Namespace sanitizing, step 1
svn path=/main/trunk/; revision=5778
l--------- [-rw-r--r--]  pym/cvstree.py | 296
l--------- [-rw-r--r--]  pym/dispatch_conf.py | 162
l--------- [-rw-r--r--]  pym/eclass_cache.py | 84
l--------- [-rw-r--r--]  pym/emergehelp.py | 421
l--------- [-rw-r--r--]  pym/getbinpkg.py | 573
l--------- [-rw-r--r--]  pym/output.py | 394
-rw-r--r--  pym/portage/__init__.py (renamed from pym/portage.py) | 0
-rw-r--r--  pym/portage/cache/__init__.py (renamed from pym/cache/__init__.py) | 0
-rw-r--r--  pym/portage/cache/anydbm.py (renamed from pym/cache/anydbm.py) | 0
-rw-r--r--  pym/portage/cache/cache_errors.py (renamed from pym/cache/cache_errors.py) | 0
-rw-r--r--  pym/portage/cache/flat_hash.py (renamed from pym/cache/flat_hash.py) | 0
-rw-r--r--  pym/portage/cache/flat_list.py (renamed from pym/cache/flat_list.py) | 0
-rw-r--r--  pym/portage/cache/fs_template.py (renamed from pym/cache/fs_template.py) | 0
-rw-r--r--  pym/portage/cache/mappings.py (renamed from pym/cache/mappings.py) | 0
-rw-r--r--  pym/portage/cache/metadata.py (renamed from pym/cache/metadata.py) | 0
-rw-r--r--  pym/portage/cache/metadata_overlay.py (renamed from pym/cache/metadata_overlay.py) | 0
-rw-r--r--  pym/portage/cache/sql_template.py (renamed from pym/cache/sql_template.py) | 0
-rw-r--r--  pym/portage/cache/sqlite.py (renamed from pym/cache/sqlite.py) | 0
-rw-r--r--  pym/portage/cache/template.py (renamed from pym/cache/template.py) | 0
-rw-r--r--  pym/portage/cache/util.py (renamed from pym/cache/util.py) | 0
-rw-r--r--  pym/portage/cache/volatile.py (renamed from pym/cache/volatile.py) | 0
-rw-r--r--  pym/portage/checksum.py | 219
-rw-r--r--  pym/portage/const.py | 65
-rw-r--r--  pym/portage/cvstree.py | 295
-rw-r--r--  pym/portage/data.py | 126
-rw-r--r--  pym/portage/debug.py | 115
-rw-r--r--  pym/portage/dep.py | 646
-rw-r--r--  pym/portage/dispatch_conf.py | 161
-rw-r--r--  pym/portage/eclass_cache.py | 83
-rw-r--r--  pym/portage/elog_modules/__init__.py (renamed from pym/elog_modules/__init__.py) | 0
-rw-r--r--  pym/portage/elog_modules/mod_custom.py (renamed from pym/elog_modules/mod_custom.py) | 0
-rw-r--r--  pym/portage/elog_modules/mod_mail.py (renamed from pym/elog_modules/mod_mail.py) | 0
-rw-r--r--  pym/portage/elog_modules/mod_mail_summary.py (renamed from pym/elog_modules/mod_mail_summary.py) | 0
-rw-r--r--  pym/portage/elog_modules/mod_save.py (renamed from pym/elog_modules/mod_save.py) | 0
-rw-r--r--  pym/portage/elog_modules/mod_save_summary.py (renamed from pym/elog_modules/mod_save_summary.py) | 0
-rw-r--r--  pym/portage/elog_modules/mod_syslog.py (renamed from pym/elog_modules/mod_syslog.py) | 0
-rw-r--r--  pym/portage/emergehelp.py | 420
-rw-r--r--  pym/portage/exception.py | 100
-rw-r--r--  pym/portage/exec.py | 336
-rw-r--r--  pym/portage/getbinpkg.py | 572
-rw-r--r--  pym/portage/gpg.py | 149
-rw-r--r--  pym/portage/localization.py | 21
-rw-r--r--  pym/portage/locks.py | 312
-rw-r--r--  pym/portage/mail.py | 89
-rw-r--r--  pym/portage/manifest.py | 618
-rw-r--r--  pym/portage/news.py | 268
-rw-r--r--  pym/portage/output.py | 393
-rw-r--r--  pym/portage/selinux.py | 8
-rw-r--r--  pym/portage/update.py | 224
-rw-r--r--  pym/portage/util.py | 1037
-rw-r--r--  pym/portage/versions.py | 314
-rw-r--r--  pym/portage/xpak.py | 421
l--------- [-rw-r--r--]  pym/portage_checksum.py | 220
l--------- [-rw-r--r--]  pym/portage_const.py | 66
l--------- [-rw-r--r--]  pym/portage_data.py | 127
l--------- [-rw-r--r--]  pym/portage_debug.py | 116
l--------- [-rw-r--r--]  pym/portage_dep.py | 647
l--------- [-rw-r--r--]  pym/portage_exception.py | 101
l--------- [-rw-r--r--]  pym/portage_exec.py | 337
l--------- [-rw-r--r--]  pym/portage_gpg.py | 150
l--------- [-rw-r--r--]  pym/portage_localization.py | 22
l--------- [-rw-r--r--]  pym/portage_locks.py | 313
l--------- [-rw-r--r--]  pym/portage_mail.py | 90
l--------- [-rw-r--r--]  pym/portage_manifest.py | 619
l--------- [-rw-r--r--]  pym/portage_news.py | 269
l--------- [-rw-r--r--]  pym/portage_selinux.py | 9
l--------- [-rw-r--r--]  pym/portage_update.py | 225
l--------- [-rw-r--r--]  pym/portage_util.py | 1038
l--------- [-rw-r--r--]  pym/portage_versions.py | 315
l--------- [-rw-r--r--]  pym/xpak.py | 422
70 files changed, 7016 insertions, 6992 deletions
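
The rename map above shows the whole pattern of this step: every flat pym/<name>.py module moves under the new pym/portage/ package, and each old path becomes a symlink (mode 120000) pointing at its relocated counterpart, so legacy imports keep resolving. A pure-Python forwarding shim would achieve the same compatibility; the following is a hypothetical sketch of that alternative (this commit uses filesystem symlinks, not shim modules):

    # pym/portage_util.py -- hypothetical forwarding shim, equivalent in
    # effect to the symlink pym/portage_util.py -> portage/util.py that
    # this commit actually creates.
    import sys
    import portage.util

    # Alias the legacy name to the relocated module so "import portage_util"
    # and "import portage.util" share one module object instead of loading
    # the same source file twice under two names.
    sys.modules[__name__] = portage.util

A symlink, unlike such a shim, leaves the file importable under two distinct module names, so the two import paths produce two separate module objects.
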
diff --git a/pym/cvstree.py b/pym/cvstree.py
index 30f143cd..64d92942 100644..120000
--- a/pym/cvstree.py
+++ b/pym/cvstree.py
@@ -1,295 +1 @@
-# cvstree.py -- cvs tree utilities
-# Copyright 1998-2004 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-# $Id$
-
-
-import os,time,sys,re
-from stat import *
-
-# [D]/Name/Version/Date/Flags/Tags
-
-def pathdata(entries, path):
- """(entries,path)
- Returns the data(dict) for a specific file/dir at the path specified."""
- mysplit=path.split("/")
- myentries=entries
- mytarget=mysplit[-1]
- mysplit=mysplit[:-1]
- for mys in mysplit:
- if myentries["dirs"].has_key(mys):
- myentries=myentries["dirs"][mys]
- else:
- return None
- if myentries["dirs"].has_key(mytarget):
- return myentries["dirs"][mytarget]
- elif myentries["files"].has_key(mytarget):
- return myentries["files"][mytarget]
- else:
- return None
-
-def fileat(entries, path):
- return pathdata(entries,path)
-
-def isadded(entries, path):
- """(entries,path)
- Returns true if the path exists and is added to the cvs tree."""
- mytarget=pathdata(entries, path)
- if mytarget:
- if "cvs" in mytarget["status"]:
- return 1
-
- basedir=os.path.dirname(path)
- filename=os.path.basename(path)
-
- try:
- myfile=open(basedir+"/CVS/Entries","r")
- except IOError:
- return 0
- mylines=myfile.readlines()
- myfile.close()
-
- rep=re.compile("^\/"+re.escape(filename)+"\/");
- for x in mylines:
- if rep.search(x):
- return 1
-
- return 0
-
-def findnew(entries,recursive=0,basedir=""):
- """(entries,recursive=0,basedir="")
- Recurses the entries tree to find all elements that have been added but
- have not yet been committed. Returns a list of paths, optionally prepended
- with a basedir."""
- if basedir and basedir[-1]!="/":
- basedir=basedir+"/"
- mylist=[]
- for myfile in entries["files"].keys():
- if "cvs" in entries["files"][myfile]["status"]:
- if "0" == entries["files"][myfile]["revision"]:
- mylist.append(basedir+myfile)
- if recursive:
- for mydir in entries["dirs"].keys():
- mylist+=findnew(entries["dirs"][mydir],recursive,basedir+mydir)
- return mylist
-
-def findchanged(entries,recursive=0,basedir=""):
- """(entries,recursive=0,basedir="")
- Recurses the entries tree to find all elements that exist in the cvs tree
- and differ from the committed version. Returns a list of paths, optionally
- prepended with a basedir."""
- if basedir and basedir[-1]!="/":
- basedir=basedir+"/"
- mylist=[]
- for myfile in entries["files"].keys():
- if "cvs" in entries["files"][myfile]["status"]:
- if "current" not in entries["files"][myfile]["status"]:
- if "exists" in entries["files"][myfile]["status"]:
- if entries["files"][myfile]["revision"]!="0":
- mylist.append(basedir+myfile)
- if recursive:
- for mydir in entries["dirs"].keys():
- mylist+=findchanged(entries["dirs"][mydir],recursive,basedir+mydir)
- return mylist
-
-def findmissing(entries,recursive=0,basedir=""):
- """(entries,recursive=0,basedir="")
- Recurses the entries tree to find all elements that are listed in the cvs
- tree but do not exist on the filesystem. Returns a list of paths,
- optionally prepended with a basedir."""
- if basedir and basedir[-1]!="/":
- basedir=basedir+"/"
- mylist=[]
- for myfile in entries["files"].keys():
- if "cvs" in entries["files"][myfile]["status"]:
- if "exists" not in entries["files"][myfile]["status"]:
- if "removed" not in entries["files"][myfile]["status"]:
- mylist.append(basedir+myfile)
- if recursive:
- for mydir in entries["dirs"].keys():
- mylist+=findmissing(entries["dirs"][mydir],recursive,basedir+mydir)
- return mylist
-
-def findunadded(entries,recursive=0,basedir=""):
- """(entries,recursive=0,basedir="")
- Recurses the entries tree to find all elements that are in valid cvs
- directories but are not part of the cvs tree. Returns a list of paths,
- optionally prepended with a basedir."""
- if basedir and basedir[-1]!="/":
- basedir=basedir+"/"
- mylist=[]
-
- #ignore what cvs ignores.
- for myfile in entries["files"].keys():
- if "cvs" not in entries["files"][myfile]["status"]:
- mylist.append(basedir+myfile)
- if recursive:
- for mydir in entries["dirs"].keys():
- mylist+=findunadded(entries["dirs"][mydir],recursive,basedir+mydir)
- return mylist
-
-def findremoved(entries,recursive=0,basedir=""):
- """(entries,recursive=0,basedir="")
- Recurses the entries tree to find all elements that are flagged for cvs
- deletion. Returns a list of paths, optionally prepended with a basedir."""
- if basedir and basedir[-1]!="/":
- basedir=basedir+"/"
- mylist=[]
- for myfile in entries["files"].keys():
- if "removed" in entries["files"][myfile]["status"]:
- mylist.append(basedir+myfile)
- if recursive:
- for mydir in entries["dirs"].keys():
- mylist+=findremoved(entries["dirs"][mydir],recursive,basedir+mydir)
- return mylist
-
-def findall(entries, recursive=0, basedir=""):
- """(entries,recursive=0,basedir="")
- Recurses the entries tree to find all new, changed, missing, and unadded
- entities. Returns a 4 element list of lists as returned from each find*()."""
-
- if basedir and basedir[-1]!="/":
- basedir=basedir+"/"
- mynew = findnew(entries,recursive,basedir)
- mychanged = findchanged(entries,recursive,basedir)
- mymissing = findmissing(entries,recursive,basedir)
- myunadded = findunadded(entries,recursive,basedir)
- myremoved = findremoved(entries,recursive,basedir)
- return [mynew, mychanged, mymissing, myunadded, myremoved]
-
-ignore_list = re.compile("(^|/)(RCS(|LOG)|SCCS|CVS(|\.adm)|cvslog\..*|tags|TAGS|\.(make\.state|nse_depinfo)|.*~|(\.|)#.*|,.*|_$.*|.*\$|\.del-.*|.*\.(old|BAK|bak|orig|rej|a|olb|o|obj|so|exe|Z|elc|ln)|core)$")
-def apply_cvsignore_filter(list):
- x=0
- while x < len(list):
- if ignore_list.match(list[x].split("/")[-1]):
- list.pop(x)
- else:
- x+=1
- return list
-
-def getentries(mydir,recursive=0):
- """(basedir,recursive=0)
- Scans the given directory and returns a datadict of all the entries in
- the directory, separated as a dirs dict and a files dict."""
- myfn=mydir+"/CVS/Entries"
- # entries=[dirs, files]
- entries={"dirs":{},"files":{}}
- if not os.path.exists(mydir):
- return entries
- try:
- myfile=open(myfn, "r")
- mylines=myfile.readlines()
- myfile.close()
- except SystemExit, e:
- raise
- except:
- mylines=[]
- for line in mylines:
- if line and line[-1]=="\n":
- line=line[:-1]
- if not line:
- continue
- if line=="D": # End of entries file
- break
- mysplit=line.split("/")
- if len(mysplit)!=6:
- print "Confused:",mysplit
- continue
- if mysplit[0]=="D":
- entries["dirs"][mysplit[1]]={"dirs":{},"files":{},"status":[]}
- entries["dirs"][mysplit[1]]["status"]=["cvs"]
- if os.path.isdir(mydir+"/"+mysplit[1]):
- entries["dirs"][mysplit[1]]["status"]+=["exists"]
- entries["dirs"][mysplit[1]]["flags"]=mysplit[2:]
- if recursive:
- rentries=getentries(mydir+"/"+mysplit[1],recursive)
- #print rentries.keys()
- #print entries["files"].keys()
- #print entries["files"][mysplit[1]]
- entries["dirs"][mysplit[1]]["dirs"]=rentries["dirs"]
- entries["dirs"][mysplit[1]]["files"]=rentries["files"]
- else:
- # [D]/Name/revision/Date/Flags/Tags
- entries["files"][mysplit[1]]={}
- entries["files"][mysplit[1]]["revision"]=mysplit[2]
- entries["files"][mysplit[1]]["date"]=mysplit[3]
- entries["files"][mysplit[1]]["flags"]=mysplit[4]
- entries["files"][mysplit[1]]["tags"]=mysplit[5]
- entries["files"][mysplit[1]]["status"]=["cvs"]
- if entries["files"][mysplit[1]]["revision"][0]=="-":
- entries["files"][mysplit[1]]["status"]+=["removed"]
-
- for file in apply_cvsignore_filter(os.listdir(mydir)):
- if file=="CVS":
- continue
- if file=="digest-framerd-2.4.3":
- print mydir,file
- if os.path.isdir(mydir+"/"+file):
- if not entries["dirs"].has_key(file):
- entries["dirs"][file]={"dirs":{},"files":{}}
- if entries["dirs"][file].has_key("status"):
- if "exists" not in entries["dirs"][file]["status"]:
- entries["dirs"][file]["status"]+=["exists"]
- else:
- entries["dirs"][file]["status"]=["exists"]
- elif os.path.isfile(mydir+"/"+file):
- if file=="digest-framerd-2.4.3":
- print "isfile"
- if not entries["files"].has_key(file):
- entries["files"][file]={"revision":"","date":"","flags":"","tags":""}
- if entries["files"][file].has_key("status"):
- if file=="digest-framerd-2.4.3":
- print "has status"
- if "exists" not in entries["files"][file]["status"]:
- if file=="digest-framerd-2.4.3":
- print "no exists in status"
- entries["files"][file]["status"]+=["exists"]
- else:
- if file=="digest-framerd-2.4.3":
- print "no status"
- entries["files"][file]["status"]=["exists"]
- try:
- if file=="digest-framerd-2.4.3":
- print "stat'ing"
- mystat=os.stat(mydir+"/"+file)
- mytime=time.asctime(time.gmtime(mystat[ST_MTIME]))
- if not entries["files"][file].has_key("status"):
- if file=="digest-framerd-2.4.3":
- print "status not set"
- entries["files"][file]["status"]=[]
- if file=="digest-framerd-2.4.3":
- print "date:",entries["files"][file]["date"]
- print "sdate:",mytime
- if mytime==entries["files"][file]["date"]:
- entries["files"][file]["status"]+=["current"]
- if file=="digest-framerd-2.4.3":
- print "stat done"
-
- del mystat
- except SystemExit, e:
- raise
- except Exception, e:
- print "failed to stat",file
- print e
- return
-
- else:
- print
- print "File of unknown type:",mydir+"/"+file
- print
- return entries
-
-#class cvstree:
-# def __init__(self,basedir):
-# self.refdir=os.cwd()
-# self.basedir=basedir
-# self.entries={}
-# self.entries["dirs"]={}
-# self.entries["files"]={}
-# self.entries["dirs"][self.basedir]=getentries(self.basedir)
-# self.getrealdirs(self.dirs, self.files)
-# def getrealdirs(self,dirs,files):
-# for mydir in dirs.keys():
-# list = os.listdir(
-
-
+portage/cvstree.py
\ No newline at end of file
diff --git a/pym/dispatch_conf.py b/pym/dispatch_conf.py
index 690772bf..12dcece7 100644..120000
--- a/pym/dispatch_conf.py
+++ b/pym/dispatch_conf.py
@@ -1,161 +1 @@
-# archive_conf.py -- functionality common to archive-conf and dispatch-conf
-# Copyright 2003-2004 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-# $Id$
-
-
-# Library by Wayne Davison <gentoo@blorf.net>, derived from code
-# written by Jeremy Wohl (http://igmus.org)
-
-from stat import *
-import os, sys, commands, shutil
-
-import portage
-
-RCS_BRANCH = '1.1.1'
-RCS_LOCK = 'rcs -ko -M -l'
-RCS_PUT = 'ci -t-"Archived config file." -m"dispatch-conf update."'
-RCS_GET = 'co'
-RCS_MERGE = 'rcsmerge -p -r' + RCS_BRANCH + ' %s >%s'
-
-DIFF3_MERGE = 'diff3 -mE %s %s %s >%s'
-
-def read_config(mandatory_opts):
- try:
- opts = portage.getconfig('/etc/dispatch-conf.conf')
- except:
- opts = None
-
- if not opts:
- print >> sys.stderr, 'dispatch-conf: Error reading /etc/dispatch-conf.conf; fatal'
- sys.exit(1)
-
- for key in mandatory_opts:
- if not opts.has_key(key):
- if key == "merge":
- opts["merge"] = "sdiff --suppress-common-lines --output=%s %s %s"
- else:
- print >> sys.stderr, 'dispatch-conf: Missing option "%s" in /etc/dispatch-conf.conf; fatal' % (key,)
-
- if not os.path.exists(opts['archive-dir']):
- os.mkdir(opts['archive-dir'])
- elif not os.path.isdir(opts['archive-dir']):
- print >> sys.stderr, 'dispatch-conf: Config archive dir [%s] must exist; fatal' % (opts['archive-dir'],)
- sys.exit(1)
-
- return opts
-
-
-def rcs_archive(archive, curconf, newconf, mrgconf):
- """Archive existing config in rcs (on trunk). Then, if mrgconf is
- specified and an old branch version exists, merge the user's changes
- and the distributed changes and put the result into mrgconf. Lastly,
- if newconf was specified, leave it in the archive dir with a .dist.new
- suffix along with the last 1.1.1 branch version with a .dist suffix."""
-
- try:
- os.makedirs(os.path.dirname(archive))
- except:
- pass
-
- try:
- shutil.copy2(curconf, archive)
- except(IOError, os.error), why:
- print >> sys.stderr, 'dispatch-conf: Error copying %s to %s: %s; fatal' % \
- (curconf, archive, str(why))
- if os.path.exists(archive + ',v'):
- os.system(RCS_LOCK + ' ' + archive)
- os.system(RCS_PUT + ' ' + archive)
-
- ret = 0
- if newconf != '':
- os.system(RCS_GET + ' -r' + RCS_BRANCH + ' ' + archive)
- has_branch = os.path.exists(archive)
- if has_branch:
- os.rename(archive, archive + '.dist')
-
- try:
- shutil.copy2(newconf, archive)
- except(IOError, os.error), why:
- print >> sys.stderr, 'dispatch-conf: Error copying %s to %s: %s; fatal' % \
- (newconf, archive, str(why))
-
- if has_branch:
- if mrgconf != '':
- # This puts the results of the merge into mrgconf.
- ret = os.system(RCS_MERGE % (archive, mrgconf))
- mystat = os.lstat(newconf)
- os.chmod(mrgconf, mystat[ST_MODE])
- os.chown(mrgconf, mystat[ST_UID], mystat[ST_GID])
- os.rename(archive, archive + '.dist.new')
- return ret
-
-
-def file_archive(archive, curconf, newconf, mrgconf):
- """Archive existing config to the archive-dir, bumping old versions
- out of the way into .# versions (log-rotate style). Then, if mrgconf
- was specified and there is a .dist version, merge the user's changes
- and the distributed changes and put the result into mrgconf. Lastly,
- if newconf was specified, archive it as a .dist.new version (which
- gets moved to the .dist version at the end of the processing)."""
-
- try:
- os.makedirs(os.path.dirname(archive))
- except:
- pass
-
- # Archive the current config file if it isn't already saved
- if os.path.exists(archive) \
- and len(commands.getoutput('diff -aq %s %s' % (curconf,archive))) != 0:
- suf = 1
- while suf < 9 and os.path.exists(archive + '.' + str(suf)):
- suf += 1
-
- while suf > 1:
- os.rename(archive + '.' + str(suf-1), archive + '.' + str(suf))
- suf -= 1
-
- os.rename(archive, archive + '.1')
-
- try:
- shutil.copy2(curconf, archive)
- except(IOError, os.error), why:
- print >> sys.stderr, 'dispatch-conf: Error copying %s to %s: %s; fatal' % \
- (curconf, archive, str(why))
-
- if newconf != '':
- # Save off new config file in the archive dir with .dist.new suffix
- try:
- shutil.copy2(newconf, archive + '.dist.new')
- except(IOError, os.error), why:
- print >> sys.stderr, 'dispatch-conf: Error copying %s to %s: %s; fatal' % \
- (newconf, archive + '.dist.new', str(why))
-
- ret = 0
- if mrgconf != '' and os.path.exists(archive + '.dist'):
- # This puts the results of the merge into mrgconf.
- ret = os.system(DIFF3_MERGE % (curconf, archive + '.dist', newconf, mrgconf))
- mystat = os.lstat(newconf)
- os.chmod(mrgconf, mystat[ST_MODE])
- os.chown(mrgconf, mystat[ST_UID], mystat[ST_GID])
-
- return ret
-
-
-def rcs_archive_post_process(archive):
- """Check in the archive file with the .dist.new suffix on the branch
- and remove the one with the .dist suffix."""
- os.rename(archive + '.dist.new', archive)
- if os.path.exists(archive + '.dist'):
- # Commit the last-distributed version onto the branch.
- os.system(RCS_LOCK + RCS_BRANCH + ' ' + archive)
- os.system(RCS_PUT + ' -r' + RCS_BRANCH + ' ' + archive)
- os.unlink(archive + '.dist')
- else:
- # Forcefully commit the last-distributed version onto the branch.
- os.system(RCS_PUT + ' -f -r' + RCS_BRANCH + ' ' + archive)
-
-
-def file_archive_post_process(archive):
- """Rename the archive file with the .dist.new suffix to a .dist suffix"""
- os.rename(archive + '.dist.new', archive + '.dist')
+portage/dispatch_conf.py
\ No newline at end of file
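
The DIFF3_MERGE template removed above ('diff3 -mE %s %s %s >%s') is the core of file_archive()'s merge step: it folds the user's edits (current config vs. the archived .dist copy) into the newly distributed file. A standalone sketch of that invocation, for illustration only (hypothetical helper, using subprocess rather than os.system):

    import subprocess

    def three_way_merge(curconf, distconf, newconf, mrgconf):
        # Equivalent of DIFF3_MERGE: merge the user's changes (curconf vs.
        # distconf) into newconf and write the result to mrgconf.  diff3
        # exits 0 on a clean merge, 1 if conflict markers were emitted,
        # and 2 on trouble.
        with open(mrgconf, "w") as out:
            return subprocess.call(
                ["diff3", "-mE", curconf, distconf, newconf], stdout=out)
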
diff --git a/pym/eclass_cache.py b/pym/eclass_cache.py
index 91b98fec..c7d7db2c 100644..120000
--- a/pym/eclass_cache.py
+++ b/pym/eclass_cache.py
@@ -1,83 +1 @@
-# Copyright: 2005 Gentoo Foundation
-# Author(s): Nicholas Carpaski (carpaski@gentoo.org), Brian Harring (ferringb@gentoo.org)
-# License: GPL2
-# $Id$
-
-from portage_util import normalize_path, writemsg
-import os, sys
-from portage_data import portage_gid
-
-class cache:
- """
- Maintains the cache information about eclasses used in ebuild.
- """
- def __init__(self, porttree_root, overlays=[]):
- self.porttree_root = porttree_root
-
- self.eclasses = {} # {"Name": ("location","_mtime_")}
- self._eclass_locations = {}
-
- # screw with the porttree ordering, w/out having bash inherit match it, and I'll hurt you.
- # ~harring
- self.porttrees = [self.porttree_root]+overlays
- self.porttrees = tuple(map(normalize_path, self.porttrees))
- self._master_eclass_root = os.path.join(self.porttrees[0],"eclass")
- self.update_eclasses()
-
- def close_caches(self):
- import traceback
- traceback.print_stack()
- print "%s close_cache is deprecated" % self.__class__
- self.eclasses.clear()
-
- def flush_cache(self):
- import traceback
- traceback.print_stack()
- print "%s flush_cache is deprecated" % self.__class__
-
- self.update_eclasses()
-
- def update_eclasses(self):
- self.eclasses = {}
- self._eclass_locations = {}
- eclass_len = len(".eclass")
- for x in [normalize_path(os.path.join(y,"eclass")) for y in self.porttrees]:
- if not os.path.isdir(x):
- continue
- for y in [y for y in os.listdir(x) if y.endswith(".eclass")]:
- try:
- mtime = long(os.stat(os.path.join(x, y)).st_mtime)
- except OSError:
- continue
- ys=y[:-eclass_len]
- self.eclasses[ys] = (x, long(mtime))
- self._eclass_locations[ys] = x
-
- def is_eclass_data_valid(self, ec_dict):
- if not isinstance(ec_dict, dict):
- return False
- for eclass, tup in ec_dict.iteritems():
- cached_data = self.eclasses.get(eclass, None)
- """ Only use the mtime for validation since the probability of a
- collision is small and, depending on the cache implementation, the
- path may not be specified (cache from rsync mirrors, for example).
- """
- if cached_data is None or tup[1] != cached_data[1]:
- return False
-
- return True
-
- def get_eclass_data(self, inherits, from_master_only=False):
- ec_dict = {}
- for x in inherits:
- try:
- ec_dict[x] = self.eclasses[x]
- except KeyError:
- print "ec=",ec_dict
- print "inherits=",inherits
- raise
- if from_master_only and \
- self._eclass_locations[x] != self._master_eclass_root:
- return None
-
- return ec_dict
+portage/eclass_cache.py
\ No newline at end of file
diff --git a/pym/emergehelp.py b/pym/emergehelp.py
index 373e0bf4..b3d29019 100644..120000
--- a/pym/emergehelp.py
+++ b/pym/emergehelp.py
@@ -1,420 +1 @@
-# Copyright 1999-2006 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-# $Id$
-
-
-import os,sys
-from output import bold, turquoise, green
-
-def shorthelp():
- print
- print
- print bold("Usage:")
- print " "+turquoise("emerge")+" [ "+green("options")+" ] [ "+green("action")+" ] [ "+turquoise("ebuildfile")+" | "+turquoise("tbz2file")+" | "+turquoise("dependency")+" ] [ ... ]"
- print " "+turquoise("emerge")+" [ "+green("options")+" ] [ "+green("action")+" ] < "+turquoise("system")+" | "+turquoise("world")+" >"
- print " "+turquoise("emerge")+" < "+turquoise("--sync")+" | "+turquoise("--metadata")+" | "+turquoise("--info")+" >"
- print " "+turquoise("emerge")+" "+turquoise("--resume")+" [ "+green("--pretend")+" | "+green("--ask")+" | "+green("--skipfirst")+" ]"
- print " "+turquoise("emerge")+" "+turquoise("--help")+" [ "+green("system")+" | "+green("world")+" | "+green("config")+" | "+green("--sync")+" ] "
- print bold("Options:")+" "+green("-")+"["+green("abBcCdDefgGhikKlnNoOpqPsStuvV")+"] ["+green("--oneshot")+"] ["+green("--newuse")+"] ["+green("--noconfmem")+"]"
- print " [ " + green("--color")+" < " + turquoise("y") + " | "+ turquoise("n")+" > ] [ "+green("--columns")+" ]"
- print " ["+green("--nospinner")+"]"
- print " [ "+green("--deep")+" ] [" + green("--with-bdeps")+" < " + turquoise("y") + " | "+ turquoise("n")+" > ]"
- print bold("Actions:")+" [ "+green("--clean")+" | "+green("--depclean")+" | "+green("--prune")+" | "+green("--regen")+" | "+green("--search")+" | "+green("--unmerge")+" ]"
- print
-
-def help(myaction,myopts,havecolor=1):
- if not myaction and ("--help" not in myopts):
- shorthelp()
- print
- print " For more help try 'emerge --help' or consult the man page."
- print
- elif not myaction:
- shorthelp()
- print
- print turquoise("Help (this screen):")
- print " "+green("--help")+" ("+green("-h")+" short option)"
- print " Displays this help; an additional argument (see above) will tell"
- print " emerge to display detailed help."
- print
- print turquoise("Actions:")
- print " "+green("--clean")+" ("+green("-c")+" short option)"
- print " Cleans the system by removing outdated packages which will not"
- print " remove functionalities or prevent your system from working."
- print " The arguments can be in several different formats :"
- print " * world "
- print " * system or"
- print " * 'dependency specification' (in single quotes is best.)"
- print " Here are a few examples of the dependency specification format:"
- print " "+bold("binutils")+" matches"
- print " binutils-2.11.90.0.7 and binutils-2.11.92.0.12.3-r1"
- print " "+bold("sys-devel/binutils")+" matches"
- print " binutils-2.11.90.0.7 and binutils-2.11.92.0.12.3-r1"
- print " "+bold(">sys-devel/binutils-2.11.90.0.7")+" matches"
- print " binutils-2.11.92.0.12.3-r1"
- print " "+bold(">=sys-devel/binutils-2.11.90.0.7")+" matches"
- print " binutils-2.11.90.0.7 and binutils-2.11.92.0.12.3-r1"
- print " "+bold("<=sys-devel/binutils-2.11.92.0.12.3-r1")+" matches"
- print " binutils-2.11.90.0.7 and binutils-2.11.92.0.12.3-r1"
- print
- print " "+green("--config")
- print " Runs package-specific operations that must be executed after an"
- print " emerge process has completed. This usually entails configuration"
- print " file setup or other similar setups that the user may wish to run."
- print
- print " "+green("--depclean")
- print " Cleans the system by removing packages that are not associated"
- print " with explicitly merged packages. Depclean works by creating the"
- print " full dependency tree from the system list and the world file,"
- print " then comparing it to installed packages. Packages installed, but"
- print " not associated with an explicit merge are listed as candidates"
- print " for unmerging."+turquoise(" WARNING: This can seriously affect your system by")
- print " "+turquoise("removing packages that may have been linked against, but due to")
- print " "+turquoise("changes in USE flags may no longer be part of the dep tree. Use")
- print " "+turquoise("caution when employing this feature.")
- print
- print " "+green("--info")
- print " Displays important portage variables that will be exported to"
- print " ebuild.sh when performing merges. This information is useful"
- print " for bug reports and verification of settings. All settings in"
- print " make.{conf,globals,defaults} and the environment show up if"
- print " run with the '--verbose' flag."
- print
- print " "+green("--metadata")
- print " Transfers metadata cache from ${PORTDIR}/metadata/cache/ to"
- print " /var/cache/edb/dep/ as is normally done on the tail end of an"
- print " rsync update using " + bold("emerge --sync") + ". This process populates the"
- print " cache database that portage uses for pre-parsed lookups of"
- print " package data. It does not populate cache for the overlays"
- print " listed in PORTDIR_OVERLAY. In order to generate cache for"
- print " overlays, use " + bold("--regen") + "."
- print
- print " "+green("--prune")+" ("+green("-P")+" short option)"
- print " "+turquoise("WARNING: This action can remove important packages!")
- print " Removes all but the most recently installed version of a package"
- print " from your system. This action doesn't verify the possible binary"
- print " compatibility between versions and can thus remove essential"
- print " dependencies from your system."
- print " The argument format is the same as for the "+bold("--clean")+" action."
- print
- print " "+green("--regen")
- print " Causes portage to check and update the dependency cache of all"
- print " ebuilds in the portage tree. This is not recommended for rsync"
- print " users as rsync updates the cache using server-side caches."
- print " Rsync users should simply 'emerge --sync' to regenerate."
- print
- print " "+green("--resume")
- print " Resumes the last merge operation. It can be treated just like a"
- print " regular emerge: --pretend and other options work alongside it."
- print " 'emerge --resume' only returns an error on failure. When there is"
- print " nothing to do, it exits with a message and a success condition."
- print
- print " "+green("--search")+" ("+green("-s")+" short option)"
- print " Searches for matches of the supplied string in the current local"
- print " portage tree. By default emerge uses a case-insensitive simple "
- print " search, but you can enable a regular expression search by "
- print " prefixing the search string with %."
- print " Prepending the expression with a '@' will cause the category to"
- print " be included in the search."
- print " A few examples:"
- print " "+bold("emerge --search libc")
- print " list all packages that contain libc in their name"
- print " "+bold("emerge --search '%^kde'")
- print " list all packages starting with kde"
- print " "+bold("emerge --search '%gcc$'")
- print " list all packages ending with gcc"
- print " "+bold("emerge --search '%@^dev-java.*jdk'")
- print " list all available Java JDKs"
- print
- print " "+green("--searchdesc")+" ("+green("-S")+" short option)"
- print " Matches the search string against the description field as well"
- print " the package's name. Take caution as the descriptions are also"
- print " matched as regular expressions."
- print " emerge -S html"
- print " emerge -S applet"
- print " emerge -S 'perl.*module'"
- print
- print " "+green("--unmerge")+" ("+green("-C")+" short option)"
- print " "+turquoise("WARNING: This action can remove important packages!")
- print " Removes all matching packages "+bold("completely")+" from"
- print " your system. Specify arguments using the dependency specification"
- print " format described in the "+bold("--clean")+" action above."
- print
- print " "+green("--update")+" ("+green("-u")+" short option)"
- print " Updates packages to the best version available, which may not"
- print " always be the highest version number due to masking for testing"
- print " and development. This will also update direct dependencies which"
- print " may not what you want. Package atoms specified on the command line"
- print " are greedy, meaning that unspecific atoms may match multiple"
- print " installed versions of slotted packages."
- print
- print " "+green("--version")+" ("+green("-V")+" short option)"
- print " Displays the currently installed version of portage along with"
- print " other information useful for quick reference on a system. See"
- print " "+bold("emerge info")+" for more advanced information."
- print
- print turquoise("Options:")
- print " "+green("--alphabetical")
- print " When displaying USE and other flag output, combines the enabled"
- print " and disabled flags into a single list and sorts it alphabetically."
- print " With this option, output such as USE=\"dar -bar -foo\" will instead"
- print " be displayed as USE=\"-bar dar -foo\""
- print
- print " "+green("--ask")+" ("+green("-a")+" short option)"
- print " before performing the merge, display what ebuilds and tbz2s will"
- print " be installed, in the same format as when using --pretend; then"
- print " ask whether to continue with the merge or abort. Using --ask is"
- print " more efficient than using --pretend and then executing the same"
- print " command without --pretend, as dependencies will only need to be"
- print " calculated once. WARNING: If the \"Enter\" key is pressed at the"
- print " prompt (with no other input), it is interpreted as acceptance of"
- print " the first choice. Note that the input buffer is not cleared prior"
- print " to the prompt, so an accidental press of the \"Enter\" key at any"
- print " time prior to the prompt will be interpreted as a choice!"
- print
- print " "+green("--buildpkg")+" ("+green("-b")+" short option)"
- print " Tell emerge to build binary packages for all ebuilds processed"
- print " (in addition to actually merging the packages. Useful for"
- print " maintainers or if you administrate multiple Gentoo Linux"
- print " systems (build once, emerge tbz2s everywhere) as well as disaster"
- print " recovery."
- print
- print " "+green("--buildpkgonly")+" ("+green("-B")+" short option)"
- print " Creates a binary package, but does not merge it to the"
- print " system. This has the restriction that unsatisfied dependencies"
- print " must not exist for the desired package as they cannot be used if"
- print " they do not exist on the system."
- print
- print " "+green("--changelog")+" ("+green("-l")+" short option)"
- print " When pretending, also display the ChangeLog entries for packages"
- print " that will be upgraded."
- print
- print " "+green("--color") + " < " + turquoise("y") + " | "+ turquoise("n")+" >"
- print " Enable or disable color output. This option will override NOCOLOR"
- print " (see make.conf(5)) and may also be used to force color output when"
- print " stdout is not a tty (by default, color is disabled unless stdout"
- print " is a tty)."
- print
- print " "+green("--columns")
- print " Display the pretend output in a tabular form. Versions are"
- print " aligned vertically."
- print
- print " "+green("--debug")+" ("+green("-d")+" short option)"
- print " Tell emerge to run the ebuild command in --debug mode. In this"
- print " mode, the bash build environment will run with the -x option,"
- print " causing it to output verbose debug information print to stdout."
- print " --debug is great for finding bash syntax errors as providing"
- print " very verbose information about the dependency and build process."
- print
- print " "+green("--deep")+" ("+green("-D")+" short option)"
- print " This flag forces emerge to consider the entire dependency tree of"
- print " packages, instead of checking only the immediate dependencies of"
- print " the packages. As an example, this catches updates in libraries"
- print " that are not directly listed in the dependencies of a package."
- print " Also see --with-bdeps for behavior with respect to build time"
- print " dependencies that are not strictly required."
- print
- print " "+green("--emptytree")+" ("+green("-e")+" short option)"
- print " Virtually tweaks the tree of installed packages to contain"
- print " nothing. This is great to use together with --pretend. This makes"
- print " it possible for developers to get a complete overview of the"
- print " complete dependency tree of a certain package."
- print
- print " "+green("--fetchonly")+" ("+green("-f")+" short option)"
- print " Instead of doing any package building, just perform fetches for"
- print " all packages (main package as well as all dependencies.) When"
- print " used in combination with --pretend all the SRC_URIs will be"
- print " displayed multiple mirrors per line, one line per file."
- print
- print " "+green("--fetch-all-uri")+" ("+green("-F")+" short option)"
- print " Same as --fetchonly except that all package files, including those"
- print " not required to build the package, will be processed."
- print
- print " "+green("--getbinpkg")+" ("+green("-g")+" short option)"
- print " Using the server and location defined in PORTAGE_BINHOST, portage"
- print " will download the information from each binary file there and it"
- print " will use that information to help build the dependency list. This"
- print " option implies '-k'. (Use -gK for binary-only merging.)"
- print
- print " "+green("--getbinpkgonly")+" ("+green("-G")+" short option)"
- print " This option is identical to -g, as above, except it will not use"
- print " ANY information from the local machine. All binaries will be"
- print " downloaded from the remote server without consulting packages"
- print " existing in the packages directory."
- print
- print " "+green("--newuse")+" ("+green("-N")+" short option)"
- print " Tells emerge to include installed packages where USE flags have "
- print " changed since installation."
- print
- print " "+green("--noconfmem")
- print " Portage keeps track of files that have been placed into"
- print " CONFIG_PROTECT directories, and normally it will not merge the"
- print " same file more than once, as that would become annoying. This"
- print " can lead to problems when the user wants the file in the case"
- print " of accidental deletion. With this option, files will always be"
- print " merged to the live fs instead of silently dropped."
- print
- print " "+green("--nodeps")+" ("+green("-O")+" short option)"
- print " Merge specified packages, but don't merge any dependencies."
- print " Note that the build may fail if deps aren't satisfied."
- print
- print " "+green("--noreplace")+" ("+green("-n")+" short option)"
- print " Skip the packages specified on the command-line that have"
- print " already been installed. Without this option, any packages,"
- print " ebuilds, or deps you specify on the command-line *will* cause"
- print " Portage to remerge the package, even if it is already installed."
- print " Note that Portage won't remerge dependencies by default."
- print
- print " "+green("--nospinner")
- print " Disables the spinner regardless of terminal type."
- print
- print " "+green("--oneshot")+" ("+green("-1")+" short option)"
- print " Emerge as normal, but don't add packages to the world profile."
- print " This package will only be updated if it is depended upon by"
- print " another package."
- print
- print " "+green("--onlydeps")+" ("+green("-o")+" short option)"
- print " Only merge (or pretend to merge) the dependencies of the"
- print " specified packages, not the packages themselves."
- print
- print " "+green("--pretend")+" ("+green("-p")+" short option)"
- print " Instead of actually performing the merge, simply display what"
- print " ebuilds and tbz2s *would* have been installed if --pretend"
- print " weren't used. Using --pretend is strongly recommended before"
- print " installing an unfamiliar package. In the printout, N = new,"
- print " U = updating, R = replacing, F = fetch restricted, B = blocked"
- print " by an already installed package, D = possible downgrading,"
- print " S = slotted install. --verbose causes affecting use flags to be"
- print " printed out accompanied by a '+' for enabled and a '-' for"
- print " disabled USE flags."
- print
- print " "+green("--quiet")+" ("+green("-q")+" short option)"
- print " Effects vary, but the general outcome is a reduced or condensed"
- print " output from portage's displays."
- print
- print " "+green("--skipfirst")
- print " This option is only valid in a resume situation. It removes the"
- print " first package in the resume list so that a merge may continue in"
- print " the presence of an uncorrectable or inconsequential error. This"
- print " should only be used in cases where skipping the package will not"
- print " result in failed dependencies."
- print
- print " "+green("--tree")+" ("+green("-t")+" short option)"
- print " Shows the dependency tree using indentation for dependencies."
- print " The packages are also listed in reverse merge order so that"
- print " a package's dependencies follow the package. Only really useful"
- print " in combination with --emptytree, --update or --deep."
- print
- print " "+green("--usepkg")+" ("+green("-k")+" short option)"
- print " Tell emerge to use binary packages (from $PKGDIR) if they are"
- print " available, thus possibly avoiding some time-consuming compiles."
- print " This option is useful for CD installs; you can export"
- print " PKGDIR=/mnt/cdrom/packages and then use this option to have"
- print " emerge \"pull\" binary packages from the CD in order to satisfy"
- print " dependencies."
- print
- print " "+green("--usepkgonly")+" ("+green("-K")+" short option)"
- print " Like --usepkg above, except this only allows the use of binary"
- print " packages, and it will abort the emerge if the package is not"
- print " available at the time of dependency calculation."
- print
- print " "+green("--verbose")+" ("+green("-v")+" short option)"
- print " Effects vary, but the general outcome is an increased or expanded"
- print " display of content in portage's displays."
- print
- print " "+green("--with-bdeps")+" < " + turquoise("y") + " | "+ turquoise("n")+" >"
- print " In dependency calculations, pull in build time dependencies that"
- print " are not strictly required. This defaults to 'n' for installation"
- print " actions and 'y' for the --depclean action. This setting can be"
- print " added to EMERGE_DEFAULT_OPTS (see make.conf(5)) and later"
- print " overridden via the command line."
- print
- elif myaction == "sync":
- print
- print bold("Usage: ")+turquoise("emerge")+" "+turquoise("--sync")
- print
- print " 'emerge --sync' tells emerge to update the Portage tree as specified in"
- print " The SYNC variable found in /etc/make.conf. By default, SYNC instructs"
- print " emerge to perform an rsync-style update with rsync.gentoo.org."
- print
- print " 'emerge-webrsync' exists as a helper app to emerge --sync, providing a"
- print " method to receive the entire portage tree as a tarball that can be"
- print " extracted and used. First time syncs would benefit greatly from this."
- print
- print " "+turquoise("WARNING:")
- print " If using our rsync server, emerge will clean out all files that do not"
- print " exist on it, including ones that you may have created. The exceptions"
- print " to this are the distfiles, local and packages directories."
- print
- elif myaction=="system":
- print
- print bold("Usage: ")+turquoise("emerge")+" [ "+green("options")+" ] "+turquoise("system")
- print
- print " \"emerge system\" is the Portage system update command. When run, it"
- print " will scan the etc/make.profile/packages file and determine what"
- print " packages need to be installed so that your system meets the minimum"
- print " requirements of your current system profile. Note that this doesn't"
- print " necessarily bring your system up-to-date at all; instead, it just"
- print " ensures that you have no missing parts. For example, if your system"
- print " profile specifies that you should have sys-apps/iptables installed"
- print " and you don't, then \"emerge system\" will install it (the most"
- print " recent version that matches the profile spec) for you. It's always a"
- print " good idea to do an \"emerge --pretend system\" before an \"emerge"
- print " system\", just so you know what emerge is planning to do."
- print
- elif myaction=="world":
- print
- print bold("Usage: ")+turquoise("emerge")+" [ "+green("options")+" ] "+turquoise("world")
- print
- print " 'emerge world' is the Portage command for completely updating your"
- print " system. The normal procedure is to first do an 'emerge --sync' and"
- print " then an 'emerge --update --deep world'. The first command brings your"
- print " local Portage tree up-to-date with the latest version information and"
- print " ebuilds. The second command then rebuilds all packages for which newer"
- print " versions or newer ebuilds have become available since you last did a"
- print " sync and update."
- print
- elif myaction=="config":
- outstuff=green("Config file management support (preliminary)")+"""
-
-Portage has a special feature called "config file protection". The purpose of
-this feature is to prevent new package installs from clobbering existing
-configuration files. By default, config file protection is turned on for /etc
-and the KDE configuration dirs; more may be added in the future.
-
-When Portage installs a file into a protected directory tree like /etc, any
-existing files will not be overwritten. If a file of the same name already
-exists, Portage will change the name of the to-be-installed file from 'foo' to
-'._cfg0000_foo'. If '._cfg0000_foo' already exists, this name becomes
-'._cfg0001_foo', etc. In this way, existing files are not overwritten,
-allowing the administrator to manually merge the new config files and avoid any
-unexpected changes.
-
-In addition to protecting overwritten files, Portage will not delete any files
-from a protected directory when a package is unmerged. While this may be a
-little bit untidy, it does prevent potentially valuable config files from being
-deleted, which is of paramount importance.
-
-Protected directories are set using the CONFIG_PROTECT variable, normally
-defined in /etc/make.globals. Directory exceptions to the CONFIG_PROTECTed
-directories can be specified using the CONFIG_PROTECT_MASK variable. To find
-files that need to be updated in /etc, type:
-
-# find /etc -iname '._cfg????_*'
-
-You can disable this feature by setting CONFIG_PROTECT="-*" in /etc/make.conf.
-Then, Portage will mercilessly auto-update your config files. Alternatively,
-you can leave Config File Protection on but tell Portage that it can overwrite
-files in certain specific /etc subdirectories. For example, if you wanted
-Portage to automatically update your rc scripts and your wget configuration,
-but didn't want any other changes made without your explicit approval, you'd
-add this to /etc/make.conf:
-
-CONFIG_PROTECT_MASK="/etc/wget /etc/rc.d"
-
-Tools such as dispatch-conf, cfg-update, and etc-update are also available to
-aid in the merging of these files. They provide interactive merging and can
-auto-merge trivial changes.
-
-"""
- print outstuff
-
+portage/emergehelp.py
\ No newline at end of file
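
The '._cfgNNNN_' naming scheme described in the removed config help text can be pinned down with a short sketch (an illustration only; hypothetical helper, not Portage's actual merge code):

    import os
    import re

    def next_cfg_name(directory, filename):
        # Next free ._cfgNNNN_<filename> slot, per the numbering scheme
        # described in the help text above: 0000, then 0001, and so on.
        pattern = re.compile(r"^\._cfg(\d{4})_" + re.escape(filename) + r"$")
        used = [int(m.group(1))
                for m in (pattern.match(e) for e in os.listdir(directory)) if m]
        return "._cfg%04d_%s" % (max(used) + 1 if used else 0, filename)

For example, if /etc already contains ._cfg0000_foo, the next protected install of foo would be written as ._cfg0001_foo.
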
diff --git a/pym/getbinpkg.py b/pym/getbinpkg.py
index 462da429..89c09094 100644..120000
--- a/pym/getbinpkg.py
+++ b/pym/getbinpkg.py
@@ -1,572 +1 @@
-# getbinpkg.py -- Portage binary-package helper functions
-# Copyright 2003-2004 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-# $Id$
-
-if not hasattr(__builtins__, "set"):
- from sets import Set as set
-
-from output import red, yellow, green
-import htmllib,HTMLParser,formatter,sys,os,xpak,time,tempfile,base64,urllib2
-
-try:
- import cPickle
-except ImportError:
- import pickle as cPickle
-
-try:
- import ftplib
-except SystemExit, e:
- raise
-except Exception, e:
- sys.stderr.write(red("!!! CANNOT IMPORT FTPLIB: ")+str(e)+"\n")
-
-try:
- import httplib
-except SystemExit, e:
- raise
-except Exception, e:
- sys.stderr.write(red("!!! CANNOT IMPORT HTTPLIB: ")+str(e)+"\n")
-
-def make_metadata_dict(data):
- myid,myglob = data
-
- mydict = {}
- for x in xpak.getindex_mem(myid):
- mydict[x] = xpak.getitem(data,x)
-
- return mydict
-
-class ParseLinks(HTMLParser.HTMLParser):
- """Parser class that overrides HTMLParser to grab all anchors from an html
- page and provide suffix and prefix limiters"""
- def __init__(self):
- self.PL_anchors = []
- HTMLParser.HTMLParser.__init__(self)
-
- def get_anchors(self):
- return self.PL_anchors
-
- def get_anchors_by_prefix(self,prefix):
- newlist = []
- for x in self.PL_anchors:
- if x.startswith(prefix):
- if x not in newlist:
- newlist.append(x[:])
- return newlist
-
- def get_anchors_by_suffix(self,suffix):
- newlist = []
- for x in self.PL_anchors:
- if x.endswith(suffix):
- if x not in newlist:
- newlist.append(x[:])
- return newlist
-
- def handle_endtag(self,tag):
- pass
-
- def handle_starttag(self,tag,attrs):
- if tag == "a":
- for x in attrs:
- if x[0] == 'href':
- if x[1] not in self.PL_anchors:
- self.PL_anchors.append(urllib2.unquote(x[1]))
-
-
-def create_conn(baseurl,conn=None):
- """(baseurl,conn) --- Takes a protocol://site:port/address url, and an
- optional connection. If connection is already active, it is passed on.
- baseurl is reduced to address and is returned in tuple (conn,address)"""
- parts = baseurl.split("://",1)
- if len(parts) != 2:
- raise ValueError, "Provided URL does not contain protocol identifier. '%s'" % baseurl
- protocol,url_parts = parts
- del parts
- host,address = url_parts.split("/",1)
- del url_parts
- address = "/"+address
-
- userpass_host = host.split("@",1)
- if len(userpass_host) == 1:
- host = userpass_host[0]
- userpass = ["anonymous"]
- else:
- host = userpass_host[1]
- userpass = userpass_host[0].split(":")
- del userpass_host
-
- if len(userpass) > 2:
- raise ValueError, "Unable to interpret username/password provided."
- elif len(userpass) == 2:
- username = userpass[0]
- password = userpass[1]
- elif len(userpass) == 1:
- username = userpass[0]
- password = None
- del userpass
-
- http_headers = {}
- http_params = {}
- if username and password:
- http_headers = {
- "Authorization": "Basic %s" %
- base64.encodestring("%s:%s" % (username, password)).replace(
- "\012",
- ""
- ),
- }
-
- if not conn:
- if protocol == "https":
- conn = httplib.HTTPSConnection(host)
- elif protocol == "http":
- conn = httplib.HTTPConnection(host)
- elif protocol == "ftp":
- passive = 1
- if(host[-1] == "*"):
- passive = 0
- host = host[:-1]
- conn = ftplib.FTP(host)
- if password:
- conn.login(username,password)
- else:
- sys.stderr.write(yellow(" * No password provided for username")+" '"+str(username)+"'\n\n")
- conn.login(username)
- conn.set_pasv(passive)
- conn.set_debuglevel(0)
- else:
- raise NotImplementedError, "%s is not a supported protocol." % protocol
-
- return (conn,protocol,address, http_params, http_headers)
-
-def make_ftp_request(conn, address, rest=None, dest=None):
- """(conn,address,rest) --- uses the conn object to request the data
- from address, issuing a REST offset if one is passed."""
- try:
-
- if dest:
- fstart_pos = dest.tell()
-
- conn.voidcmd("TYPE I")
- fsize = conn.size(address)
-
- if (rest != None) and (rest < 0):
- rest = fsize+int(rest)
- if rest < 0:
- rest = 0
-
- if rest != None:
- mysocket = conn.transfercmd("RETR "+str(address), rest)
- else:
- mysocket = conn.transfercmd("RETR "+str(address))
-
- mydata = ""
- while 1:
- somedata = mysocket.recv(8192)
- if somedata:
- if dest:
- dest.write(somedata)
- else:
- mydata = mydata + somedata
- else:
- break
-
- if dest:
- data_size = dest.tell() - fstart_pos
- else:
- data_size = len(mydata)
-
- mysocket.close()
- conn.voidresp()
- conn.voidcmd("TYPE A")
-
- return mydata,not (fsize==data_size),""
-
- except ValueError, e:
- return None,int(str(e)[:4]),str(e)
-
-
-def make_http_request(conn, address, params={}, headers={}, dest=None):
- """(conn,address,params,headers) --- uses the conn object to request
- the data from address, performing Location forwarding and using the
- optional params and headers."""
-
- rc = 0
- response = None
- while (rc == 0) or (rc == 301) or (rc == 302):
- try:
- if (rc != 0):
- conn,ignore,ignore,ignore,ignore = create_conn(address)
- conn.request("GET", address, params, headers)
- except SystemExit, e:
- raise
- except Exception, e:
- return None,None,"Server request failed: "+str(e)
- response = conn.getresponse()
- rc = response.status
-
- # 301 means that the page address is wrong.
- if ((rc == 301) or (rc == 302)):
- ignored_data = response.read()
- del ignored_data
- for x in str(response.msg).split("\n"):
- parts = x.split(": ",1)
- if parts[0] == "Location":
- if (rc == 301):
- sys.stderr.write(red("Location has moved: ")+str(parts[1])+"\n")
- if (rc == 302):
- sys.stderr.write(red("Location has temporarily moved: ")+str(parts[1])+"\n")
- address = parts[1]
- break
-
- if (rc != 200) and (rc != 206):
- sys.stderr.write(str(response.msg)+"\n")
- sys.stderr.write(response.read()+"\n")
- sys.stderr.write("address: "+address+"\n")
- return None,rc,"Server did not respond successfully ("+str(response.status)+": "+str(response.reason)+")"
-
- if dest:
- dest.write(response.read())
- return "",0,""
-
- return response.read(),0,""
-
-
-def match_in_array(array, prefix="", suffix="", match_both=1, allow_overlap=0):
- myarray = []
-
- if not (prefix and suffix):
- match_both = 0
-
- for x in array:
- add_p = 0
- if prefix and (len(x) >= len(prefix)) and (x[:len(prefix)] == prefix):
- add_p = 1
-
- if match_both:
- if prefix and not add_p: # Require both, but don't have first one.
- continue
- else:
- if add_p: # Only need one, and we have it.
- myarray.append(x[:])
- continue
-
- if not allow_overlap: # Don't allow prefix and suffix to overlap
- if len(x) >= (len(prefix)+len(suffix)):
- y = x[len(prefix):]
- else:
- continue # Too short to match.
- else:
- y = x # Do whatever... We're overlapping.
-
- if suffix and (len(x) >= len(suffix)) and (x[-len(suffix):] == suffix):
- myarray.append(x) # It matches
- else:
- continue # Doesn't match.
-
- return myarray
-
-
-
-def dir_get_list(baseurl,conn=None):
- """(baseurl[,connection]) -- Takes a base url to connect to and read from.
- URL should be in the form <proto>://<site>[:port]<path>
- Connection is used for persistent connection instances."""
-
- if not conn:
- keepconnection = 0
- else:
- keepconnection = 1
-
- conn,protocol,address,params,headers = create_conn(baseurl, conn)
-
- listing = None
- if protocol in ["http","https"]:
- page,rc,msg = make_http_request(conn,address,params,headers)
-
- if page:
- parser = ParseLinks()
- parser.feed(page)
- del page
- listing = parser.get_anchors()
- else:
- raise Exception, "Unable to get listing: %s %s" % (rc,msg)
- elif protocol in ["ftp"]:
- if address[-1] == '/':
- olddir = conn.pwd()
- conn.cwd(address)
- listing = conn.nlst()
- conn.cwd(olddir)
- del olddir
- else:
- listing = conn.nlst(address)
- else:
- raise TypeError, "Unknown protocol. '%s'" % protocol
-
- if not keepconnection:
- conn.close()
-
- return listing
-
-def file_get_metadata(baseurl,conn=None, chunk_size=3000):
- """(baseurl[,connection]) -- Takes a base url to connect to and read from.
- URL should be in the form <proto>://<site>[:port]<path>
- Connection is used for persistent connection instances."""
-
- if not conn:
- keepconnection = 0
- else:
- keepconnection = 1
-
- conn,protocol,address,params,headers = create_conn(baseurl, conn)
-
- if protocol in ["http","https"]:
- headers["Range"] = "bytes=-"+str(chunk_size)
- data,rc,msg = make_http_request(conn, address, params, headers)
- elif protocol in ["ftp"]:
- data,rc,msg = make_ftp_request(conn, address, -chunk_size)
- else:
- raise TypeError, "Unknown protocol. '%s'" % protocol
-
- if data:
- xpaksize = xpak.decodeint(data[-8:-4])
- if (xpaksize+8) > chunk_size:
- myid = file_get_metadata(baseurl, conn, (xpaksize+8))
- if not keepconnection:
- conn.close()
- return myid
- else:
- xpak_data = data[len(data)-(xpaksize+8):-8]
- del data
-
- myid = xpak.xsplit_mem(xpak_data)
- if not myid:
- myid = None,None
- del xpak_data
- else:
- myid = None,None
-
- if not keepconnection:
- conn.close()
-
- return myid
-
-
-def file_get(baseurl,dest,conn=None,fcmd=None):
- """(baseurl,dest,fcmd=) -- Takes a base url to connect to and read from.
- URL should be in the form <proto>://[user[:pass]@]<site>[:port]<path>"""
-
- if not fcmd:
- return file_get_lib(baseurl,dest,conn)
-
- fcmd = fcmd.replace("${DISTDIR}",dest)
- fcmd = fcmd.replace("${URI}", baseurl)
- fcmd = fcmd.replace("${FILE}", os.path.basename(baseurl))
- mysplit = fcmd.split()
- mycmd = mysplit[0]
- myargs = [os.path.basename(mycmd)]+mysplit[1:]
- mypid=os.fork()
- if mypid == 0:
- try:
- os.execv(mycmd,myargs)
- except OSError:
- pass
- sys.stderr.write("!!! Failed to spawn fetcher.\n")
- sys.stderr.flush()
- os._exit(1)
- retval=os.waitpid(mypid,0)[1]
- if (retval & 0xff) == 0:
- retval = retval >> 8
- else:
- sys.stderr.write("Spawned processes caught a signal.\n")
- sys.exit(1)
- if retval != 0:
- sys.stderr.write("Fetcher exited with a failure condition.\n")
- return 0
- return 1
-
-def file_get_lib(baseurl,dest,conn=None):
- """(baseurl[,connection]) -- Takes a base url to connect to and read from.
- URL should be in the form <proto>://<site>[:port]<path>
- Connection is used for persistent connection instances."""
-
- if not conn:
- keepconnection = 0
- else:
- keepconnection = 1
-
- conn,protocol,address,params,headers = create_conn(baseurl, conn)
-
- sys.stderr.write("Fetching '"+str(os.path.basename(address)+"'\n"))
- if protocol in ["http","https"]:
- data,rc,msg = make_http_request(conn, address, params, headers, dest=dest)
- elif protocol in ["ftp"]:
- data,rc,msg = make_ftp_request(conn, address, dest=dest)
- else:
- raise TypeError, "Unknown protocol. '%s'" % protocol
-
- if not keepconnection:
- conn.close()
-
- return rc
-
-
-def dir_get_metadata(baseurl, conn=None, chunk_size=3000, verbose=1, usingcache=1, makepickle=None):
- """(baseurl,conn,chunk_size,verbose) --
- """
- if not conn:
- keepconnection = 0
- else:
- keepconnection = 1
-
- if makepickle is None:
- makepickle = "/var/cache/edb/metadata.idx.most_recent"
-
- conn,protocol,address,params,headers = create_conn(baseurl, conn)
-
- filedict = {}
-
- try:
- metadatafile = open("/var/cache/edb/remote_metadata.pickle")
- metadata = cPickle.load(metadatafile)
- sys.stderr.write("Loaded metadata pickle.\n")
- metadatafile.close()
- except (cPickle.UnpicklingError, OSError, IOError, EOFError):
- metadata = {}
- if not metadata.has_key(baseurl):
- metadata[baseurl]={}
- if not metadata[baseurl].has_key("indexname"):
- metadata[baseurl]["indexname"]=""
- if not metadata[baseurl].has_key("timestamp"):
- metadata[baseurl]["timestamp"]=0
-	if not metadata[baseurl].has_key("modified"):
-		metadata[baseurl]["modified"]=0
- if not metadata[baseurl].has_key("data"):
- metadata[baseurl]["data"]={}
-
- filelist = dir_get_list(baseurl, conn)
- tbz2list = match_in_array(filelist, suffix=".tbz2")
- metalist = match_in_array(filelist, prefix="metadata.idx")
- del filelist
-
- # Determine if our metadata file is current.
- metalist.sort()
- metalist.reverse() # makes the order new-to-old.
- havecache=0
- for mfile in metalist:
- if usingcache and \
- ((metadata[baseurl]["indexname"] != mfile) or \
- (metadata[baseurl]["timestamp"] < int(time.time()-(60*60*24)))):
- # Try to download new cache until we succeed on one.
- data=""
- for trynum in [1,2,3]:
- mytempfile = tempfile.TemporaryFile()
- try:
- file_get(baseurl+"/"+mfile, mytempfile, conn)
- if mytempfile.tell() > len(data):
- mytempfile.seek(0)
- data = mytempfile.read()
- except ValueError, e:
- sys.stderr.write("--- "+str(e)+"\n")
- if trynum < 3:
- sys.stderr.write("Retrying...\n")
- mytempfile.close()
- continue
- if match_in_array([mfile],suffix=".gz"):
- sys.stderr.write("gzip'd\n")
- try:
- import gzip
- mytempfile.seek(0)
- gzindex = gzip.GzipFile(mfile[:-3],'rb',9,mytempfile)
- data = gzindex.read()
- except SystemExit, e:
- raise
- except Exception, e:
- mytempfile.close()
- sys.stderr.write("!!! Failed to use gzip: "+str(e)+"\n")
- mytempfile.close()
- try:
- metadata[baseurl]["data"] = cPickle.loads(data)
- del data
- metadata[baseurl]["indexname"] = mfile
- metadata[baseurl]["timestamp"] = int(time.time())
- metadata[baseurl]["modified"] = 0 # It's not, right after download.
- sys.stderr.write("Pickle loaded.\n")
- break
- except SystemExit, e:
- raise
- except Exception, e:
- sys.stderr.write("!!! Failed to read data from index: "+str(mfile)+"\n")
- sys.stderr.write("!!! "+str(e)+"\n")
- try:
- metadatafile = open("/var/cache/edb/remote_metadata.pickle", "w+")
- cPickle.dump(metadata,metadatafile)
- metadatafile.close()
- except SystemExit, e:
- raise
- except Exception, e:
- sys.stderr.write("!!! Failed to write binary metadata to disk!\n")
- sys.stderr.write("!!! "+str(e)+"\n")
- break
- # We may have metadata... now we run through the tbz2 list and check.
- sys.stderr.write(yellow("cache miss: 'x'")+" --- "+green("cache hit: 'o'")+"\n")
- binpkg_filenames = set()
- for x in tbz2list:
- x = os.path.basename(x)
- binpkg_filenames.add(x)
-		if not metadata[baseurl]["data"].has_key(x):
- sys.stderr.write(yellow("x"))
- metadata[baseurl]["modified"] = 1
- myid = None
- for retry in xrange(3):
- try:
- myid = file_get_metadata(
- "/".join((baseurl.rstrip("/"), x.lstrip("/"))),
- conn, chunk_size)
- break
- except httplib.BadStatusLine:
- # Sometimes this error is thrown from conn.getresponse() in
- # make_http_request(). The docstring for this error in
- # httplib.py says "Presumably, the server closed the
- # connection before sending a valid response".
- conn, protocol, address, params, headers = create_conn(
- baseurl)
-
- if myid and myid[0]:
- metadata[baseurl]["data"][x] = make_metadata_dict(myid)
- elif verbose:
- sys.stderr.write(red("!!! Failed to retrieve metadata on: ")+str(x)+"\n")
- else:
- sys.stderr.write(green("o"))
- # Cleanse stale cache for files that don't exist on the server anymore.
- stale_cache = set(metadata[baseurl]["data"]).difference(binpkg_filenames)
- if stale_cache:
- for x in stale_cache:
- del metadata[baseurl]["data"][x]
- metadata[baseurl]["modified"] = 1
- del stale_cache
- del binpkg_filenames
- sys.stderr.write("\n")
-
- try:
- if metadata[baseurl].has_key("modified") and metadata[baseurl]["modified"]:
- metadata[baseurl]["timestamp"] = int(time.time())
- metadatafile = open("/var/cache/edb/remote_metadata.pickle", "w+")
- cPickle.dump(metadata,metadatafile)
- metadatafile.close()
- if makepickle:
- metadatafile = open(makepickle, "w")
- cPickle.dump(metadata[baseurl]["data"],metadatafile)
- metadatafile.close()
- except SystemExit, e:
- raise
- except Exception, e:
- sys.stderr.write("!!! Failed to write binary metadata to disk!\n")
- sys.stderr.write("!!! "+str(e)+"\n")
-
- if not keepconnection:
- conn.close()
-
- return metadata[baseurl]["data"]
+portage/getbinpkg.py
\ No newline at end of file
diff --git a/pym/output.py b/pym/output.py
index 62ec975f..f99c2257 100644..120000
--- a/pym/output.py
+++ b/pym/output.py
@@ -1,393 +1 @@
-# Copyright 1998-2004 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-# $Id$
-
-__docformat__ = "epytext"
-
-import commands,errno,os,re,shlex,sys
-from portage_const import COLOR_MAP_FILE
-from portage_util import writemsg
-from portage_exception import PortageException, ParseError, PermissionDenied, FileNotFound
-
-havecolor=1
-dotitles=1
-
-esc_seq = "\x1b["
-
-g_attr = {}
-g_attr["normal"] = 0
-
-g_attr["bold"] = 1
-g_attr["faint"] = 2
-g_attr["standout"] = 3
-g_attr["underline"] = 4
-g_attr["blink"] = 5
-g_attr["overline"] = 6 # Why is overline actually useful?
-g_attr["reverse"] = 7
-g_attr["invisible"] = 8
-
-g_attr["no-attr"] = 22
-g_attr["no-standout"] = 23
-g_attr["no-underline"] = 24
-g_attr["no-blink"] = 25
-g_attr["no-overline"] = 26
-g_attr["no-reverse"] = 27
-# 28 isn't defined?
-# 29 isn't defined?
-g_attr["black"] = 30
-g_attr["red"] = 31
-g_attr["green"] = 32
-g_attr["yellow"] = 33
-g_attr["blue"] = 34
-g_attr["magenta"] = 35
-g_attr["cyan"] = 36
-g_attr["white"] = 37
-# 38 isn't defined?
-g_attr["default"] = 39
-g_attr["bg_black"] = 40
-g_attr["bg_red"] = 41
-g_attr["bg_green"] = 42
-g_attr["bg_yellow"] = 43
-g_attr["bg_blue"] = 44
-g_attr["bg_magenta"] = 45
-g_attr["bg_cyan"] = 46
-g_attr["bg_white"] = 47
-g_attr["bg_default"] = 49
-
-
-# make_seq("blue", "black", "normal")
-def color(fg, bg="default", attr=["normal"]):
- mystr = esc_seq[:] + "%02d" % g_attr[fg]
- for x in [bg]+attr:
- mystr += ";%02d" % g_attr[x]
- return mystr+"m"
-
-
-
-codes={}
-codes["reset"] = esc_seq + "39;49;00m"
-
-codes["bold"] = esc_seq + "01m"
-codes["faint"] = esc_seq + "02m"
-codes["standout"] = esc_seq + "03m"
-codes["underline"] = esc_seq + "04m"
-codes["blink"] = esc_seq + "05m"
-codes["overline"] = esc_seq + "06m" # Who made this up? Seriously.
-
-ansi_color_codes = []
-for x in xrange(30, 38):
- ansi_color_codes.append("%im" % x)
- ansi_color_codes.append("%i;01m" % x)
-
-rgb_ansi_colors = ['0x000000', '0x555555', '0xAA0000', '0xFF5555', '0x00AA00',
- '0x55FF55', '0xAA5500', '0xFFFF55', '0x0000AA', '0x5555FF', '0xAA00AA',
- '0xFF55FF', '0x00AAAA', '0x55FFFF', '0xAAAAAA', '0xFFFFFF']
-
-for x in xrange(len(rgb_ansi_colors)):
- codes[rgb_ansi_colors[x]] = esc_seq + ansi_color_codes[x]
-
-del x
-
-codes["black"] = codes["0x000000"]
-codes["darkgray"] = codes["0x555555"]
-
-codes["red"] = codes["0xFF5555"]
-codes["darkred"] = codes["0xAA0000"]
-
-codes["green"] = codes["0x55FF55"]
-codes["darkgreen"] = codes["0x00AA00"]
-
-codes["yellow"] = codes["0xFFFF55"]
-codes["brown"] = codes["0xAA5500"]
-
-codes["blue"] = codes["0x5555FF"]
-codes["darkblue"] = codes["0x0000AA"]
-
-codes["fuchsia"] = codes["0xFF55FF"]
-codes["purple"] = codes["0xAA00AA"]
-
-codes["turquoise"] = codes["0x55FFFF"]
-codes["teal"] = codes["0x00AAAA"]
-
-codes["white"] = codes["0xFFFFFF"]
-codes["lightgray"] = codes["0xAAAAAA"]
-
-codes["darkteal"] = codes["turquoise"]
-codes["darkyellow"] = codes["brown"]
-codes["fuscia"] = codes["fuchsia"]
-codes["white"] = codes["bold"]
-
-# Colors from /sbin/functions.sh
-codes["GOOD"] = codes["green"]
-codes["WARN"] = codes["yellow"]
-codes["BAD"] = codes["red"]
-codes["HILITE"] = codes["teal"]
-codes["BRACKET"] = codes["blue"]
-
-# Portage functions
-codes["INFORM"] = codes["darkgreen"]
-codes["UNMERGE_WARN"] = codes["red"]
-codes["MERGE_LIST_PROGRESS"] = codes["yellow"]
-
-def parse_color_map():
- myfile = COLOR_MAP_FILE
- ansi_code_pattern = re.compile("^[0-9;]*m$")
- def strip_quotes(token, quotes):
- if token[0] in quotes and token[0] == token[-1]:
- token = token[1:-1]
- return token
- try:
- s = shlex.shlex(open(myfile))
- s.wordchars = s.wordchars + ";" # for ansi codes
- d = {}
- while True:
- k, o, v = s.get_token(), s.get_token(), s.get_token()
- if k is s.eof:
- break
- if o != "=":
- raise ParseError("%s%s'%s'" % (s.error_leader(myfile, s.lineno), "expected '=' operator: ", o))
- k = strip_quotes(k, s.quotes)
- v = strip_quotes(v, s.quotes)
- if ansi_code_pattern.match(v):
- codes[k] = esc_seq + v
- else:
- if v in codes:
- codes[k] = codes[v]
- else:
- raise ParseError("%s%s'%s'" % (s.error_leader(myfile, s.lineno), "Undefined: ", v))
- except (IOError, OSError), e:
- if e.errno == errno.ENOENT:
- raise FileNotFound(myfile)
- elif e.errno == errno.EACCES:
- raise PermissionDenied(myfile)
- raise
-
-try:
- parse_color_map()
-except FileNotFound, e:
- pass
-except PortageException, e:
- writemsg("%s\n" % str(e))
-
-def nc_len(mystr):
-	tmp = re.sub(esc_seq + "^m]+m", "", mystr)
- return len(tmp)
-
-def xtermTitle(mystr, raw=False):
- if havecolor and dotitles and os.environ.has_key("TERM") and sys.stderr.isatty():
- myt=os.environ["TERM"]
- legal_terms = ["xterm","Eterm","aterm","rxvt","screen","kterm","rxvt-unicode","gnome"]
- for term in legal_terms:
- if myt.startswith(term):
- if not raw:
- mystr = "\x1b]0;%s\x07" % mystr
- sys.stderr.write(mystr)
- sys.stderr.flush()
- break
-
-default_xterm_title = None
-
-def xtermTitleReset():
- global default_xterm_title
- if default_xterm_title is None:
- prompt_command = os.getenv('PROMPT_COMMAND')
- if prompt_command == "":
- default_xterm_title = ""
- elif prompt_command is not None:
- default_xterm_title = commands.getoutput(prompt_command)
- else:
- pwd = os.getenv('PWD','')
- home = os.getenv('HOME', '')
- if home != '' and pwd.startswith(home):
- pwd = '~' + pwd[len(home):]
- default_xterm_title = '\x1b]0;%s@%s:%s\x07' % (
- os.getenv('LOGNAME', ''), os.getenv('HOSTNAME', '').split('.', 1)[0], pwd)
- xtermTitle(default_xterm_title, raw=True)
-
-def notitles():
-	"turn off title setting"
-	global dotitles
-	dotitles=0
-
-def nocolor():
- "turn off colorization"
- global havecolor
- havecolor=0
-
-def resetColor():
- return codes["reset"]
-
-def colorize(color_key, text):
- global havecolor
- if havecolor:
- return codes[color_key] + text + codes["reset"]
- else:
- return text
-
-compat_functions_colors = ["bold","white","teal","turquoise","darkteal",
- "fuscia","fuchsia","purple","blue","darkblue","green","darkgreen","yellow",
- "brown","darkyellow","red","darkred"]
-
-def create_color_func(color_key):
- def derived_func(*args):
- newargs = list(args)
- newargs.insert(0, color_key)
- return colorize(*newargs)
- return derived_func
-
-for c in compat_functions_colors:
- setattr(sys.modules[__name__], c, create_color_func(c))
-
-class EOutput:
- """
- Performs fancy terminal formatting for status and informational messages.
-
- The provided methods produce identical terminal output to the eponymous
- functions in the shell script C{/sbin/functions.sh} and also accept
- identical parameters.
-
- This is not currently a drop-in replacement however, as the output-related
- functions in C{/sbin/functions.sh} are oriented for use mainly by system
- init scripts and ebuilds and their output can be customized via certain
- C{RC_*} environment variables (see C{/etc/conf.d/rc}). B{EOutput} is not
- customizable in this manner since it's intended for more general uses.
- Likewise, no logging is provided.
-
- @ivar quiet: Specifies if output should be silenced.
- @type quiet: BooleanType
- @ivar term_columns: Width of terminal in characters. Defaults to the value
- specified by the shell's C{COLUMNS} variable, else to the queried tty
- size, else to C{80}.
- @type term_columns: IntType
- """
-
- def __init__(self):
- self.__last_e_cmd = ""
- self.__last_e_len = 0
- self.quiet = False
- columns = 0
- try:
- columns = int(os.getenv("COLUMNS", 0))
- except ValueError:
- pass
- if columns <= 0:
- try:
- columns = int(commands.getoutput(
- 'set -- `stty size 2>/dev/null` ; echo "$2"'))
- except ValueError:
- pass
- if columns <= 0:
- columns = 80
- self.term_columns = columns
-
- def __eend(self, caller, errno, msg):
- if errno == 0:
- status_brackets = colorize("BRACKET", "[ ") + colorize("GOOD", "ok") + colorize("BRACKET", " ]")
- else:
- status_brackets = colorize("BRACKET", "[ ") + colorize("BAD", "!!") + colorize("BRACKET", " ]")
- if msg:
- if caller == "eend":
- self.eerror(msg[0])
- elif caller == "ewend":
- self.ewarn(msg[0])
- if self.__last_e_cmd != "ebegin":
- self.__last_e_len = 0
- print "%*s%s" % ((self.term_columns - self.__last_e_len - 6), "", status_brackets)
- sys.stdout.flush()
-
- def ebegin(self, msg):
- """
- Shows a message indicating the start of a process.
-
- @param msg: A very brief (shorter than one line) description of the
- starting process.
- @type msg: StringType
- """
- msg += " ..."
- if not self.quiet:
- self.einfon(msg)
- self.__last_e_len = len(msg) + 4
- self.__last_e_cmd = "ebegin"
-
- def eend(self, errno, *msg):
- """
- Indicates the completion of a process, optionally displaying a message
- via L{eerror} if the process's exit status isn't C{0}.
-
- @param errno: A standard UNIX C{errno} code returned by processes upon
- exit.
- @type errno: IntType
- @param msg: I{(optional)} An error message, typically a standard UNIX
- error string corresponding to C{errno}.
- @type msg: StringType
- """
- if not self.quiet:
- self.__eend("eend", errno, msg)
- self.__last_e_cmd = "eend"
-
- def eerror(self, msg):
- """
- Shows an error message.
-
- @param msg: A very brief (shorter than one line) error message.
- @type msg: StringType
- """
- if not self.quiet:
- if self.__last_e_cmd == "ebegin": print
- print colorize("BAD", " * ") + msg
- sys.stdout.flush()
- self.__last_e_cmd = "eerror"
-
- def einfo(self, msg):
- """
- Shows an informative message terminated with a newline.
-
- @param msg: A very brief (shorter than one line) informative message.
- @type msg: StringType
- """
- if not self.quiet:
- if self.__last_e_cmd == "ebegin": print
- print colorize("GOOD", " * ") + msg
- sys.stdout.flush()
- self.__last_e_cmd = "einfo"
-
- def einfon(self, msg):
- """
- Shows an informative message terminated without a newline.
-
- @param msg: A very brief (shorter than one line) informative message.
- @type msg: StringType
- """
- if not self.quiet:
- if self.__last_e_cmd == "ebegin": print
- print colorize("GOOD", " * ") + msg ,
- sys.stdout.flush()
- self.__last_e_cmd = "einfon"
-
- def ewarn(self, msg):
- """
- Shows a warning message.
-
- @param msg: A very brief (shorter than one line) warning message.
- @type msg: StringType
- """
- if not self.quiet:
- if self.__last_e_cmd == "ebegin": print
- print colorize("WARN", " * ") + msg
- sys.stdout.flush()
- self.__last_e_cmd = "ewarn"
-
- def ewend(self, errno, *msg):
- """
- Indicates the completion of a process, optionally displaying a message
- via L{ewarn} if the process's exit status isn't C{0}.
-
- @param errno: A standard UNIX C{errno} code returned by processes upon
- exit.
- @type errno: IntType
- @param msg: I{(optional)} A warning message, typically a standard UNIX
- error string corresponding to C{errno}.
- @type msg: StringType
- """
- if not self.quiet:
- self.__eend("ewend", errno, msg)
- self.__last_e_cmd = "ewend"
+portage/output.py
\ No newline at end of file
diff --git a/pym/portage.py b/pym/portage/__init__.py
index 98303857..98303857 100644
--- a/pym/portage.py
+++ b/pym/portage/__init__.py
diff --git a/pym/cache/__init__.py b/pym/portage/cache/__init__.py
index cb1b59d6..cb1b59d6 100644
--- a/pym/cache/__init__.py
+++ b/pym/portage/cache/__init__.py
diff --git a/pym/cache/anydbm.py b/pym/portage/cache/anydbm.py
index a4e0003d..a4e0003d 100644
--- a/pym/cache/anydbm.py
+++ b/pym/portage/cache/anydbm.py
diff --git a/pym/cache/cache_errors.py b/pym/portage/cache/cache_errors.py
index f63e5994..f63e5994 100644
--- a/pym/cache/cache_errors.py
+++ b/pym/portage/cache/cache_errors.py
diff --git a/pym/cache/flat_hash.py b/pym/portage/cache/flat_hash.py
index 48e8a175..48e8a175 100644
--- a/pym/cache/flat_hash.py
+++ b/pym/portage/cache/flat_hash.py
diff --git a/pym/cache/flat_list.py b/pym/portage/cache/flat_list.py
index 85efa4c0..85efa4c0 100644
--- a/pym/cache/flat_list.py
+++ b/pym/portage/cache/flat_list.py
diff --git a/pym/cache/fs_template.py b/pym/portage/cache/fs_template.py
index b76e98bd..b76e98bd 100644
--- a/pym/cache/fs_template.py
+++ b/pym/portage/cache/fs_template.py
diff --git a/pym/cache/mappings.py b/pym/portage/cache/mappings.py
index 9aa5a21e..9aa5a21e 100644
--- a/pym/cache/mappings.py
+++ b/pym/portage/cache/mappings.py
diff --git a/pym/cache/metadata.py b/pym/portage/cache/metadata.py
index df039d5e..df039d5e 100644
--- a/pym/cache/metadata.py
+++ b/pym/portage/cache/metadata.py
diff --git a/pym/cache/metadata_overlay.py b/pym/portage/cache/metadata_overlay.py
index d82ba96f..d82ba96f 100644
--- a/pym/cache/metadata_overlay.py
+++ b/pym/portage/cache/metadata_overlay.py
diff --git a/pym/cache/sql_template.py b/pym/portage/cache/sql_template.py
index e635616e..e635616e 100644
--- a/pym/cache/sql_template.py
+++ b/pym/portage/cache/sql_template.py
diff --git a/pym/cache/sqlite.py b/pym/portage/cache/sqlite.py
index 5c1bfa26..5c1bfa26 100644
--- a/pym/cache/sqlite.py
+++ b/pym/portage/cache/sqlite.py
diff --git a/pym/cache/template.py b/pym/portage/cache/template.py
index 4ffd9b9e..4ffd9b9e 100644
--- a/pym/cache/template.py
+++ b/pym/portage/cache/template.py
diff --git a/pym/cache/util.py b/pym/portage/cache/util.py
index 6393deef..6393deef 100644
--- a/pym/cache/util.py
+++ b/pym/portage/cache/util.py
diff --git a/pym/cache/volatile.py b/pym/portage/cache/volatile.py
index 0a204b70..0a204b70 100644
--- a/pym/cache/volatile.py
+++ b/pym/portage/cache/volatile.py
diff --git a/pym/portage/checksum.py b/pym/portage/checksum.py
new file mode 100644
index 00000000..7f1a89c8
--- /dev/null
+++ b/pym/portage/checksum.py
@@ -0,0 +1,219 @@
+# portage_checksum.py -- core Portage functionality
+# Copyright 1998-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+
+from portage_const import PRIVATE_PATH,PRELINK_BINARY,HASHING_BLOCKSIZE
+import os
+import errno
+import shutil
+import stat
+import portage_exception
+import portage_exec
+import portage_util
+import portage_locks
+import commands
+import sha
+
+
+# actual hash functions first
+
+#dict of all available hash functions
+hashfunc_map = {}
+
+# We _try_ to load this module. If it fails we do the slightly slower fallback.
+try:
+ import fchksum
+
+ def md5hash(filename):
+ return fchksum.fmd5t(filename)
+
+except ImportError:
+ import md5
+ def md5hash(filename):
+ return pyhash(filename, md5)
+hashfunc_map["MD5"] = md5hash
+
+def sha1hash(filename):
+ return pyhash(filename, sha)
+hashfunc_map["SHA1"] = sha1hash
+
+# Keep pycrypto optional for now; there are no internal fallbacks for these
+try:
+ import Crypto.Hash.SHA256
+
+ def sha256hash(filename):
+ return pyhash(filename, Crypto.Hash.SHA256)
+ hashfunc_map["SHA256"] = sha256hash
+except ImportError:
+ pass
+
+try:
+ import Crypto.Hash.RIPEMD
+
+ def rmd160hash(filename):
+ return pyhash(filename, Crypto.Hash.RIPEMD)
+ hashfunc_map["RMD160"] = rmd160hash
+except ImportError:
+ pass
+
+def getsize(filename):
+ size = os.stat(filename).st_size
+ return (size, size)
+hashfunc_map["size"] = getsize
+
+# end actual hash functions
+
+prelink_capable = False
+if os.path.exists(PRELINK_BINARY):
+ results = commands.getstatusoutput(PRELINK_BINARY+" --version > /dev/null 2>&1")
+ if (results[0] >> 8) == 0:
+		prelink_capable = True
+ del results
+
+def perform_md5(x, calc_prelink=0):
+ return perform_checksum(x, "MD5", calc_prelink)[0]
+
+def perform_all(x, calc_prelink=0):
+ mydict = {}
+ for k in hashfunc_map.keys():
+		mydict[k] = perform_checksum(x, k, calc_prelink)[0]
+ return mydict
+
+def get_valid_checksum_keys():
+ return hashfunc_map.keys()
+
+def verify_all(filename, mydict, calc_prelink=0, strict=0):
+ """
+ Verify all checksums against a file.
+
+ @param filename: File to run the checksums against
+ @type filename: String
+ @param calc_prelink: Whether or not to reverse prelink before running the checksum
+ @type calc_prelink: Integer
+ @param strict: Enable/Disable strict checking (which stops exactly at a checksum failure and throws an exception)
+ @type strict: Integer
+ @rtype: Tuple
+ @return: Result of the checks and possible message:
+ 1) If size fails, False, and a tuple containing a message, the given size, and the actual size
+ 2) If there is an os error, False, and a tuple containing the system error followed by 2 nulls
+ 3) If a checksum fails, False and a tuple containing a message, the given hash, and the actual hash
+ 4) If all checks succeed, return True and a fake reason
+ """
+ # Dict relates to single file only.
+ # returns: (passed,reason)
+ file_is_ok = True
+ reason = "Reason unknown"
+ try:
+ mysize = os.stat(filename)[stat.ST_SIZE]
+ if mydict["size"] != mysize:
+ return False,("Filesize does not match recorded size", mysize, mydict["size"])
+ except OSError, e:
+ if e.errno == errno.ENOENT:
+ raise portage_exception.FileNotFound(filename)
+ return False, (str(e), None, None)
+ for x in mydict.keys():
+ if x == "size":
+ continue
+ elif x in hashfunc_map.keys():
+ myhash = perform_checksum(filename, x, calc_prelink=calc_prelink)[0]
+ if mydict[x] != myhash:
+ if strict:
+					raise portage_exception.DigestException, "Failed to verify '%(file)s' on checksum type '%(type)s'" % {"file":filename, "type":x}
+ else:
+ file_is_ok = False
+ reason = (("Failed on %s verification" % x), myhash,mydict[x])
+ break
+ return file_is_ok,reason
+
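+# Illustrative use of verify_all() (a sketch, not module code; the file path
+# and digest values below are hypothetical):
+#
+#     mydict = {"size": 14, "MD5": "0123456789abcdef0123456789abcdef"}
+#     ok, reason = verify_all("/path/to/distfile", mydict)
+#     if not ok:
+#         print "!!! verification failed:", reason
+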
+def pyhash(filename, hashobject):
+ """
+ Run a checksum against a file.
+
+ @param filename: File to run the checksum against
+ @type filename: String
+	@param hashobject: The hash module/object that will execute the checksum on the file
+	@type hashobject: Object
+ @return: The hash and size of the data
+ """
+ f = open(filename, 'rb')
+ blocksize = HASHING_BLOCKSIZE
+ data = f.read(blocksize)
+ size = 0L
+ sum = hashobject.new()
+ while data:
+ sum.update(data)
+ size = size + len(data)
+ data = f.read(blocksize)
+ f.close()
+
+ return (sum.hexdigest(), size)
+
+def perform_checksum(filename, hashname="MD5", calc_prelink=0):
+ """
+ Run a specific checksum against a file.
+
+ @param filename: File to run the checksum against
+ @type filename: String
+ @param hashname: The type of hash function to run
+ @type hashname: String
+ @param calc_prelink: Whether or not to reverse prelink before running the checksum
+ @type calc_prelink: Integer
+ @rtype: Tuple
+ @return: The hash and size of the data
+ """
+ myfilename = filename[:]
+ prelink_tmpfile = os.path.join("/", PRIVATE_PATH, "prelink-checksum.tmp." + str(os.getpid()))
+ mylock = None
+ try:
+ if calc_prelink and prelink_capable:
+ mylock = portage_locks.lockfile(prelink_tmpfile, wantnewlockfile=1)
+ # Create non-prelinked temporary file to checksum.
+ # Files rejected by prelink are summed in place.
+ retval = portage_exec.spawn([PRELINK_BINARY, "--undo", "-o",
+ prelink_tmpfile, filename], fd_pipes={})
+ if retval == os.EX_OK:
+ myfilename = prelink_tmpfile
+ try:
+ if hashname not in hashfunc_map:
+ raise portage_exception.DigestException(hashname + \
+ " hash function not available (needs dev-python/pycrypto)")
+ myhash, mysize = hashfunc_map[hashname](myfilename)
+ except (OSError, IOError), e:
+ if e.errno == errno.ENOENT:
+ raise portage_exception.FileNotFound(myfilename)
+ raise
+ if calc_prelink and prelink_capable:
+ try:
+ os.unlink(prelink_tmpfile)
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ return myhash, mysize
+ finally:
+ if mylock:
+ portage_locks.unlockfile(mylock)
+
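+# A minimal call sketch (hypothetical path; "MD5" is always registered above,
+# while other hash types depend on the optional imports at the top of this
+# file):
+#
+#     myhash, mysize = perform_checksum("/path/to/file", hashname="MD5")
+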
+def perform_multiple_checksums(filename, hashes=["MD5"], calc_prelink=0):
+ """
+ Run a group of checksums against a file.
+
+ @param filename: File to run the checksums against
+ @type filename: String
+ @param hashes: A list of checksum functions to run against the file
+	@type hashes: List
+ @param calc_prelink: Whether or not to reverse prelink before running the checksum
+ @type calc_prelink: Integer
+	@rtype: Dictionary
+	@return: A dictionary in the form:
+		return_value[hash_name] = hash_result
+		for each given checksum
+ """
+ rVal = {}
+ for x in hashes:
+ if x not in hashfunc_map:
+ raise portage_exception.DigestException, x+" hash function not available (needs dev-python/pycrypto)"
+ rVal[x] = perform_checksum(filename, x, calc_prelink)[0]
+ return rVal
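+
+# e.g. (illustrative; consistent with the per-hash results returned above):
+#
+#     digests = perform_multiple_checksums("/path/to/file", hashes=["MD5", "SHA1"])
+#     # digests == {"MD5": "<hex digest>", "SHA1": "<hex digest>"}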
diff --git a/pym/portage/const.py b/pym/portage/const.py
new file mode 100644
index 00000000..e1af7cb4
--- /dev/null
+++ b/pym/portage/const.py
@@ -0,0 +1,65 @@
+# portage: Constants
+# Copyright 1998-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+
+# ===========================================================================
+# START OF CONSTANTS -- START OF CONSTANTS -- START OF CONSTANTS -- START OF
+# ===========================================================================
+
+import os
+
+VDB_PATH = "var/db/pkg"
+PRIVATE_PATH = "var/lib/portage"
+CACHE_PATH = "/var/cache/edb"
+DEPCACHE_PATH = CACHE_PATH+"/dep"
+
+USER_CONFIG_PATH = "/etc/portage"
+MODULES_FILE_PATH = USER_CONFIG_PATH+"/modules"
+CUSTOM_PROFILE_PATH = USER_CONFIG_PATH+"/profile"
+
+#PORTAGE_BASE_PATH = "/usr/lib/portage"
+PORTAGE_BASE_PATH = os.path.join(os.sep, os.sep.join(__file__.split(os.sep)[:-3]))
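+# e.g. for an install rooted at the commented default above, __file__ is
+# "/usr/lib/portage/pym/portage/const.py" and PORTAGE_BASE_PATH resolves to
+# "/usr/lib/portage" (illustrative path)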
+PORTAGE_BIN_PATH = PORTAGE_BASE_PATH+"/bin"
+PORTAGE_PYM_PATH = PORTAGE_BASE_PATH+"/pym"
+NEWS_LIB_PATH = "/var/lib/gentoo"
+PROFILE_PATH = "/etc/make.profile"
+LOCALE_DATA_PATH = PORTAGE_BASE_PATH+"/locale"
+
+EBUILD_SH_BINARY = PORTAGE_BIN_PATH+"/ebuild.sh"
+MISC_SH_BINARY = PORTAGE_BIN_PATH + "/misc-functions.sh"
+SANDBOX_BINARY = "/usr/bin/sandbox"
+BASH_BINARY = "/bin/bash"
+MOVE_BINARY = "/bin/mv"
+PRELINK_BINARY = "/usr/sbin/prelink"
+
+WORLD_FILE = PRIVATE_PATH + "/world"
+MAKE_CONF_FILE = "/etc/make.conf"
+MAKE_DEFAULTS_FILE = PROFILE_PATH + "/make.defaults"
+DEPRECATED_PROFILE_FILE = PROFILE_PATH+"/deprecated"
+USER_VIRTUALS_FILE = USER_CONFIG_PATH+"/virtuals"
+EBUILD_SH_ENV_FILE = USER_CONFIG_PATH+"/bashrc"
+INVALID_ENV_FILE = "/etc/spork/is/not/valid/profile.env"
+CUSTOM_MIRRORS_FILE = USER_CONFIG_PATH+"/mirrors"
+CONFIG_MEMORY_FILE = PRIVATE_PATH + "/config"
+COLOR_MAP_FILE = USER_CONFIG_PATH + "/color.map"
+
+REPO_NAME_FILE = "repo_name"
+REPO_NAME_LOC = "profiles" + "/" + REPO_NAME_FILE
+
+INCREMENTALS=["USE","USE_EXPAND","USE_EXPAND_HIDDEN","FEATURES","ACCEPT_KEYWORDS","ACCEPT_LICENSE","CONFIG_PROTECT_MASK","CONFIG_PROTECT","PRELINK_PATH","PRELINK_PATH_MASK"]
+EBUILD_PHASES = ["setup", "unpack", "compile", "test", "install",
+ "preinst", "postinst", "prerm", "postrm", "other"]
+
+EAPI = 0
+
+HASHING_BLOCKSIZE = 32768
+MANIFEST1_HASH_FUNCTIONS = ["MD5","SHA256","RMD160"]
+MANIFEST2_HASH_FUNCTIONS = ["SHA1","SHA256","RMD160"]
+MANIFEST2_REQUIRED_HASH = "SHA1"
+
+MANIFEST2_IDENTIFIERS = ["AUX","MISC","DIST","EBUILD"]
+# ===========================================================================
+# END OF CONSTANTS -- END OF CONSTANTS -- END OF CONSTANTS -- END OF CONSTANT
+# ===========================================================================
diff --git a/pym/portage/cvstree.py b/pym/portage/cvstree.py
new file mode 100644
index 00000000..30f143cd
--- /dev/null
+++ b/pym/portage/cvstree.py
@@ -0,0 +1,295 @@
+# cvstree.py -- cvs tree utilities
+# Copyright 1998-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+
+import os,time,sys,re
+from stat import *
+
+# [D]/Name/Version/Date/Flags/Tags
+
+def pathdata(entries, path):
+ """(entries,path)
+ Returns the data(dict) for a specific file/dir at the path specified."""
+ mysplit=path.split("/")
+ myentries=entries
+ mytarget=mysplit[-1]
+ mysplit=mysplit[:-1]
+ for mys in mysplit:
+ if myentries["dirs"].has_key(mys):
+ myentries=myentries["dirs"][mys]
+ else:
+ return None
+ if myentries["dirs"].has_key(mytarget):
+ return myentries["dirs"][mytarget]
+ elif myentries["files"].has_key(mytarget):
+ return myentries["files"][mytarget]
+ else:
+ return None
+
+def fileat(entries, path):
+ return pathdata(entries,path)
+
+def isadded(entries, path):
+ """(entries,path)
+ Returns true if the path exists and is added to the cvs tree."""
+ mytarget=pathdata(entries, path)
+ if mytarget:
+ if "cvs" in mytarget["status"]:
+ return 1
+
+ basedir=os.path.dirname(path)
+ filename=os.path.basename(path)
+
+ try:
+ myfile=open(basedir+"/CVS/Entries","r")
+ except IOError:
+ return 0
+ mylines=myfile.readlines()
+ myfile.close()
+
+	rep=re.compile("^/"+re.escape(filename)+"/")
+ for x in mylines:
+ if rep.search(x):
+ return 1
+
+ return 0
+
+def findnew(entries,recursive=0,basedir=""):
+ """(entries,recursive=0,basedir="")
+ Recurses the entries tree to find all elements that have been added but
+ have not yet been committed. Returns a list of paths, optionally prepended
+ with a basedir."""
+ if basedir and basedir[-1]!="/":
+ basedir=basedir+"/"
+ mylist=[]
+ for myfile in entries["files"].keys():
+ if "cvs" in entries["files"][myfile]["status"]:
+ if "0" == entries["files"][myfile]["revision"]:
+ mylist.append(basedir+myfile)
+ if recursive:
+ for mydir in entries["dirs"].keys():
+ mylist+=findnew(entries["dirs"][mydir],recursive,basedir+mydir)
+ return mylist
+
+def findchanged(entries,recursive=0,basedir=""):
+ """(entries,recursive=0,basedir="")
+ Recurses the entries tree to find all elements that exist in the cvs tree
+ and differ from the committed version. Returns a list of paths, optionally
+ prepended with a basedir."""
+ if basedir and basedir[-1]!="/":
+ basedir=basedir+"/"
+ mylist=[]
+ for myfile in entries["files"].keys():
+ if "cvs" in entries["files"][myfile]["status"]:
+ if "current" not in entries["files"][myfile]["status"]:
+ if "exists" in entries["files"][myfile]["status"]:
+ if entries["files"][myfile]["revision"]!="0":
+ mylist.append(basedir+myfile)
+ if recursive:
+ for mydir in entries["dirs"].keys():
+ mylist+=findchanged(entries["dirs"][mydir],recursive,basedir+mydir)
+ return mylist
+
+def findmissing(entries,recursive=0,basedir=""):
+ """(entries,recursive=0,basedir="")
+ Recurses the entries tree to find all elements that are listed in the cvs
+ tree but do not exist on the filesystem. Returns a list of paths,
+ optionally prepended with a basedir."""
+ if basedir and basedir[-1]!="/":
+ basedir=basedir+"/"
+ mylist=[]
+ for myfile in entries["files"].keys():
+ if "cvs" in entries["files"][myfile]["status"]:
+ if "exists" not in entries["files"][myfile]["status"]:
+ if "removed" not in entries["files"][myfile]["status"]:
+ mylist.append(basedir+myfile)
+ if recursive:
+ for mydir in entries["dirs"].keys():
+ mylist+=findmissing(entries["dirs"][mydir],recursive,basedir+mydir)
+ return mylist
+
+def findunadded(entries,recursive=0,basedir=""):
+ """(entries,recursive=0,basedir="")
+ Recurses the entries tree to find all elements that are in valid cvs
+ directories but are not part of the cvs tree. Returns a list of paths,
+ optionally prepended with a basedir."""
+ if basedir and basedir[-1]!="/":
+ basedir=basedir+"/"
+ mylist=[]
+
+ #ignore what cvs ignores.
+ for myfile in entries["files"].keys():
+ if "cvs" not in entries["files"][myfile]["status"]:
+ mylist.append(basedir+myfile)
+ if recursive:
+ for mydir in entries["dirs"].keys():
+ mylist+=findunadded(entries["dirs"][mydir],recursive,basedir+mydir)
+ return mylist
+
+def findremoved(entries,recursive=0,basedir=""):
+ """(entries,recursive=0,basedir="")
+	Recurses the entries tree to find all elements that are flagged for cvs
+	deletion. Returns a list of paths, optionally prepended with a basedir."""
+ if basedir and basedir[-1]!="/":
+ basedir=basedir+"/"
+ mylist=[]
+ for myfile in entries["files"].keys():
+ if "removed" in entries["files"][myfile]["status"]:
+ mylist.append(basedir+myfile)
+ if recursive:
+ for mydir in entries["dirs"].keys():
+ mylist+=findremoved(entries["dirs"][mydir],recursive,basedir+mydir)
+ return mylist
+
+def findall(entries, recursive=0, basedir=""):
+ """(entries,recursive=0,basedir="")
+	Recurses the entries tree to find all new, changed, missing, unadded, and
+	removed entities. Returns a 5 element list of lists as returned from each find*()."""
+
+ if basedir and basedir[-1]!="/":
+ basedir=basedir+"/"
+ mynew = findnew(entries,recursive,basedir)
+ mychanged = findchanged(entries,recursive,basedir)
+ mymissing = findmissing(entries,recursive,basedir)
+ myunadded = findunadded(entries,recursive,basedir)
+ myremoved = findremoved(entries,recursive,basedir)
+ return [mynew, mychanged, mymissing, myunadded, myremoved]
+
+ignore_list = re.compile("(^|/)(RCS(|LOG)|SCCS|CVS(|\.adm)|cvslog\..*|tags|TAGS|\.(make\.state|nse_depinfo)|.*~|(\.|)#.*|,.*|_$.*|.*\$|\.del-.*|.*\.(old|BAK|bak|orig|rej|a|olb|o|obj|so|exe|Z|elc|ln)|core)$")
+def apply_cvsignore_filter(mylist):
+	x=0
+	while x < len(mylist):
+		if ignore_list.match(mylist[x].split("/")[-1]):
+			mylist.pop(x)
+		else:
+			x+=1
+	return mylist
+
+def getentries(mydir,recursive=0):
+ """(basedir,recursive=0)
+	Scans the given directory and returns a datadict of all the entries in
+	the directory separated as a dirs dict and a files dict."""
+ myfn=mydir+"/CVS/Entries"
+ # entries=[dirs, files]
+ entries={"dirs":{},"files":{}}
+ if not os.path.exists(mydir):
+ return entries
+ try:
+ myfile=open(myfn, "r")
+ mylines=myfile.readlines()
+ myfile.close()
+ except SystemExit, e:
+ raise
+ except:
+ mylines=[]
+ for line in mylines:
+ if line and line[-1]=="\n":
+ line=line[:-1]
+ if not line:
+ continue
+ if line=="D": # End of entries file
+ break
+ mysplit=line.split("/")
+ if len(mysplit)!=6:
+ print "Confused:",mysplit
+ continue
+ if mysplit[0]=="D":
+ entries["dirs"][mysplit[1]]={"dirs":{},"files":{},"status":[]}
+ entries["dirs"][mysplit[1]]["status"]=["cvs"]
+ if os.path.isdir(mydir+"/"+mysplit[1]):
+ entries["dirs"][mysplit[1]]["status"]+=["exists"]
+ entries["dirs"][mysplit[1]]["flags"]=mysplit[2:]
+ if recursive:
+ rentries=getentries(mydir+"/"+mysplit[1],recursive)
+ #print rentries.keys()
+ #print entries["files"].keys()
+ #print entries["files"][mysplit[1]]
+ entries["dirs"][mysplit[1]]["dirs"]=rentries["dirs"]
+ entries["dirs"][mysplit[1]]["files"]=rentries["files"]
+ else:
+ # [D]/Name/revision/Date/Flags/Tags
+ entries["files"][mysplit[1]]={}
+ entries["files"][mysplit[1]]["revision"]=mysplit[2]
+ entries["files"][mysplit[1]]["date"]=mysplit[3]
+ entries["files"][mysplit[1]]["flags"]=mysplit[4]
+ entries["files"][mysplit[1]]["tags"]=mysplit[5]
+ entries["files"][mysplit[1]]["status"]=["cvs"]
+ if entries["files"][mysplit[1]]["revision"][0]=="-":
+ entries["files"][mysplit[1]]["status"]+=["removed"]
+
+ for file in apply_cvsignore_filter(os.listdir(mydir)):
+ if file=="CVS":
+ continue
+ if file=="digest-framerd-2.4.3":
+ print mydir,file
+ if os.path.isdir(mydir+"/"+file):
+ if not entries["dirs"].has_key(file):
+ entries["dirs"][file]={"dirs":{},"files":{}}
+ if entries["dirs"][file].has_key("status"):
+ if "exists" not in entries["dirs"][file]["status"]:
+ entries["dirs"][file]["status"]+=["exists"]
+ else:
+ entries["dirs"][file]["status"]=["exists"]
+ elif os.path.isfile(mydir+"/"+file):
+ if file=="digest-framerd-2.4.3":
+ print "isfile"
+ if not entries["files"].has_key(file):
+ entries["files"][file]={"revision":"","date":"","flags":"","tags":""}
+ if entries["files"][file].has_key("status"):
+ if file=="digest-framerd-2.4.3":
+ print "has status"
+ if "exists" not in entries["files"][file]["status"]:
+ if file=="digest-framerd-2.4.3":
+ print "no exists in status"
+ entries["files"][file]["status"]+=["exists"]
+ else:
+ if file=="digest-framerd-2.4.3":
+ print "no status"
+ entries["files"][file]["status"]=["exists"]
+ try:
+ if file=="digest-framerd-2.4.3":
+ print "stat'ing"
+ mystat=os.stat(mydir+"/"+file)
+ mytime=time.asctime(time.gmtime(mystat[ST_MTIME]))
+ if not entries["files"][file].has_key("status"):
+ if file=="digest-framerd-2.4.3":
+ print "status not set"
+ entries["files"][file]["status"]=[]
+ if file=="digest-framerd-2.4.3":
+ print "date:",entries["files"][file]["date"]
+ print "sdate:",mytime
+ if mytime==entries["files"][file]["date"]:
+ entries["files"][file]["status"]+=["current"]
+ if file=="digest-framerd-2.4.3":
+ print "stat done"
+
+ del mystat
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ print "failed to stat",file
+ print e
+ return
+
+ else:
+ print
+ print "File of unknown type:",mydir+"/"+file
+ print
+ return entries
+
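+# Illustrative walk-through (a sketch; the checkout path is hypothetical):
+#
+#     entries = getentries("/path/to/checkout", recursive=1)
+#     new, changed, missing, unadded, removed = findall(entries, recursive=1)
+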
+#class cvstree:
+# def __init__(self,basedir):
+# self.refdir=os.cwd()
+# self.basedir=basedir
+# self.entries={}
+# self.entries["dirs"]={}
+# self.entries["files"]={}
+# self.entries["dirs"][self.basedir]=getentries(self.basedir)
+# self.getrealdirs(self.dirs, self.files)
+# def getrealdirs(self,dirs,files):
+# for mydir in dirs.keys():
+# list = os.listdir(
+
+
diff --git a/pym/portage/data.py b/pym/portage/data.py
new file mode 100644
index 00000000..707c76b2
--- /dev/null
+++ b/pym/portage/data.py
@@ -0,0 +1,126 @@
+# portage_data.py -- Calculated/Discovered Data Values
+# Copyright 1998-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+if not hasattr(__builtins__, "set"):
+ from sets import Set as set
+
+import os,sys,pwd,grp
+from portage_util import writemsg
+from output import green,red
+from output import create_color_func
+bad = create_color_func("BAD")
+
+ostype=os.uname()[0]
+
+lchown = None
+if ostype=="Linux" or ostype.lower().endswith("gnu"):
+ userland="GNU"
+ os.environ["XARGS"]="xargs -r"
+elif ostype == "Darwin":
+ userland="Darwin"
+ os.environ["XARGS"]="xargs"
+ def lchown(*pos_args, **key_args):
+ pass
+elif ostype.endswith("BSD") or ostype =="DragonFly":
+ userland="BSD"
+ os.environ["XARGS"]="xargs"
+else:
+ writemsg(red("Operating system")+" \""+ostype+"\" "+red("currently unsupported. Exiting.")+"\n")
+ sys.exit(1)
+
+if not lchown:
+ if "lchown" in dir(os):
+ # Included in python-2.3
+ lchown = os.lchown
+ else:
+ try:
+ import missingos
+ lchown = missingos.lchown
+ except ImportError:
+ def lchown(*pos_args, **key_args):
+ writemsg(red("!!!") + " It seems that os.lchown does not" + \
+ " exist. Please rebuild python.\n", noiselevel=-1)
+
+os.environ["USERLAND"]=userland
+
+def portage_group_warning():
+ warn_prefix = bad("*** WARNING *** ")
+ mylines = [
+ "For security reasons, only system administrators should be",
+ "allowed in the portage group. Untrusted users or processes",
+ "can potentially exploit the portage group for attacks such as",
+ "local privilege escalation."
+ ]
+ for x in mylines:
+ writemsg(warn_prefix, noiselevel=-1)
+ writemsg(x, noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+
+# Portage has 3 security levels that depend on the uid and gid of the main
+# process and are assigned according to the following table:
+#
+# Privileges secpass uid gid
+# normal 0 any any
+# group 1 any portage_gid
+# super 2 0 any
+#
+# If the "wheel" group does not exist then wheelgid falls back to 0.
+# If the "portage" group does not exist then portage_uid falls back to wheelgid.
+
+secpass=0
+
+uid=os.getuid()
+wheelgid=0
+
+if uid==0:
+ secpass=2
+try:
+ wheelgid=grp.getgrnam("wheel")[2]
+except KeyError:
+ writemsg("portage initialization: your system doesn't have a 'wheel' group.\n")
+ writemsg("Please fix this as it is a normal system requirement. 'wheel' is GID 10\n")
+ writemsg("`emerge baselayout` and a config update with dispatch-conf, etc-update\n")
+ writemsg("or cfg-update should remedy this problem.\n")
+
+#Discover the uid and gid of the portage user/group
+try:
+ portage_uid=pwd.getpwnam("portage")[2]
+ portage_gid=grp.getgrnam("portage")[2]
+ if secpass < 1 and portage_gid in os.getgroups():
+ secpass=1
+except KeyError:
+ portage_uid=0
+ portage_gid=0
+ writemsg("\n")
+ writemsg( red("portage: 'portage' user or group missing. Please update baselayout\n"))
+ writemsg( red(" and merge portage user(250) and group(250) into your passwd\n"))
+ writemsg( red(" and group files. Non-root compilation is disabled until then.\n"))
+ writemsg( " Also note that non-root/wheel users will need to be added to\n")
+ writemsg( " the portage group to do portage commands.\n")
+ writemsg("\n")
+ writemsg( " For the defaults, line 1 goes into passwd, and 2 into group.\n")
+ writemsg(green(" portage:x:250:250:portage:/var/tmp/portage:/bin/false\n"))
+ writemsg(green(" portage::250:portage\n"))
+ writemsg("\n")
+ portage_group_warning()
+
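+# Hypothetical consumer check built on the level computed above (not code
+# from this module; assumes it is importable as portage.data after this move):
+#
+#     from portage.data import secpass
+#     from portage_util import writemsg
+#     if secpass < 1:
+#         writemsg("!!! this operation requires portage group privileges\n")
+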
+userpriv_groups = [portage_gid]
+if secpass >= 2:
+ # Get a list of group IDs for the portage user. Do not use grp.getgrall()
+ # since it is known to trigger spurious SIGPIPE problems with nss_ldap.
+ from commands import getstatusoutput
+ mystatus, myoutput = getstatusoutput("id -G portage")
+ if mystatus == os.EX_OK:
+ for x in myoutput.split():
+ try:
+ userpriv_groups.append(int(x))
+ except ValueError:
+ pass
+ del x
+ userpriv_groups = list(set(userpriv_groups))
+ del getstatusoutput, mystatus, myoutput
diff --git a/pym/portage/debug.py b/pym/portage/debug.py
new file mode 100644
index 00000000..2ee8bcf2
--- /dev/null
+++ b/pym/portage/debug.py
@@ -0,0 +1,115 @@
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: $
+
+import os, sys, threading
+
+import portage_const
+from portage_util import writemsg
+
+def set_trace(on=True):
+ if on:
+ t = trace_handler()
+ threading.settrace(t.event_handler)
+ sys.settrace(t.event_handler)
+ else:
+ sys.settrace(None)
+ threading.settrace(None)
+
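+# Minimal usage sketch (assumes this module is importable as portage.debug
+# after this move; do_something() is a hypothetical traced call):
+#
+#     from portage.debug import set_trace
+#     set_trace(True)
+#     do_something()
+#     set_trace(False)
+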
+class trace_handler(object):
+
+ def __init__(self):
+ python_system_paths = []
+ for x in sys.path:
+ if os.path.basename(x).startswith("python2."):
+ python_system_paths.append(x)
+
+ self.ignore_prefixes = []
+ for x in python_system_paths:
+ self.ignore_prefixes.append(x + os.sep)
+
+ self.trim_filename = prefix_trimmer(os.path.join(portage_const.PORTAGE_BASE_PATH, "pym") + os.sep).trim
+ self.show_local_lines = False
+ self.max_repr_length = 200
+
+ def event_handler(self, *args):
+ frame, event, arg = args
+ if "line" == event:
+ if self.show_local_lines:
+ self.trace_line(*args)
+ else:
+ if not self.ignore_filename(frame.f_code.co_filename):
+ self.trace_event(*args)
+ return self.event_handler
+
+ def trace_event(self, frame, event, arg):
+ writemsg("%s line=%d name=%s event=%s %slocals=%s\n" % \
+ (self.trim_filename(frame.f_code.co_filename),
+ frame.f_lineno,
+ frame.f_code.co_name,
+ event,
+ self.arg_repr(frame, event, arg),
+ self.locals_repr(frame, event, arg)))
+
+ def arg_repr(self, frame, event, arg):
+ my_repr = None
+ if "return" == event:
+ my_repr = repr(arg)
+ if len(my_repr) > self.max_repr_length:
+ my_repr = "'omitted'"
+ return "value=%s " % my_repr
+ elif "exception" == event:
+ my_repr = repr(arg[1])
+ if len(my_repr) > self.max_repr_length:
+ my_repr = "'omitted'"
+ return "type=%s value=%s " % (arg[0], my_repr)
+
+ return ""
+
+ def trace_line(self, frame, event, arg):
+ writemsg("%s line=%d\n" % (self.trim_filename(frame.f_code.co_filename), frame.f_lineno))
+
+ def ignore_filename(self, filename):
+ if filename:
+ for x in self.ignore_prefixes:
+ if filename.startswith(x):
+ return True
+ return False
+
+ def locals_repr(self, frame, event, arg):
+ """Create a representation of the locals dict that is suitable for
+ tracing output."""
+
+ my_locals = frame.f_locals.copy()
+
+ # prevent unsafe __repr__ call on self when __init__ is called
+ # (method calls aren't safe until after __init__ has completed).
+ if frame.f_code.co_name == "__init__" and "self" in my_locals:
+ my_locals["self"] = "omitted"
+
+ # We omit items that will lead to unreasonable bloat of the trace
+ # output (and resulting log file).
+ for k, v in my_locals.iteritems():
+ my_repr = repr(v)
+ if len(my_repr) > self.max_repr_length:
+ my_locals[k] = "omitted"
+ return my_locals
+
+class prefix_trimmer(object):
+ def __init__(self, prefix):
+ self.prefix = prefix
+ self.cut_index = len(prefix)
+ self.previous = None
+ self.previous_trimmed = None
+
+ def trim(self, s):
+ """Remove a prefix from the string and return the result.
+ The previous result is automatically cached."""
+ if s == self.previous:
+ return self.previous_trimmed
+ else:
+ if s.startswith(self.prefix):
+ self.previous_trimmed = s[self.cut_index:]
+ else:
+ self.previous_trimmed = s
+ return self.previous_trimmed
diff --git a/pym/portage/dep.py b/pym/portage/dep.py
new file mode 100644
index 00000000..bf40452a
--- /dev/null
+++ b/pym/portage/dep.py
@@ -0,0 +1,646 @@
+# dep.py -- Portage dependency resolution functions
+# Copyright 2003-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+
+# DEPEND SYNTAX:
+#
+# 'use?' only affects the immediately following word!
+# Nesting is the only legal way to form multiple '[!]use?' requirements.
+#
+# Where: 'a' and 'b' are use flags, and 'z' is a depend atom.
+#
+# "a? z"           -- If 'a' in [use], then z is valid.
+# "a? ( z )" -- Syntax with parenthesis.
+# "a? b? z" -- Deprecated.
+# "a? ( b? z )" -- Valid
+# "a? ( b? ( z ) )" -- Valid
+#
+
+import re, sys, types
+import portage_exception
+from portage_exception import InvalidData
+from portage_versions import catpkgsplit, catsplit, pkgcmp, pkgsplit, ververify
+
+def cpvequal(cpv1, cpv2):
+ split1 = catpkgsplit(cpv1)
+ split2 = catpkgsplit(cpv2)
+
+ if not split1 or not split2:
+ raise portage_exception.PortageException("Invalid data '%s, %s', parameter was not a CPV" % (cpv1, cpv2))
+
+ if split1[0] != split2[0]:
+ return False
+
+ return (pkgcmp(split1[1:], split2[1:]) == 0)
+
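+# e.g. (illustrative):
+#
+#     >>> cpvequal("sys-apps/portage-2.1", "sys-apps/portage-2.1")
+#     True
+#     >>> cpvequal("sys-apps/portage-2.1", "sys-apps/portage-2.1-r1")
+#     False
+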
+def strip_empty(myarr):
+ """
+ Strip all empty elements from an array
+
+ @param myarr: The list of elements
+ @type myarr: List
+ @rtype: Array
+ @return: The array with empty elements removed
+ """
+ for x in range(len(myarr)-1, -1, -1):
+ if not myarr[x]:
+ del myarr[x]
+ return myarr
+
+def paren_reduce(mystr,tokenize=1):
+ """
+ Take a string and convert all paren enclosed entities into sublists, optionally
+	further splitting the list elements by spaces.
+
+ Example usage:
+ >>> paren_reduce('foobar foo ( bar baz )',1)
+ ['foobar', 'foo', ['bar', 'baz']]
+ >>> paren_reduce('foobar foo ( bar baz )',0)
+ ['foobar foo ', [' bar baz ']]
+
+ @param mystr: The string to reduce
+ @type mystr: String
+	@param tokenize: Split on spaces to produce a further list breakdown
+ @type tokenize: Integer
+ @rtype: Array
+ @return: The reduced string in an array
+ """
+ mylist = []
+ while mystr:
+ if ("(" not in mystr) and (")" not in mystr):
+ freesec = mystr
+ subsec = None
+ tail = ""
+ elif mystr[0] == ")":
+ return [mylist,mystr[1:]]
+ elif ("(" in mystr) and (mystr.index("(") < mystr.index(")")):
+ freesec,subsec = mystr.split("(",1)
+ subsec,tail = paren_reduce(subsec,tokenize)
+ else:
+ subsec,tail = mystr.split(")",1)
+ if tokenize:
+ subsec = strip_empty(subsec.split(" "))
+ return [mylist+subsec,tail]
+ return mylist+[subsec],tail
+ mystr = tail
+ if freesec:
+ if tokenize:
+ mylist = mylist + strip_empty(freesec.split(" "))
+ else:
+ mylist = mylist + [freesec]
+ if subsec is not None:
+ mylist = mylist + [subsec]
+ return mylist
+
+def paren_enclose(mylist):
+ """
+ Convert a list to a string with sublists enclosed with parens.
+
+ Example usage:
+ >>> test = ['foobar','foo',['bar','baz']]
+ >>> paren_enclose(test)
+ 'foobar foo ( bar baz )'
+
+ @param mylist: The list
+ @type mylist: List
+ @rtype: String
+ @return: The paren enclosed string
+ """
+ mystrparts = []
+ for x in mylist:
+ if isinstance(x, list):
+ mystrparts.append("( "+paren_enclose(x)+" )")
+ else:
+ mystrparts.append(x)
+ return " ".join(mystrparts)
+
+# This is just for use by emerge so that it can enable a backward compatibility
+# mode in order to gracefully deal with installed packages that have invalid
+# atoms or dep syntax.
+_dep_check_strict = True
+
+def use_reduce(deparray, uselist=[], masklist=[], matchall=0, excludeall=[]):
+ """
+ Takes a paren_reduce'd array and reduces the use? conditionals out
+ leaving an array with subarrays
+
+ @param deparray: paren_reduce'd list of deps
+ @type deparray: List
+ @param uselist: List of use flags
+ @type uselist: List
+ @param masklist: List of masked flags
+ @type masklist: List
+ @param matchall: Resolve all conditional deps unconditionally. Used by repoman
+ @type matchall: Integer
+ @rtype: List
+ @return: The use reduced depend array
+ """
+ # Quick validity checks
+ for x in range(len(deparray)):
+ if deparray[x] in ["||","&&"]:
+ if len(deparray) - 1 == x or not isinstance(deparray[x+1], list):
+ raise portage_exception.InvalidDependString(deparray[x]+" missing atom list in \""+paren_enclose(deparray)+"\"")
+ if deparray and deparray[-1] and deparray[-1][-1] == "?":
+ raise portage_exception.InvalidDependString("Conditional without target in \""+paren_enclose(deparray)+"\"")
+
+ global _dep_check_strict
+
+ mydeparray = deparray[:]
+ rlist = []
+ while mydeparray:
+ head = mydeparray.pop(0)
+
+ if type(head) == types.ListType:
+ additions = use_reduce(head, uselist, masklist, matchall, excludeall)
+ if additions:
+ rlist.append(additions)
+ elif rlist and rlist[-1] == "||":
+ #XXX: Currently some DEPEND strings have || lists without default atoms.
+ # raise portage_exception.InvalidDependString("No default atom(s) in \""+paren_enclose(deparray)+"\"")
+ rlist.append([])
+
+ else:
+ if head[-1] == "?": # Use reduce next group on fail.
+ # Pull any other use conditions and the following atom or list into a separate array
+ newdeparray = [head]
+ while isinstance(newdeparray[-1], str) and newdeparray[-1][-1] == "?":
+ if mydeparray:
+ newdeparray.append(mydeparray.pop(0))
+ else:
+ raise ValueError, "Conditional with no target."
+
+ # Deprecation checks
+ warned = 0
+ if len(newdeparray[-1]) == 0:
+ sys.stderr.write("Note: Empty target in string. (Deprecated)\n")
+ warned = 1
+ if len(newdeparray) != 2:
+ sys.stderr.write("Note: Nested use flags without parenthesis (Deprecated)\n")
+ warned = 1
+ if warned:
+ sys.stderr.write(" --> "+" ".join(map(str,[head]+newdeparray))+"\n")
+
+ # Check that each flag matches
+ ismatch = True
+ for head in newdeparray[:-1]:
+ head = head[:-1]
+ if head[0] == "!":
+ head_key = head[1:]
+ if not matchall and head_key in uselist or \
+ head_key in excludeall:
+ ismatch = False
+ break
+ elif head not in masklist:
+ if not matchall and head not in uselist:
+ ismatch = False
+ break
+ else:
+ ismatch = False
+
+ # If they all match, process the target
+ if ismatch:
+ target = newdeparray[-1]
+ if isinstance(target, list):
+ additions = use_reduce(target, uselist, masklist, matchall, excludeall)
+ if additions:
+ rlist.append(additions)
+ elif not _dep_check_strict:
+ # The old deprecated behavior.
+ rlist.append(target)
+ else:
+ raise portage_exception.InvalidDependString(
+ "Conditional without parenthesis: '%s?'" % head)
+
+ else:
+ rlist += [head]
+
+ return rlist
+
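+# Illustrative reduction (behavior as implemented above):
+#
+#     >>> use_reduce(paren_reduce("a? ( x ) b? ( y ) z"), uselist=["a"])
+#     [['x'], 'z']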
+
+def dep_opconvert(deplist):
+ """
+ Iterate recursively through a list of deps, if the
+ dep is a '||' or '&&' operator, combine it with the
+ list of deps that follows..
+
+ Example usage:
+ >>> test = ["blah", "||", ["foo", "bar", "baz"]]
+ >>> dep_opconvert(test)
+ ['blah', ['||', 'foo', 'bar', 'baz']]
+
+ @param deplist: A list of deps to format
+ @type mydep: List
+ @rtype: List
+ @return:
+ The new list with the new ordering
+ """
+
+ retlist = []
+ x = 0
+ while x != len(deplist):
+ if isinstance(deplist[x], list):
+ retlist.append(dep_opconvert(deplist[x]))
+ elif deplist[x] == "||" or deplist[x] == "&&":
+ retlist.append([deplist[x]] + dep_opconvert(deplist[x+1]))
+ x += 1
+ else:
+ retlist.append(deplist[x])
+ x += 1
+ return retlist
+
+def get_operator(mydep):
+ """
+ Return the operator used in a depstring.
+
+ Example usage:
+ >>> from portage_dep import *
+ >>> get_operator(">=test-1.0")
+ '>='
+
+ @param mydep: The dep string to check
+ @type mydep: String
+ @rtype: String
+ @return: The operator. One of:
+ '~', '=', '>', '<', '=*', '>=', or '<='
+ """
+ if mydep[0] == "~":
+ operator = "~"
+ elif mydep[0] == "=":
+ if mydep[-1] == "*":
+ operator = "=*"
+ else:
+ operator = "="
+ elif mydep[0] in "><":
+ if len(mydep) > 1 and mydep[1] == "=":
+ operator = mydep[0:2]
+ else:
+ operator = mydep[0]
+ else:
+ operator = None
+
+ return operator
+
+_dep_getcpv_cache = {}
+
+def dep_getcpv(mydep):
+ """
+ Return the category-package-version with any operators/slot specifications stripped off
+
+ Example usage:
+ >>> dep_getcpv('>=media-libs/test-3.0')
+ 'media-libs/test-3.0'
+
+ @param mydep: The depstring
+ @type mydep: String
+ @rtype: String
+ @return: The depstring with the operator removed
+ """
+ global _dep_getcpv_cache
+ retval = _dep_getcpv_cache.get(mydep, None)
+ if retval is not None:
+ return retval
+ mydep_orig = mydep
+ if mydep and mydep[0] == "*":
+ mydep = mydep[1:]
+ if mydep and mydep[-1] == "*":
+ mydep = mydep[:-1]
+ if mydep and mydep[0] == "!":
+ mydep = mydep[1:]
+ if mydep[:2] in [">=", "<="]:
+ mydep = mydep[2:]
+ elif mydep[:1] in "=<>~":
+ mydep = mydep[1:]
+ colon = mydep.rfind(":")
+ if colon != -1:
+ mydep = mydep[:colon]
+ _dep_getcpv_cache[mydep_orig] = mydep
+ return mydep
+
+def dep_getslot(mydep):
+ """
+ Retrieve the slot on a depend.
+
+ Example usage:
+ >>> dep_getslot('app-misc/test:3')
+ '3'
+
+ @param mydep: The depstring to retrieve the slot of
+ @type mydep: String
+ @rtype: String
+ @return: The slot
+ """
+ colon = mydep.rfind(":")
+ if colon != -1:
+ return mydep[colon+1:]
+ return None
+
+_invalid_atom_chars_regexp = re.compile("[()|?]")
+
+def isvalidatom(atom, allow_blockers=False):
+ """
+ Check to see if a depend atom is valid
+
+ Example usage:
+ >>> isvalidatom('media-libs/test-3.0')
+ 0
+ >>> isvalidatom('>=media-libs/test-3.0')
+ 1
+
+ @param atom: The depend atom to check against
+ @type atom: String
+ @rtype: Integer
+ @return: One of the following:
+ 1) 0 if the atom is invalid
+ 2) 1 if the atom is valid
+ """
+ global _invalid_atom_chars_regexp
+ if _invalid_atom_chars_regexp.search(atom):
+ return 0
+ if allow_blockers and atom.startswith("!"):
+ atom = atom[1:]
+ try:
+ mycpv_cps = catpkgsplit(dep_getcpv(atom))
+ except InvalidData:
+ return 0
+ operator = get_operator(atom)
+ if operator:
+ if operator[0] in "<>" and atom[-1] == "*":
+ return 0
+ if mycpv_cps and mycpv_cps[0] != "null":
+ # >=cat/pkg-1.0
+ return 1
+ else:
+ # >=cat/pkg or >=pkg-1.0 (no category)
+ return 0
+ if mycpv_cps:
+ # cat/pkg-1.0
+ return 0
+
+ if (len(atom.split('/')) == 2):
+ # cat/pkg
+ return 1
+ else:
+ return 0
+
+def isjustname(mypkg):
+ """
+ Checks to see if the depstring is only the package name (no version parts)
+
+ Example usage:
+ >>> isjustname('media-libs/test-3.0')
+ 0
+ >>> isjustname('test')
+ 1
+ >>> isjustname('media-libs/test')
+ 1
+
+ @param mypkg: The package atom to check
+ @param mypkg: String
+ @rtype: Integer
+ @return: One of the following:
+ 1) 0 if the package string is not just the package name
+ 2) 1 if it is
+ """
+ myparts = mypkg.split('-')
+ for x in myparts:
+ if ververify(x):
+ return 0
+ return 1
+
+iscache = {}
+
+def isspecific(mypkg):
+ """
+ Checks to see if a package is in category/package-version or package-version format,
+ possibly returning a cached result.
+
+ Example usage:
+ >>> isspecific('media-libs/test')
+ 0
+ >>> isspecific('media-libs/test-3.0')
+ 1
+
+ @param mypkg: The package depstring to check against
+ @type mypkg: String
+ @rtype: Integer
+ @return: One of the following:
+ 1) 0 if the package string is not specific
+ 2) 1 if it is
+ """
+ try:
+ return iscache[mypkg]
+ except KeyError:
+ pass
+ mysplit = mypkg.split("/")
+ if not isjustname(mysplit[-1]):
+ iscache[mypkg] = 1
+ return 1
+ iscache[mypkg] = 0
+ return 0
+
+def dep_getkey(mydep):
+ """
+ Return the category/package-name of a depstring.
+
+ Example usage:
+ >>> dep_getkey('media-libs/test-3.0')
+ 'media-libs/test'
+
+ @param mydep: The depstring to retrieve the category/package-name of
+ @type mydep: String
+ @rtype: String
+ @return: The package's category/package-name
+ """
+ mydep = dep_getcpv(mydep)
+ if mydep and isspecific(mydep):
+ mysplit = catpkgsplit(mydep)
+ if not mysplit:
+ return mydep
+ return mysplit[0] + "/" + mysplit[1]
+ else:
+ return mydep
+
+def match_to_list(mypkg, mylist):
+ """
+ Searches the list for entries that match the package.
+
+ @param mypkg: The package atom to match
+ @type mypkg: String
+ @param mylist: The list of package atoms to compare against
+ @type mylist: List
+ @rtype: List
+ @return: A unique list of package atoms that match the given package atom
+ """
+ matches = []
+ for x in mylist:
+ if match_from_list(x, [mypkg]):
+ if x not in matches:
+ matches.append(x)
+ return matches
+
+def best_match_to_list(mypkg, mylist):
+ """
+ Returns the most specific entry that matches the package given.
+
+ @param mypkg: The package atom to check
+ @type mypkg: String
+ @param mylist: The list of package atoms to check against
+ @type mylist: List
+ @rtype: String
+ @return: The package atom which best matches given the following ordering:
+ - =cpv 6
+ - ~cpv 5
+ - =cpv* 4
+ - cp:slot 3
+ - >cpv 2
+ - <cpv 2
+ - >=cpv 2
+ - <=cpv 2
+ - cp 1
+ """
+ operator_values = {'=':6, '~':5, '=*':4,
+ '>':2, '<':2, '>=':2, '<=':2, None:1}
+ maxvalue = 0
+ bestm = None
+ for x in match_to_list(mypkg, mylist):
+ if dep_getslot(x) is not None:
+ if maxvalue < 3:
+ maxvalue = 3
+ bestm = x
+ continue
+ op_val = operator_values[get_operator(x)]
+ if op_val > maxvalue:
+ maxvalue = op_val
+ bestm = x
+ return bestm
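+
+# Example of the ordering above (hypothetical atoms): an '=' atom beats
+# a '~' atom, which in turn beats a plain cp atom:
+#
+# >>> best_match_to_list('dev-libs/foo-1.0',
+# ...     ['dev-libs/foo', '~dev-libs/foo-1.0', '=dev-libs/foo-1.0'])
+# '=dev-libs/foo-1.0'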
+
+_match_from_list_cache = {}
+
+def match_from_list(mydep, candidate_list):
+ """
+ Searches the list for entries that match the package.
+
+ @param mydep: The package atom to match
+ @type mydep: String
+ @param candidate_list: The list of package atoms to compare against
+ @type candidate_list: List
+ @rtype: List
+ @return: A list of package atoms that match the given package atom
+ """
+
+ global _match_from_list_cache
+ cache_key = (mydep, tuple(candidate_list))
+ mylist = _match_from_list_cache.get(cache_key, None)
+ if mylist is not None:
+ return mylist[:]
+
+ from portage_util import writemsg
+ if mydep[0] == "!":
+ mydep = mydep[1:]
+
+ mycpv = dep_getcpv(mydep)
+ mycpv_cps = catpkgsplit(mycpv) # Can be None if not specific
+ slot = None
+
+ if not mycpv_cps:
+ cat, pkg = catsplit(mycpv)
+ ver = None
+ rev = None
+ slot = dep_getslot(mydep)
+ else:
+ cat, pkg, ver, rev = mycpv_cps
+ if mydep == mycpv:
+ raise KeyError("Specific key requires an operator" + \
+ " (%s) (try adding an '=')" % (mydep))
+
+ if ver and rev:
+ operator = get_operator(mydep)
+ if not operator:
+ writemsg("!!! Invalid atom: %s\n" % mydep, noiselevel=-1)
+ return []
+ else:
+ operator = None
+
+ mylist = []
+
+ if operator is None:
+ for x in candidate_list:
+ xs = pkgsplit(x)
+ if xs is None:
+ xcpv = dep_getcpv(x)
+ if slot is not None:
+ xslot = dep_getslot(x)
+ if xslot is not None and xslot != slot:
+ """ This function isn't given enough information to
+ reject atoms based on slot unless *both* compared atoms
+ specify slots."""
+ continue
+ if xcpv != mycpv:
+ continue
+ elif xs[0] != mycpv:
+ continue
+ mylist.append(x)
+
+ elif operator == "=": # Exact match
+ mylist = [cpv for cpv in candidate_list if cpvequal(cpv, mycpv)]
+
+ elif operator == "=*": # glob match
+ # XXX: Nasty special casing for leading zeros
+ # Required as =* is a literal prefix match, so can't
+ # use vercmp
+ mysplit = catpkgsplit(mycpv)
+ myver = mysplit[2].lstrip("0")
+ if not myver or not myver[0].isdigit():
+ myver = "0"+myver
+ mycpv = mysplit[0]+"/"+mysplit[1]+"-"+myver
+ for x in candidate_list:
+ xs = catpkgsplit(x)
+ myver = xs[2].lstrip("0")
+ if not myver or not myver[0].isdigit():
+ myver = "0"+myver
+ xcpv = xs[0]+"/"+xs[1]+"-"+myver
+ if xcpv.startswith(mycpv):
+ mylist.append(x)
+
+ elif operator == "~": # version, any revision, match
+ for x in candidate_list:
+ xs = catpkgsplit(x)
+ if xs is None:
+ raise InvalidData(x)
+ if not cpvequal(xs[0]+"/"+xs[1]+"-"+xs[2], mycpv_cps[0]+"/"+mycpv_cps[1]+"-"+mycpv_cps[2]):
+ continue
+ if xs[2] != ver:
+ continue
+ mylist.append(x)
+
+ elif operator in [">", ">=", "<", "<="]:
+ mysplit = ["%s/%s" % (cat, pkg), ver, rev]
+ for x in candidate_list:
+ try:
+ result = pkgcmp(pkgsplit(x), mysplit)
+ except ValueError: # pkgcmp may return ValueError during int() conversion
+ writemsg("\nInvalid package name: %s\n" % x, noiselevel=-1)
+ raise
+ if result is None:
+ continue
+ elif operator == ">":
+ if result > 0:
+ mylist.append(x)
+ elif operator == ">=":
+ if result >= 0:
+ mylist.append(x)
+ elif operator == "<":
+ if result < 0:
+ mylist.append(x)
+ elif operator == "<=":
+ if result <= 0:
+ mylist.append(x)
+ else:
+ raise KeyError("Unknown operator: %s" % mydep)
+ else:
+ raise KeyError("Unknown operator: %s" % mydep)
+
+ _match_from_list_cache[cache_key] = mylist
+ return mylist
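+
+# Example (hypothetical atoms and candidates): each operator selects a
+# different subset of the candidate list:
+#
+# >>> match_from_list('>=dev-libs/foo-1.1',
+# ...     ['dev-libs/foo-1.0', 'dev-libs/foo-1.1', 'dev-libs/foo-2.0'])
+# ['dev-libs/foo-1.1', 'dev-libs/foo-2.0']
+# >>> match_from_list('~dev-libs/foo-1.0', ['dev-libs/foo-1.0-r1'])
+# ['dev-libs/foo-1.0-r1']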
diff --git a/pym/portage/dispatch_conf.py b/pym/portage/dispatch_conf.py
new file mode 100644
index 00000000..690772bf
--- /dev/null
+++ b/pym/portage/dispatch_conf.py
@@ -0,0 +1,161 @@
+# archive_conf.py -- functionality common to archive-conf and dispatch-conf
+# Copyright 2003-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+
+# Library by Wayne Davison <gentoo@blorf.net>, derived from code
+# written by Jeremy Wohl (http://igmus.org)
+
+from stat import *
+import os, sys, commands, shutil
+
+import portage
+
+RCS_BRANCH = '1.1.1'
+RCS_LOCK = 'rcs -ko -M -l'
+RCS_PUT = 'ci -t-"Archived config file." -m"dispatch-conf update."'
+RCS_GET = 'co'
+RCS_MERGE = 'rcsmerge -p -r' + RCS_BRANCH + ' %s >%s'
+
+DIFF3_MERGE = 'diff3 -mE %s %s %s >%s'
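+
+# Example (hypothetical paths): the merge templates are filled in with
+# printf-style substitution before being handed to os.system(), e.g.:
+#
+# RCS_MERGE % ('/etc/config-archive/etc/foo.conf', '/tmp/mrgconf')
+# expands to
+# 'rcsmerge -p -r1.1.1 /etc/config-archive/etc/foo.conf >/tmp/mrgconf'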
+
+def read_config(mandatory_opts):
+ try:
+ opts = portage.getconfig('/etc/dispatch-conf.conf')
+ except:
+ opts = None
+
+ if not opts:
+ print >> sys.stderr, 'dispatch-conf: Error reading /etc/dispatch-conf.conf; fatal'
+ sys.exit(1)
+
+ for key in mandatory_opts:
+ if not opts.has_key(key):
+ if key == "merge":
+ opts["merge"] = "sdiff --suppress-common-lines --output=%s %s %s"
+ else:
+ print >> sys.stderr, 'dispatch-conf: Missing option "%s" in /etc/dispatch-conf.conf; fatal' % (key,)
+ sys.exit(1)
+
+ if not os.path.exists(opts['archive-dir']):
+ os.mkdir(opts['archive-dir'])
+ elif not os.path.isdir(opts['archive-dir']):
+ print >> sys.stderr, 'dispatch-conf: Config archive dir [%s] must exist; fatal' % (opts['archive-dir'],)
+ sys.exit(1)
+
+ return opts
+
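+# Example (hypothetical values): read_config() expects entries such as
+# these in /etc/dispatch-conf.conf, and fills in a default for "merge"
+# when it is listed as mandatory but missing:
+#
+# archive-dir=/etc/config-archive
+# use-rcs=no
+#
+# opts = read_config(['archive-dir'])
+# opts['archive-dir'] -> '/etc/config-archive'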
+
+def rcs_archive(archive, curconf, newconf, mrgconf):
+ """Archive existing config in rcs (on trunk). Then, if mrgconf is
+ specified and an old branch version exists, merge the user's changes
+ and the distributed changes and put the result into mrgconf. Lastly,
+ if newconf was specified, leave it in the archive dir with a .dist.new
+ suffix along with the last 1.1.1 branch version with a .dist suffix."""
+
+ try:
+ os.makedirs(os.path.dirname(archive))
+ except:
+ pass
+
+ try:
+ shutil.copy2(curconf, archive)
+ except(IOError, os.error), why:
+ print >> sys.stderr, 'dispatch-conf: Error copying %s to %s: %s; fatal' % \
+ (curconf, archive, str(why))
+ if os.path.exists(archive + ',v'):
+ os.system(RCS_LOCK + ' ' + archive)
+ os.system(RCS_PUT + ' ' + archive)
+
+ ret = 0
+ if newconf != '':
+ os.system(RCS_GET + ' -r' + RCS_BRANCH + ' ' + archive)
+ has_branch = os.path.exists(archive)
+ if has_branch:
+ os.rename(archive, archive + '.dist')
+
+ try:
+ shutil.copy2(newconf, archive)
+ except(IOError, os.error), why:
+ print >> sys.stderr, 'dispatch-conf: Error copying %s to %s: %s; fatal' % \
+ (newconf, archive, str(why))
+
+ if has_branch:
+ if mrgconf != '':
+ # This puts the results of the merge into mrgconf.
+ ret = os.system(RCS_MERGE % (archive, mrgconf))
+ mystat = os.lstat(newconf)
+ os.chmod(mrgconf, mystat[ST_MODE])
+ os.chown(mrgconf, mystat[ST_UID], mystat[ST_GID])
+ os.rename(archive, archive + '.dist.new')
+ return ret
+
+
+def file_archive(archive, curconf, newconf, mrgconf):
+ """Archive existing config to the archive-dir, bumping old versions
+ out of the way into .# versions (log-rotate style). Then, if mrgconf
+ was specified and there is a .dist version, merge the user's changes
+ and the distributed changes and put the result into mrgconf. Lastly,
+ if newconf was specified, archive it as a .dist.new version (which
+ gets moved to the .dist version at the end of the processing)."""
+
+ try:
+ os.makedirs(os.path.dirname(archive))
+ except:
+ pass
+
+ # Archive the current config file if it isn't already saved
+ if os.path.exists(archive) \
+ and len(commands.getoutput('diff -aq %s %s' % (curconf,archive))) != 0:
+ suf = 1
+ while suf < 9 and os.path.exists(archive + '.' + str(suf)):
+ suf += 1
+
+ while suf > 1:
+ os.rename(archive + '.' + str(suf-1), archive + '.' + str(suf))
+ suf -= 1
+
+ os.rename(archive, archive + '.1')
+
+ try:
+ shutil.copy2(curconf, archive)
+ except(IOError, os.error), why:
+ print >> sys.stderr, 'dispatch-conf: Error copying %s to %s: %s; fatal' % \
+ (curconf, archive, str(why))
+
+ if newconf != '':
+ # Save off new config file in the archive dir with .dist.new suffix
+ try:
+ shutil.copy2(newconf, archive + '.dist.new')
+ except(IOError, os.error), why:
+ print >> sys.stderr, 'dispatch-conf: Error copying %s to %s: %s; fatal' % \
+ (newconf, archive + '.dist.new', str(why))
+
+ ret = 0
+ if mrgconf != '' and os.path.exists(archive + '.dist'):
+ # This puts the results of the merge into mrgconf.
+ ret = os.system(DIFF3_MERGE % (curconf, archive + '.dist', newconf, mrgconf))
+ mystat = os.lstat(newconf)
+ os.chmod(mrgconf, mystat[ST_MODE])
+ os.chown(mrgconf, mystat[ST_UID], mystat[ST_GID])
+
+ return ret
+
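+# Example of the resulting layout (hypothetical names, assuming the
+# configured archive-dir is /etc/config-archive): repeated archives of
+# /etc/foo.conf are rotated log-rotate style:
+#
+# /etc/config-archive/etc/foo.conf      newest archived copy
+# /etc/config-archive/etc/foo.conf.1    the copy before it
+# ... up to foo.conf.9, plus foo.conf.dist.new/.dist when newconf and
+# mrgconf are given.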
+
+def rcs_archive_post_process(archive):
+ """Check in the archive file with the .dist.new suffix on the branch
+ and remove the one with the .dist suffix."""
+ os.rename(archive + '.dist.new', archive)
+ if os.path.exists(archive + '.dist'):
+ # Commit the last-distributed version onto the branch.
+ os.system(RCS_LOCK + RCS_BRANCH + ' ' + archive)
+ os.system(RCS_PUT + ' -r' + RCS_BRANCH + ' ' + archive)
+ os.unlink(archive + '.dist')
+ else:
+ # Forcefully commit the last-distributed version onto the branch.
+ os.system(RCS_PUT + ' -f -r' + RCS_BRANCH + ' ' + archive)
+
+
+def file_archive_post_process(archive):
+ """Rename the archive file with the .dist.new suffix to a .dist suffix"""
+ os.rename(archive + '.dist.new', archive + '.dist')
diff --git a/pym/portage/eclass_cache.py b/pym/portage/eclass_cache.py
new file mode 100644
index 00000000..91b98fec
--- /dev/null
+++ b/pym/portage/eclass_cache.py
@@ -0,0 +1,83 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Nicholas Carpaski (carpaski@gentoo.org), Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Id$
+
+from portage_util import normalize_path, writemsg
+import os, sys
+from portage_data import portage_gid
+
+class cache:
+ """
+ Maintains the cache information about eclasses used in ebuilds.
+ """
+ def __init__(self, porttree_root, overlays=[]):
+ self.porttree_root = porttree_root
+
+ self.eclasses = {} # {"Name": ("location","_mtime_")}
+ self._eclass_locations = {}
+
+ # screw with the porttree ordering, w/out having bash inherit match it, and I'll hurt you.
+ # ~harring
+ self.porttrees = [self.porttree_root]+overlays
+ self.porttrees = tuple(map(normalize_path, self.porttrees))
+ self._master_eclass_root = os.path.join(self.porttrees[0],"eclass")
+ self.update_eclasses()
+
+ def close_caches(self):
+ import traceback
+ traceback.print_stack()
+ print "%s close_cache is deprecated" % self.__class__
+ self.eclasses.clear()
+
+ def flush_cache(self):
+ import traceback
+ traceback.print_stack()
+ print "%s flush_cache is deprecated" % self.__class__
+
+ self.update_eclasses()
+
+ def update_eclasses(self):
+ self.eclasses = {}
+ self._eclass_locations = {}
+ eclass_len = len(".eclass")
+ for x in [normalize_path(os.path.join(y,"eclass")) for y in self.porttrees]:
+ if not os.path.isdir(x):
+ continue
+ for y in [y for y in os.listdir(x) if y.endswith(".eclass")]:
+ try:
+ mtime = long(os.stat(os.path.join(x, y)).st_mtime)
+ except OSError:
+ continue
+ ys=y[:-eclass_len]
+ self.eclasses[ys] = (x, long(mtime))
+ self._eclass_locations[ys] = x
+
+ def is_eclass_data_valid(self, ec_dict):
+ if not isinstance(ec_dict, dict):
+ return False
+ for eclass, tup in ec_dict.iteritems():
+ cached_data = self.eclasses.get(eclass, None)
+ """ Only use the mtime for validation since the probability of a
+ collision is small and, depending on the cache implementation, the
+ path may not be specified (cache from rsync mirrors, for example).
+ """
+ if cached_data is None or tup[1] != cached_data[1]:
+ return False
+
+ return True
+
+ def get_eclass_data(self, inherits, from_master_only=False):
+ ec_dict = {}
+ for x in inherits:
+ try:
+ ec_dict[x] = self.eclasses[x]
+ except KeyError:
+ print "ec=",ec_dict
+ print "inherits=",inherits
+ raise
+ if from_master_only and \
+ self._eclass_locations[x] != self._master_eclass_root:
+ return None
+
+ return ec_dict
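+
+# Example of the cache layout (hypothetical path/mtime): update_eclasses()
+# maps each eclass name to a (location, mtime) tuple, which is also the
+# per-eclass format consumed by is_eclass_data_valid() and returned from
+# get_eclass_data():
+#
+# self.eclasses == {'eutils': ('/usr/portage/eclass', 1169000000L)}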
diff --git a/pym/elog_modules/__init__.py b/pym/portage/elog_modules/__init__.py
index e69de29b..e69de29b 100644
--- a/pym/elog_modules/__init__.py
+++ b/pym/portage/elog_modules/__init__.py
diff --git a/pym/elog_modules/mod_custom.py b/pym/portage/elog_modules/mod_custom.py
index d609e79b..d609e79b 100644
--- a/pym/elog_modules/mod_custom.py
+++ b/pym/portage/elog_modules/mod_custom.py
diff --git a/pym/elog_modules/mod_mail.py b/pym/portage/elog_modules/mod_mail.py
index b8e17a51..b8e17a51 100644
--- a/pym/elog_modules/mod_mail.py
+++ b/pym/portage/elog_modules/mod_mail.py
diff --git a/pym/elog_modules/mod_mail_summary.py b/pym/portage/elog_modules/mod_mail_summary.py
index 5e642f41..5e642f41 100644
--- a/pym/elog_modules/mod_mail_summary.py
+++ b/pym/portage/elog_modules/mod_mail_summary.py
diff --git a/pym/elog_modules/mod_save.py b/pym/portage/elog_modules/mod_save.py
index 4e1cd2cf..4e1cd2cf 100644
--- a/pym/elog_modules/mod_save.py
+++ b/pym/portage/elog_modules/mod_save.py
diff --git a/pym/elog_modules/mod_save_summary.py b/pym/portage/elog_modules/mod_save_summary.py
index 7cb310d9..7cb310d9 100644
--- a/pym/elog_modules/mod_save_summary.py
+++ b/pym/portage/elog_modules/mod_save_summary.py
diff --git a/pym/elog_modules/mod_syslog.py b/pym/portage/elog_modules/mod_syslog.py
index a95ecb45..a95ecb45 100644
--- a/pym/elog_modules/mod_syslog.py
+++ b/pym/portage/elog_modules/mod_syslog.py
diff --git a/pym/portage/emergehelp.py b/pym/portage/emergehelp.py
new file mode 100644
index 00000000..373e0bf4
--- /dev/null
+++ b/pym/portage/emergehelp.py
@@ -0,0 +1,420 @@
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+
+import os,sys
+from output import bold, turquoise, green
+
+def shorthelp():
+ print
+ print
+ print bold("Usage:")
+ print " "+turquoise("emerge")+" [ "+green("options")+" ] [ "+green("action")+" ] [ "+turquoise("ebuildfile")+" | "+turquoise("tbz2file")+" | "+turquoise("dependency")+" ] [ ... ]"
+ print " "+turquoise("emerge")+" [ "+green("options")+" ] [ "+green("action")+" ] < "+turquoise("system")+" | "+turquoise("world")+" >"
+ print " "+turquoise("emerge")+" < "+turquoise("--sync")+" | "+turquoise("--metadata")+" | "+turquoise("--info")+" >"
+ print " "+turquoise("emerge")+" "+turquoise("--resume")+" [ "+green("--pretend")+" | "+green("--ask")+" | "+green("--skipfirst")+" ]"
+ print " "+turquoise("emerge")+" "+turquoise("--help")+" [ "+green("system")+" | "+green("world")+" | "+green("config")+" | "+green("--sync")+" ] "
+ print bold("Options:")+" "+green("-")+"["+green("abBcCdDefgGhikKlnNoOpqPsStuvV")+"] ["+green("--oneshot")+"] ["+green("--newuse")+"] ["+green("--noconfmem")+"]"
+ print " [ " + green("--color")+" < " + turquoise("y") + " | "+ turquoise("n")+" > ] [ "+green("--columns")+" ]"
+ print " ["+green("--nospinner")+"]"
+ print " [ "+green("--deep")+" ] [" + green("--with-bdeps")+" < " + turquoise("y") + " | "+ turquoise("n")+" > ]"
+ print bold("Actions:")+" [ "+green("--clean")+" | "+green("--depclean")+" | "+green("--prune")+" | "+green("--regen")+" | "+green("--search")+" | "+green("--unmerge")+" ]"
+ print
+
+def help(myaction,myopts,havecolor=1):
+ if not myaction and ("--help" not in myopts):
+ shorthelp()
+ print
+ print " For more help try 'emerge --help' or consult the man page."
+ print
+ elif not myaction:
+ shorthelp()
+ print
+ print turquoise("Help (this screen):")
+ print " "+green("--help")+" ("+green("-h")+" short option)"
+ print " Displays this help; an additional argument (see above) will tell"
+ print " emerge to display detailed help."
+ print
+ print turquoise("Actions:")
+ print " "+green("--clean")+" ("+green("-c")+" short option)"
+ print " Cleans the system by removing outdated packages which will not"
+ print " remove functionalities or prevent your system from working."
+ print " The arguments can be in several different formats :"
+ print " * world "
+ print " * system or"
+ print " * 'dependency specification' (in single quotes is best.)"
+ print " Here are a few examples of the dependency specification format:"
+ print " "+bold("binutils")+" matches"
+ print " binutils-2.11.90.0.7 and binutils-2.11.92.0.12.3-r1"
+ print " "+bold("sys-devel/binutils")+" matches"
+ print " binutils-2.11.90.0.7 and binutils-2.11.92.0.12.3-r1"
+ print " "+bold(">sys-devel/binutils-2.11.90.0.7")+" matches"
+ print " binutils-2.11.92.0.12.3-r1"
+ print " "+bold(">=sys-devel/binutils-2.11.90.0.7")+" matches"
+ print " binutils-2.11.90.0.7 and binutils-2.11.92.0.12.3-r1"
+ print " "+bold("<=sys-devel/binutils-2.11.92.0.12.3-r1")+" matches"
+ print " binutils-2.11.90.0.7 and binutils-2.11.92.0.12.3-r1"
+ print
+ print " "+green("--config")
+ print " Runs package-specific operations that must be executed after an"
+ print " emerge process has completed. This usually entails configuration"
+ print " file setup or other similar setups that the user may wish to run."
+ print
+ print " "+green("--depclean")
+ print " Cleans the system by removing packages that are not associated"
+ print " with explicitly merged packages. Depclean works by creating the"
+ print " full dependency tree from the system list and the world file,"
+ print " then comparing it to installed packages. Packages installed, but"
+ print " not associated with an explicit merge are listed as candidates"
+ print " for unmerging."+turquoise(" WARNING: This can seriously affect your system by")
+ print " "+turquoise("removing packages that may have been linked against, but due to")
+ print " "+turquoise("changes in USE flags may no longer be part of the dep tree. Use")
+ print " "+turquoise("caution when employing this feature.")
+ print
+ print " "+green("--info")
+ print " Displays important portage variables that will be exported to"
+ print " ebuild.sh when performing merges. This information is useful"
+ print " for bug reports and verification of settings. All settings in"
+ print " make.{conf,globals,defaults} and the environment show up if"
+ print " run with the '--verbose' flag."
+ print
+ print " "+green("--metadata")
+ print " Transfers metadata cache from ${PORTDIR}/metadata/cache/ to"
+ print " /var/cache/edb/dep/ as is normally done on the tail end of an"
+ print " rsync update using " + bold("emerge --sync") + ". This process populates the"
+ print " cache database that portage uses for pre-parsed lookups of"
+ print " package data. It does not populate cache for the overlays"
+ print " listed in PORTDIR_OVERLAY. In order to generate cache for"
+ print " overlays, use " + bold("--regen") + "."
+ print
+ print " "+green("--prune")+" ("+green("-P")+" short option)"
+ print " "+turquoise("WARNING: This action can remove important packages!")
+ print " Removes all but the most recently installed version of a package"
+ print " from your system. This action doesn't verify the possible binary"
+ print " compatibility between versions and can thus remove essential"
+ print " dependencies from your system."
+ print " The argument format is the same as for the "+bold("--clean")+" action."
+ print
+ print " "+green("--regen")
+ print " Causes portage to check and update the dependency cache of all"
+ print " ebuilds in the portage tree. This is not recommended for rsync"
+ print " users as rsync updates the cache using server-side caches."
+ print " Rsync users should simply 'emerge --sync' to regenerate."
+ print
+ print " "+green("--resume")
+ print " Resumes the last merge operation. It can be treated just like a"
+ print " regular emerge: --pretend and other options work alongside it."
+ print " 'emerge --resume' only returns an error on failure. When there is"
+ print " nothing to do, it exits with a message and a success condition."
+ print
+ print " "+green("--search")+" ("+green("-s")+" short option)"
+ print " Searches for matches of the supplied string in the current local"
+ print " portage tree. By default emerge uses a case-insensitive simple "
+ print " search, but you can enable a regular expression search by "
+ print " prefixing the search string with %."
+ print " Prepending the expression with a '@' will cause the category to"
+ print " be included in the search."
+ print " A few examples:"
+ print " "+bold("emerge --search libc")
+ print " list all packages that contain libc in their name"
+ print " "+bold("emerge --search '%^kde'")
+ print " list all packages starting with kde"
+ print " "+bold("emerge --search '%gcc$'")
+ print " list all packages ending with gcc"
+ print " "+bold("emerge --search '%@^dev-java.*jdk'")
+ print " list all available Java JDKs"
+ print
+ print " "+green("--searchdesc")+" ("+green("-S")+" short option)"
+ print " Matches the search string against the description field as well"
+ print " the package's name. Take caution as the descriptions are also"
+ print " matched as regular expressions."
+ print " emerge -S html"
+ print " emerge -S applet"
+ print " emerge -S 'perl.*module'"
+ print
+ print " "+green("--unmerge")+" ("+green("-C")+" short option)"
+ print " "+turquoise("WARNING: This action can remove important packages!")
+ print " Removes all matching packages "+bold("completely")+" from"
+ print " your system. Specify arguments using the dependency specification"
+ print " format described in the "+bold("--clean")+" action above."
+ print
+ print " "+green("--update")+" ("+green("-u")+" short option)"
+ print " Updates packages to the best version available, which may not"
+ print " always be the highest version number due to masking for testing"
+ print " and development. This will also update direct dependencies which"
+ print " may not what you want. Package atoms specified on the command line"
+ print " are greedy, meaning that unspecific atoms may match multiple"
+ print " installed versions of slotted packages."
+ print
+ print " "+green("--version")+" ("+green("-V")+" short option)"
+ print " Displays the currently installed version of portage along with"
+ print " other information useful for quick reference on a system. See"
+ print " "+bold("emerge info")+" for more advanced information."
+ print
+ print turquoise("Options:")
+ print " "+green("--alphabetical")
+ print " When displaying USE and other flag output, combines the enabled"
+ print " and disabled flags into a single list and sorts it alphabetically."
+ print " With this option, output such as USE=\"dar -bar -foo\" will instead"
+ print " be displayed as USE=\"-bar dar -foo\""
+ print
+ print " "+green("--ask")+" ("+green("-a")+" short option)"
+ print " before performing the merge, display what ebuilds and tbz2s will"
+ print " be installed, in the same format as when using --pretend; then"
+ print " ask whether to continue with the merge or abort. Using --ask is"
+ print " more efficient than using --pretend and then executing the same"
+ print " command without --pretend, as dependencies will only need to be"
+ print " calculated once. WARNING: If the \"Enter\" key is pressed at the"
+ print " prompt (with no other input), it is interpreted as acceptance of"
+ print " the first choice. Note that the input buffer is not cleared prior"
+ print " to the prompt, so an accidental press of the \"Enter\" key at any"
+ print " time prior to the prompt will be interpreted as a choice!"
+ print
+ print " "+green("--buildpkg")+" ("+green("-b")+" short option)"
+ print " Tell emerge to build binary packages for all ebuilds processed"
+ print " (in addition to actually merging the packages. Useful for"
+ print " maintainers or if you administrate multiple Gentoo Linux"
+ print " systems (build once, emerge tbz2s everywhere) as well as disaster"
+ print " recovery."
+ print
+ print " "+green("--buildpkgonly")+" ("+green("-B")+" short option)"
+ print " Creates a binary package, but does not merge it to the"
+ print " system. This has the restriction that unsatisfied dependencies"
+ print " must not exist for the desired package as they cannot be used if"
+ print " they do not exist on the system."
+ print
+ print " "+green("--changelog")+" ("+green("-l")+" short option)"
+ print " When pretending, also display the ChangeLog entries for packages"
+ print " that will be upgraded."
+ print
+ print " "+green("--color") + " < " + turquoise("y") + " | "+ turquoise("n")+" >"
+ print " Enable or disable color output. This option will override NOCOLOR"
+ print " (see make.conf(5)) and may also be used to force color output when"
+ print " stdout is not a tty (by default, color is disabled unless stdout"
+ print " is a tty)."
+ print
+ print " "+green("--columns")
+ print " Display the pretend output in a tabular form. Versions are"
+ print " aligned vertically."
+ print
+ print " "+green("--debug")+" ("+green("-d")+" short option)"
+ print " Tell emerge to run the ebuild command in --debug mode. In this"
+ print " mode, the bash build environment will run with the -x option,"
+ print " causing it to output verbose debug information print to stdout."
+ print " --debug is great for finding bash syntax errors as providing"
+ print " very verbose information about the dependency and build process."
+ print
+ print " "+green("--deep")+" ("+green("-D")+" short option)"
+ print " This flag forces emerge to consider the entire dependency tree of"
+ print " packages, instead of checking only the immediate dependencies of"
+ print " the packages. As an example, this catches updates in libraries"
+ print " that are not directly listed in the dependencies of a package."
+ print " Also see --with-bdeps for behavior with respect to build time"
+ print " dependencies that are not strictly required."
+ print
+ print " "+green("--emptytree")+" ("+green("-e")+" short option)"
+ print " Virtually tweaks the tree of installed packages to contain"
+ print " nothing. This is great to use together with --pretend. This makes"
+ print " it possible for developers to get a complete overview of the"
+ print " complete dependency tree of a certain package."
+ print
+ print " "+green("--fetchonly")+" ("+green("-f")+" short option)"
+ print " Instead of doing any package building, just perform fetches for"
+ print " all packages (main package as well as all dependencies.) When"
+ print " used in combination with --pretend all the SRC_URIs will be"
+ print " displayed multiple mirrors per line, one line per file."
+ print
+ print " "+green("--fetch-all-uri")+" ("+green("-F")+" short option)"
+ print " Same as --fetchonly except that all package files, including those"
+ print " not required to build the package, will be processed."
+ print
+ print " "+green("--getbinpkg")+" ("+green("-g")+" short option)"
+ print " Using the server and location defined in PORTAGE_BINHOST, portage"
+ print " will download the information from each binary file there and it"
+ print " will use that information to help build the dependency list. This"
+ print " option implies '-k'. (Use -gK for binary-only merging.)"
+ print
+ print " "+green("--getbinpkgonly")+" ("+green("-G")+" short option)"
+ print " This option is identical to -g, as above, except it will not use"
+ print " ANY information from the local machine. All binaries will be"
+ print " downloaded from the remote server without consulting packages"
+ print " existing in the packages directory."
+ print
+ print " "+green("--newuse")+" ("+green("-N")+" short option)"
+ print " Tells emerge to include installed packages where USE flags have "
+ print " changed since installation."
+ print
+ print " "+green("--noconfmem")
+ print " Portage keeps track of files that have been placed into"
+ print " CONFIG_PROTECT directories, and normally it will not merge the"
+ print " same file more than once, as that would become annoying. This"
+ print " can lead to problems when the user wants the file in the case"
+ print " of accidental deletion. With this option, files will always be"
+ print " merged to the live fs instead of silently dropped."
+ print
+ print " "+green("--nodeps")+" ("+green("-O")+" short option)"
+ print " Merge specified packages, but don't merge any dependencies."
+ print " Note that the build may fail if deps aren't satisfied."
+ print
+ print " "+green("--noreplace")+" ("+green("-n")+" short option)"
+ print " Skip the packages specified on the command-line that have"
+ print " already been installed. Without this option, any packages,"
+ print " ebuilds, or deps you specify on the command-line *will* cause"
+ print " Portage to remerge the package, even if it is already installed."
+ print " Note that Portage won't remerge dependencies by default."
+ print
+ print " "+green("--nospinner")
+ print " Disables the spinner regardless of terminal type."
+ print
+ print " "+green("--oneshot")+" ("+green("-1")+" short option)"
+ print " Emerge as normal, but don't add packages to the world profile."
+ print " This package will only be updated if it is depended upon by"
+ print " another package."
+ print
+ print " "+green("--onlydeps")+" ("+green("-o")+" short option)"
+ print " Only merge (or pretend to merge) the dependencies of the"
+ print " specified packages, not the packages themselves."
+ print
+ print " "+green("--pretend")+" ("+green("-p")+" short option)"
+ print " Instead of actually performing the merge, simply display what"
+ print " ebuilds and tbz2s *would* have been installed if --pretend"
+ print " weren't used. Using --pretend is strongly recommended before"
+ print " installing an unfamiliar package. In the printout, N = new,"
+ print " U = updating, R = replacing, F = fetch restricted, B = blocked"
+ print " by an already installed package, D = possible downgrading,"
+ print " S = slotted install. --verbose causes affecting use flags to be"
+ print " printed out accompanied by a '+' for enabled and a '-' for"
+ print " disabled USE flags."
+ print
+ print " "+green("--quiet")+" ("+green("-q")+" short option)"
+ print " Effects vary, but the general outcome is a reduced or condensed"
+ print " output from portage's displays."
+ print
+ print " "+green("--skipfirst")
+ print " This option is only valid in a resume situation. It removes the"
+ print " first package in the resume list so that a merge may continue in"
+ print " the presence of an uncorrectable or inconsequential error. This"
+ print " should only be used in cases where skipping the package will not"
+ print " result in failed dependencies."
+ print
+ print " "+green("--tree")+" ("+green("-t")+" short option)"
+ print " Shows the dependency tree using indentation for dependencies."
+ print " The packages are also listed in reverse merge order so that"
+ print " a package's dependencies follow the package. Only really useful"
+ print " in combination with --emptytree, --update or --deep."
+ print
+ print " "+green("--usepkg")+" ("+green("-k")+" short option)"
+ print " Tell emerge to use binary packages (from $PKGDIR) if they are"
+ print " available, thus possibly avoiding some time-consuming compiles."
+ print " This option is useful for CD installs; you can export"
+ print " PKGDIR=/mnt/cdrom/packages and then use this option to have"
+ print " emerge \"pull\" binary packages from the CD in order to satisfy"
+ print " dependencies."
+ print
+ print " "+green("--usepkgonly")+" ("+green("-K")+" short option)"
+ print " Like --usepkg above, except this only allows the use of binary"
+ print " packages, and it will abort the emerge if the package is not"
+ print " available at the time of dependency calculation."
+ print
+ print " "+green("--verbose")+" ("+green("-v")+" short option)"
+ print " Effects vary, but the general outcome is an increased or expanded"
+ print " display of content in portage's displays."
+ print
+ print " "+green("--with-bdeps")+" < " + turquoise("y") + " | "+ turquoise("n")+" >"
+ print " In dependency calculations, pull in build time dependencies that"
+ print " are not strictly required. This defaults to 'n' for installation"
+ print " actions and 'y' for the --depclean action. This setting can be"
+ print " added to EMERGE_DEFAULT_OPTS (see make.conf(5)) and later"
+ print " overridden via the command line."
+ print
+ elif myaction == "sync":
+ print
+ print bold("Usage: ")+turquoise("emerge")+" "+turquoise("--sync")
+ print
+ print " 'emerge --sync' tells emerge to update the Portage tree as specified in"
+ print " The SYNC variable found in /etc/make.conf. By default, SYNC instructs"
+ print " emerge to perform an rsync-style update with rsync.gentoo.org."
+ print
+ print " 'emerge-webrsync' exists as a helper app to emerge --sync, providing a"
+ print " method to receive the entire portage tree as a tarball that can be"
+ print " extracted and used. First time syncs would benefit greatly from this."
+ print
+ print " "+turquoise("WARNING:")
+ print " If using our rsync server, emerge will clean out all files that do not"
+ print " exist on it, including ones that you may have created. The exceptions"
+ print " to this are the distfiles, local and packages directories."
+ print
+ elif myaction=="system":
+ print
+ print bold("Usage: ")+turquoise("emerge")+" [ "+green("options")+" ] "+turquoise("system")
+ print
+ print " \"emerge system\" is the Portage system update command. When run, it"
+ print " will scan the etc/make.profile/packages file and determine what"
+ print " packages need to be installed so that your system meets the minimum"
+ print " requirements of your current system profile. Note that this doesn't"
+ print " necessarily bring your system up-to-date at all; instead, it just"
+ print " ensures that you have no missing parts. For example, if your system"
+ print " profile specifies that you should have sys-apps/iptables installed"
+ print " and you don't, then \"emerge system\" will install it (the most"
+ print " recent version that matches the profile spec) for you. It's always a"
+ print " good idea to do an \"emerge --pretend system\" before an \"emerge"
+ print " system\", just so you know what emerge is planning to do."
+ print
+ elif myaction=="world":
+ print
+ print bold("Usage: ")+turquoise("emerge")+" [ "+green("options")+" ] "+turquoise("world")
+ print
+ print " 'emerge world' is the Portage command for completely updating your"
+ print " system. The normal procedure is to first do an 'emerge --sync' and"
+ print " then an 'emerge --update --deep world'. The first command brings your"
+ print " local Portage tree up-to-date with the latest version information and"
+ print " ebuilds. The second command then rebuilds all packages for which newer"
+ print " versions or newer ebuilds have become available since you last did a"
+ print " sync and update."
+ print
+ elif myaction=="config":
+ outstuff=green("Config file management support (preliminary)")+"""
+
+Portage has a special feature called "config file protection". The purpose of
+this feature is to prevent new package installs from clobbering existing
+configuration files. By default, config file protection is turned on for /etc
+and the KDE configuration dirs; more may be added in the future.
+
+When Portage installs a file into a protected directory tree like /etc, any
+existing files will not be overwritten. If a file of the same name already
+exists, Portage will change the name of the to-be-installed file from 'foo' to
+'._cfg0000_foo'. If '._cfg0000_foo' already exists, this name becomes
+'._cfg0001_foo', etc. In this way, existing files are not overwritten,
+allowing the administrator to manually merge the new config files and avoid any
+unexpected changes.
+
+In addition to protecting overwritten files, Portage will not delete any files
+from a protected directory when a package is unmerged. While this may be a
+little bit untidy, it does prevent potentially valuable config files from being
+deleted, which is of paramount importance.
+
+Protected directories are set using the CONFIG_PROTECT variable, normally
+defined in /etc/make.globals. Directory exceptions to the CONFIG_PROTECTed
+directories can be specified using the CONFIG_PROTECT_MASK variable. To find
+files that need to be updated in /etc, type:
+
+# find /etc -iname '._cfg????_*'
+
+You can disable this feature by setting CONFIG_PROTECT="-*" in /etc/make.conf.
+Then, Portage will mercilessly auto-update your config files. Alternatively,
+you can leave Config File Protection on but tell Portage that it can overwrite
+files in certain specific /etc subdirectories. For example, if you wanted
+Portage to automatically update your rc scripts and your wget configuration,
+but didn't want any other changes made without your explicit approval, you'd
+add this to /etc/make.conf:
+
+CONFIG_PROTECT_MASK="/etc/wget /etc/rc.d"
+
+Tools such as dispatch-conf, cfg-update, and etc-update are also available to
+aid in the merging of these files. They provide interactive merging and can
+auto-merge trivial changes.
+
+"""
+ print outstuff
+
diff --git a/pym/portage/exception.py b/pym/portage/exception.py
new file mode 100644
index 00000000..4be72cf9
--- /dev/null
+++ b/pym/portage/exception.py
@@ -0,0 +1,100 @@
+# Copyright 1998-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+
+class PortageException(Exception):
+ """General superclass for portage exceptions"""
+ def __init__(self,value):
+ self.value = value[:]
+ def __str__(self):
+ if isinstance(self.value, basestring):
+ return self.value
+ else:
+ return repr(self.value)
+
+class CorruptionError(PortageException):
+ """Corruption indication"""
+
+class InvalidDependString(PortageException):
+ """An invalid depend string has been encountered"""
+
+class InvalidVersionString(PortageException):
+ """An invalid version string has been encountered"""
+
+class SecurityViolation(PortageException):
+ """An incorrect formatting was passed instead of the expected one"""
+
+class IncorrectParameter(PortageException):
+ """A parameter of the wrong type was passed"""
+
+class MissingParameter(PortageException):
+ """A parameter is required for the action requested but was not passed"""
+
+class ParseError(PortageException):
+ """An error was generated while attempting to parse the request"""
+
+class InvalidData(PortageException):
+ """An incorrect formatting was passed instead of the expected one"""
+
+class InvalidDataType(PortageException):
+ """An incorrect type was passed instead of the expected one"""
+
+class InvalidLocation(PortageException):
+ """Data was not found when it was expected to exist or was specified incorrectly"""
+
+class FileNotFound(InvalidLocation):
+ """A file was not found when it was expected to exist"""
+
+class DirectoryNotFound(InvalidLocation):
+ """A directory was not found when it was expected to exist"""
+
+class OperationNotPermitted(PortageException):
+ """An operation was not permitted operating system"""
+
+class PermissionDenied(PortageException):
+ """Permission denied"""
+
+class ReadOnlyFileSystem(PortageException):
+ """Read-only file system"""
+
+class CommandNotFound(PortageException):
+ """A required binary was not available or executable"""
+
+
+class PortagePackageException(PortageException):
+ """Malformed or missing package data"""
+
+class PackageNotFound(PortagePackageException):
+ """Missing Ebuild or Binary"""
+
+class InvalidPackageName(PortagePackageException):
+ """Malformed package name"""
+
+class InvalidAtom(PortagePackageException):
+ """Malformed atom spec"""
+
+class UnsupportedAPIException(PortagePackageException):
+ """Unsupported API"""
+ def __init__(self, cpv, eapi):
+ self.cpv, self.eapi = cpv, eapi
+ def __str__(self):
+ return "Unable to do any operations on '%s', due to the fact it's EAPI is higher then this portage versions. Please upgrade to a portage version that supports EAPI %s" % (self.cpv, self.eapi)
+
+
+
+class SignatureException(PortageException):
+ """Signature was not present in the checked file"""
+
+class DigestException(SignatureException):
+ """A problem exists in the digest"""
+
+class MissingSignature(SignatureException):
+ """Signature was not present in the checked file"""
+
+class InvalidSignature(SignatureException):
+ """Signature was checked and was not a valid, current, nor trusted signature"""
+
+class UntrustedSignature(SignatureException):
+ """Signature was not certified to the desired security level"""
+
diff --git a/pym/portage/exec.py b/pym/portage/exec.py
new file mode 100644
index 00000000..252fed2a
--- /dev/null
+++ b/pym/portage/exec.py
@@ -0,0 +1,336 @@
+# exec.py -- process spawning and execution functionality
+# Copyright 1998-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+
+import os, atexit, signal, sys
+import portage_data
+
+from portage_util import dump_traceback
+from portage_const import BASH_BINARY, SANDBOX_BINARY
+
+
+try:
+ import resource
+ max_fd_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
+except ImportError:
+ max_fd_limit = 256
+
+if os.path.isdir("/proc/%i/fd" % os.getpid()):
+ def get_open_fds():
+ return map(int, [fd for fd in os.listdir("/proc/%i/fd" % os.getpid()) if fd.isdigit()])
+else:
+ def get_open_fds():
+ return xrange(max_fd_limit)
+
+sandbox_capable = (os.path.isfile(SANDBOX_BINARY) and
+ os.access(SANDBOX_BINARY, os.X_OK))
+
+def spawn_bash(mycommand, debug=False, opt_name=None, **keywords):
+ """
+ Spawns a bash shell running a specific command
+
+ @param mycommand: The command for bash to run
+ @type mycommand: String
+ @param debug: Turn bash debugging on (set -x)
+ @type debug: Boolean
+ @param opt_name: Name of the spawned process (defaults to binary name)
+ @type opt_name: String
+ @param keywords: Extra Dictionary arguments to pass to spawn
+ @type keywords: Dictionary
+ """
+
+ args = [BASH_BINARY]
+ if not opt_name:
+ opt_name = os.path.basename(mycommand.split()[0])
+ if debug:
+ # Print commands and their arguments as they are executed.
+ args.append("-x")
+ args.append("-c")
+ args.append(mycommand)
+ return spawn(args, opt_name=opt_name, **keywords)
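+
+# Example (hypothetical command): roughly equivalent to running
+# `bash -c "emerge --info"`, with the process name set to "emerge":
+#
+# retval = spawn_bash('emerge --info')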
+
+def spawn_sandbox(mycommand, opt_name=None, **keywords):
+ if not sandbox_capable:
+ return spawn_bash(mycommand, opt_name=opt_name, **keywords)
+ args=[SANDBOX_BINARY]
+ if not opt_name:
+ opt_name = os.path.basename(mycommand.split()[0])
+ args.append(mycommand)
+ return spawn(args, opt_name=opt_name, **keywords)
+
+_exithandlers = []
+def atexit_register(func, *args, **kargs):
+ """Wrapper around atexit.register that is needed in order to track
+ what is registered. For example, when portage restarts itself via
+ os.execv, the atexit module does not work so we have to do it
+ manually by calling the run_exitfuncs() function in this module."""
+ _exithandlers.append((func, args, kargs))
+
+def run_exitfuncs():
+ """This should behave identically to the routine performed by
+ the atexit module at exit time. It's only necessary to call this
+ function when atexit will not work (because of os.execv, for
+ example)."""
+
+ # This function is a copy of the private atexit._run_exitfuncs()
+ # from the python 2.4.2 sources. The only difference from the
+ # original function is in the output to stderr.
+ exc_info = None
+ while _exithandlers:
+ func, targs, kargs = _exithandlers.pop()
+ try:
+ func(*targs, **kargs)
+ except SystemExit:
+ exc_info = sys.exc_info()
+ except: # No idea what they called, so we need this broad except here.
+ dump_traceback("Error in portage_exec.run_exitfuncs", noiselevel=0)
+ exc_info = sys.exc_info()
+
+ if exc_info is not None:
+ raise exc_info[0], exc_info[1], exc_info[2]
+
+atexit.register(run_exitfuncs)
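+
+# Example (hypothetical handler): handlers registered this way also run
+# on the os.execv restart path, provided run_exitfuncs() is called first:
+#
+# def remove_lockfile(path):
+#     try:
+#         os.unlink(path)
+#     except OSError:
+#         pass
+# atexit_register(remove_lockfile, '/var/lock/example.lock')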
+
+# We need to make sure that any processes spawned are killed off when
+# we exit. spawn() takes care of adding and removing pids to this list
+# as it creates and cleans up processes.
+spawned_pids = []
+def cleanup():
+ while spawned_pids:
+ pid = spawned_pids.pop()
+ try:
+ if os.waitpid(pid, os.WNOHANG) == (0, 0):
+ os.kill(pid, signal.SIGTERM)
+ os.waitpid(pid, 0)
+ except OSError:
+ # This pid has been cleaned up outside
+ # of spawn().
+ pass
+
+atexit_register(cleanup)
+
+def spawn(mycommand, env={}, opt_name=None, fd_pipes=None, returnpid=False,
+ uid=None, gid=None, groups=None, umask=None, logfile=None,
+ path_lookup=True):
+ """
+ Spawns a given command.
+
+ @param mycommand: the command to execute
+ @type mycommand: String or List (Popen style list)
+ @param env: A dict of Key=Value pairs for env variables
+ @type env: Dictionary
+ @param opt_name: an optional name for the spawn'd process (defaults to the binary name)
+ @type opt_name: String
+ @param fd_pipes: A dict of mapping for pipes, { '0': stdin, '1': stdout } for example
+ @type fd_pipes: Dictionary
+ @param returnpid: Return the Process IDs for a successful spawn.
+ NOTE: This requires the caller clean up all the PIDs, otherwise spawn will clean them.
+ @type returnpid: Boolean
+ @param uid: User ID to spawn as; useful for dropping privileges
+ @type uid: Integer
+ @param gid: Group ID to spawn as; useful for dropping privileges
+ @type gid: Integer
+ @param groups: Group IDs to spawn in; useful for having the process run in multiple group contexts.
+ @type groups: List
+ @param umask: An integer representing the umask for the process (see man chmod for umask details)
+ @type umask: Integer
+ @param logfile: name of a file to use for logging purposes
+ @type logfile: String
+ @param path_lookup: If the binary is not fully specified then look for it in PATH
+ @type path_lookup: Boolean
+
+ logfile requires stdout and stderr to be assigned to this process (ie not pointed
+ somewhere else.)
+
+ """
+
+ # mycommand is either a str or a list
+ if isinstance(mycommand, str):
+ mycommand = mycommand.split()
+
+ # If an absolute path to an executable file isn't given
+ # search for it unless we've been told not to.
+ binary = mycommand[0]
+ if (not os.path.isabs(binary) or not os.path.isfile(binary)
+ or not os.access(binary, os.X_OK)):
+ binary = path_lookup and find_binary(binary) or None
+ if not binary:
+ return -1
+
+ # If we haven't been told what file descriptors to use
+ # default to propagating our stdin, stdout and stderr.
+ if fd_pipes is None:
+ fd_pipes = {0:0, 1:1, 2:2}
+
+ # mypids will hold the pids of all processes created.
+ mypids = []
+
+ if logfile:
+ # Using a log file requires that stdout and stderr
+ # are assigned to the process we're running.
+ if 1 not in fd_pipes or 2 not in fd_pipes:
+ raise ValueError(fd_pipes)
+
+ # Create a pipe
+ (pr, pw) = os.pipe()
+
+ # Create a tee process, giving it our stdout and stderr
+ # as well as the read end of the pipe.
+ mypids.extend(spawn(('tee', '-i', '-a', logfile),
+ returnpid=True, fd_pipes={0:pr,
+ 1:fd_pipes[1], 2:fd_pipes[2]}))
+
+ # We don't need the read end of the pipe, so close it.
+ os.close(pr)
+
+ # Assign the write end of the pipe to our stdout and stderr.
+ fd_pipes[1] = pw
+ fd_pipes[2] = pw
+
+ pid = os.fork()
+
+ if not pid:
+ try:
+ _exec(binary, mycommand, opt_name, fd_pipes,
+ env, gid, groups, uid, umask)
+ except Exception, e:
+ # We need to catch _any_ exception so that it doesn't
+ # propagate out of this function and cause exiting
+ # with anything other than os._exit()
+ sys.stderr.write("%s:\n %s\n" % (e, " ".join(mycommand)))
+ sys.stderr.flush()
+ os._exit(1)
+
+ # Add the pid to our local and the global pid lists.
+ mypids.append(pid)
+ spawned_pids.append(pid)
+
+ # If we started a tee process the write side of the pipe is no
+ # longer needed, so close it.
+ if logfile:
+ os.close(pw)
+
+ # If the caller wants to handle cleaning up the processes, we tell
+ # it about all processes that were created.
+ if returnpid:
+ return mypids
+
+ # Otherwise we clean them up.
+ while mypids:
+
+ # Pull the last reader in the pipe chain. If all processes
+ # in the pipe are well behaved, it will die when the process
+ # it is reading from dies.
+ pid = mypids.pop(0)
+
+ # and wait for it.
+ retval = os.waitpid(pid, 0)[1]
+
+ # When it's done, we can remove it from the
+ # global pid list as well.
+ spawned_pids.remove(pid)
+
+ if retval:
+ # If it failed, kill off anything else that
+ # isn't dead yet.
+ for pid in mypids:
+ if os.waitpid(pid, os.WNOHANG) == (0,0):
+ os.kill(pid, signal.SIGTERM)
+ os.waitpid(pid, 0)
+ spawned_pids.remove(pid)
+
+ # If it got a signal, return the signal that was sent.
+ if (retval & 0xff):
+ return ((retval & 0xff) << 8)
+
+ # Otherwise, return its exit code.
+ return (retval >> 8)
+
+ # Everything succeeded
+ return 0
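+
+# Example (hypothetical command): collect the child's stdout through a
+# pipe and clean up the pid ourselves via returnpid:
+#
+# pr, pw = os.pipe()
+# mypids = spawn('/bin/date', fd_pipes={0:0, 1:pw, 2:2}, returnpid=True)
+# os.close(pw)
+# output = os.fdopen(pr).read()
+# os.waitpid(mypids[0], 0)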
+
+def _exec(binary, mycommand, opt_name, fd_pipes, env, gid, groups, uid, umask):
+
+ """
+ Execute a given binary with options
+
+ @param binary: Name of program to execute
+ @type binary: String
+ @param mycommand: Options for program
+ @type mycommand: String
+ @param opt_name: Name of process (defaults to binary)
+ @type opt_name: String
+ @param fd_pipes: Mapping pipes to destination; { 0:0, 1:1, 2:2 }
+ @type fd_pipes: Dictionary
+ @param env: Key,Value mapping for Environmental Variables
+ @type env: Dictionary
+ @param gid: Group ID to run the process under
+ @type gid: Integer
+ @param groups: Groups the Process should be in.
+ @type groups: List
+ @param uid: User ID to run the process under
+ @type uid: Integer
+ @param umask: an int representing a unix umask (see man chmod for umask details)
+ @type umask: Integer
+ @rtype: None
+ @returns: Never returns (calls os.execve)
+ """
+
+ # If the process we're creating hasn't been given a name
+ # assign it the name of the executable.
+ if not opt_name:
+ opt_name = os.path.basename(binary)
+
+ # Set up the command's argument list.
+ myargs = [opt_name]
+ myargs.extend(mycommand[1:])
+
+ # Set up the command's pipes.
+ my_fds = {}
+ # To protect from cases where direct assignment could
+ # clobber needed fds ({1:2, 2:1}) we first dupe the fds
+ # into unused fds.
+ for fd in fd_pipes:
+ my_fds[fd] = os.dup(fd_pipes[fd])
+ # Then assign them to what they should be.
+ for fd in my_fds:
+ os.dup2(my_fds[fd], fd)
+ # Then close _all_ fds that haven't been explicitly
+ # requested to be kept open.
+ for fd in get_open_fds():
+ if fd not in my_fds:
+ try:
+ os.close(fd)
+ except OSError:
+ pass
+
+ # Set requested process permissions.
+ if gid:
+ os.setgid(gid)
+ if groups:
+ os.setgroups(groups)
+ if uid:
+ os.setuid(uid)
+ if umask:
+ os.umask(umask)
+
+ # And switch to the new process.
+ os.execve(binary, myargs, env)
+
+def find_binary(binary):
+ """
+ Given a binary name, find the binary in PATH
+
+ @param binary: Name of the binary to find
+ @type binary: String
+ @rtype: None or string
+ @returns: full path to binary or None if the binary could not be located.
+ """
+
+ for path in os.getenv("PATH", "").split(":"):
+ filename = "%s/%s" % (path, binary)
+ if os.access(filename, os.X_OK) and os.path.isfile(filename):
+ return filename
+ return None
diff --git a/pym/portage/getbinpkg.py b/pym/portage/getbinpkg.py
new file mode 100644
index 00000000..462da429
--- /dev/null
+++ b/pym/portage/getbinpkg.py
@@ -0,0 +1,572 @@
+# getbinpkg.py -- Portage binary-package helper functions
+# Copyright 2003-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+if not hasattr(__builtins__, "set"):
+ from sets import Set as set
+
+from output import red, yellow, green
+import htmllib,HTMLParser,formatter,sys,os,xpak,time,tempfile,base64,urllib2
+
+try:
+ import cPickle
+except ImportError:
+ import pickle as cPickle
+
+try:
+ import ftplib
+except SystemExit, e:
+ raise
+except Exception, e:
+ sys.stderr.write(red("!!! CANNOT IMPORT FTPLIB: ")+str(e)+"\n")
+
+try:
+ import httplib
+except SystemExit, e:
+ raise
+except Exception, e:
+ sys.stderr.write(red("!!! CANNOT IMPORT HTTPLIB: ")+str(e)+"\n")
+
+def make_metadata_dict(data):
+ myid,myglob = data
+
+ mydict = {}
+ for x in xpak.getindex_mem(myid):
+ mydict[x] = xpak.getitem(data,x)
+
+ return mydict
+
+class ParseLinks(HTMLParser.HTMLParser):
+ """Parser class that overrides HTMLParser to grab all anchors from an html
+ page and provide suffix and prefix limiters"""
+ def __init__(self):
+ self.PL_anchors = []
+ HTMLParser.HTMLParser.__init__(self)
+
+ def get_anchors(self):
+ return self.PL_anchors
+
+ def get_anchors_by_prefix(self,prefix):
+ newlist = []
+ for x in self.PL_anchors:
+ if x.startswith(prefix):
+ if x not in newlist:
+ newlist.append(x[:])
+ return newlist
+
+ def get_anchors_by_suffix(self,suffix):
+ newlist = []
+ for x in self.PL_anchors:
+ if x.endswith(suffix):
+ if x not in newlist:
+ newlist.append(x[:])
+ return newlist
+
+ def handle_endtag(self,tag):
+ pass
+
+ def handle_starttag(self,tag,attrs):
+ if tag == "a":
+ for x in attrs:
+ if x[0] == 'href':
+ if x[1] not in self.PL_anchors:
+ self.PL_anchors.append(urllib2.unquote(x[1]))
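+
+# Example (hypothetical markup): feed a page and collect its anchors:
+#
+# parser = ParseLinks()
+# parser.feed('<a href="foo-1.0.tbz2">foo</a>')
+# parser.get_anchors()                  -> ['foo-1.0.tbz2']
+# parser.get_anchors_by_suffix('.tbz2') -> ['foo-1.0.tbz2']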
+
+
+def create_conn(baseurl,conn=None):
+ """(baseurl,conn) --- Takes a protocol://site:port/address url, and an
+ optional connection. If connection is already active, it is passed on.
+ baseurl is reduced to address and is returned in the tuple
+ (conn,protocol,address,http_params,http_headers)"""
+ parts = baseurl.split("://",1)
+ if len(parts) != 2:
+ raise ValueError, "Provided URL does not contain protocol identifier. '%s'" % baseurl
+ protocol,url_parts = parts
+ del parts
+ host,address = url_parts.split("/",1)
+ del url_parts
+ address = "/"+address
+
+ userpass_host = host.split("@",1)
+ if len(userpass_host) == 1:
+ host = userpass_host[0]
+ userpass = ["anonymous"]
+ else:
+ host = userpass_host[1]
+ userpass = userpass_host[0].split(":")
+ del userpass_host
+
+ if len(userpass) > 2:
+ raise ValueError, "Unable to interpret username/password provided."
+ elif len(userpass) == 2:
+ username = userpass[0]
+ password = userpass[1]
+ elif len(userpass) == 1:
+ username = userpass[0]
+ password = None
+ del userpass
+
+ http_headers = {}
+ http_params = {}
+ if username and password:
+ http_headers = {
+ "Authorization": "Basic %s" %
+ base64.encodestring("%s:%s" % (username, password)).replace(
+ "\012",
+ ""
+ ),
+ }
+
+ if not conn:
+ if protocol == "https":
+ conn = httplib.HTTPSConnection(host)
+ elif protocol == "http":
+ conn = httplib.HTTPConnection(host)
+ elif protocol == "ftp":
+ passive = 1
+ if(host[-1] == "*"):
+ passive = 0
+ host = host[:-1]
+ conn = ftplib.FTP(host)
+ if password:
+ conn.login(username,password)
+ else:
+ sys.stderr.write(yellow(" * No password provided for username")+" '"+str(username)+"'\n\n")
+ conn.login(username)
+ conn.set_pasv(passive)
+ conn.set_debuglevel(0)
+ else:
+ raise NotImplementedError, "%s is not a supported protocol." % protocol
+
+ return (conn,protocol,address, http_params, http_headers)
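+
+# Example (hypothetical URL): for an authenticated http url the pieces
+# come back as follows:
+#
+# conn, protocol, address, params, headers = \
+#     create_conn('http://user:pass@mirror.example.org/packages/Packages')
+# protocol == 'http', address == '/packages/Packages', and headers
+# carries the basic-auth Authorization entry built above.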
+
+def make_ftp_request(conn, address, rest=None, dest=None):
+ """(conn,address,rest) --- uses the conn object to request the data
+ from address, issuing a REST command first if rest is passed."""
+ try:
+
+ if dest:
+ fstart_pos = dest.tell()
+
+ conn.voidcmd("TYPE I")
+ fsize = conn.size(address)
+
+ if (rest != None) and (rest < 0):
+ rest = fsize+int(rest)
+ if rest < 0:
+ rest = 0
+
+ if rest != None:
+ mysocket = conn.transfercmd("RETR "+str(address), rest)
+ else:
+ mysocket = conn.transfercmd("RETR "+str(address))
+
+ mydata = ""
+ while 1:
+ somedata = mysocket.recv(8192)
+ if somedata:
+ if dest:
+ dest.write(somedata)
+ else:
+ mydata = mydata + somedata
+ else:
+ break
+
+ if dest:
+ data_size = dest.tell() - fstart_pos
+ else:
+ data_size = len(mydata)
+
+ mysocket.close()
+ conn.voidresp()
+ conn.voidcmd("TYPE A")
+
+ return mydata,not (fsize==data_size),""
+
+ except ValueError, e:
+ return None,int(str(e)[:4]),str(e)
+
+
+def make_http_request(conn, address, params={}, headers={}, dest=None):
+ """(conn,address,params,headers) --- uses the conn object to request
+ the data from address, performing Location forwarding and using the
+ optional params and headers."""
+
+ rc = 0
+ response = None
+ while (rc == 0) or (rc == 301) or (rc == 302):
+ try:
+ if (rc != 0):
+ conn,ignore,ignore,ignore,ignore = create_conn(address)
+ conn.request("GET", address, params, headers)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ return None,None,"Server request failed: "+str(e)
+ response = conn.getresponse()
+ rc = response.status
+
+		# 301/302 mean the page has moved permanently/temporarily.
+ if ((rc == 301) or (rc == 302)):
+ ignored_data = response.read()
+ del ignored_data
+ for x in str(response.msg).split("\n"):
+ parts = x.split(": ",1)
+ if parts[0] == "Location":
+ if (rc == 301):
+ sys.stderr.write(red("Location has moved: ")+str(parts[1])+"\n")
+ if (rc == 302):
+ sys.stderr.write(red("Location has temporarily moved: ")+str(parts[1])+"\n")
+ address = parts[1]
+ break
+
+ if (rc != 200) and (rc != 206):
+ sys.stderr.write(str(response.msg)+"\n")
+ sys.stderr.write(response.read()+"\n")
+ sys.stderr.write("address: "+address+"\n")
+ return None,rc,"Server did not respond successfully ("+str(response.status)+": "+str(response.reason)+")"
+
+ if dest:
+ dest.write(response.read())
+ return "",0,""
+
+ return response.read(),0,""
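+
+# Usage sketch: given the (conn, address, params, headers) pieces from
+# create_conn() above,
+#
+#	page, rc, msg = make_http_request(conn, address, params, headers)
+#	# on success rc == 0 and page holds the response body; on failure
+#	# page is None and rc/msg describe the failure.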
+
+
+def match_in_array(array, prefix="", suffix="", match_both=1, allow_overlap=0):
+ myarray = []
+
+ if not (prefix and suffix):
+ match_both = 0
+
+ for x in array:
+ add_p = 0
+ if prefix and (len(x) >= len(prefix)) and (x[:len(prefix)] == prefix):
+ add_p = 1
+
+ if match_both:
+ if prefix and not add_p: # Require both, but don't have first one.
+ continue
+ else:
+ if add_p: # Only need one, and we have it.
+ myarray.append(x[:])
+ continue
+
+		if not allow_overlap: # Do not allow the prefix and suffix to overlap.
+			if len(x) >= (len(prefix)+len(suffix)):
+				y = x[len(prefix):]
+			else:
+				continue # Too short to match.
+		else:
+			y = x # Do whatever... We're overlapping.
+
+		if suffix and (len(y) >= len(suffix)) and (y[-len(suffix):] == suffix):
+			myarray.append(x) # It matches.
+		else:
+			continue # Doesn't match.
+
+ return myarray
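+
+# Examples of the matching rules above (file names are hypothetical):
+#
+#	match_in_array(["foo-1.0.tbz2", "metadata.idx"], suffix=".tbz2")
+#	# -> ["foo-1.0.tbz2"]
+#	match_in_array(["metadata.idx", "metadata.idx.gz"], prefix="metadata.idx")
+#	# -> ["metadata.idx", "metadata.idx.gz"]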
+
+
+
+def dir_get_list(baseurl,conn=None):
+ """(baseurl[,connection]) -- Takes a base url to connect to and read from.
+	URL should be in the form <proto>://<site>[:port]<path>
+ Connection is used for persistent connection instances."""
+
+ if not conn:
+ keepconnection = 0
+ else:
+ keepconnection = 1
+
+ conn,protocol,address,params,headers = create_conn(baseurl, conn)
+
+ listing = None
+ if protocol in ["http","https"]:
+ page,rc,msg = make_http_request(conn,address,params,headers)
+
+ if page:
+ parser = ParseLinks()
+ parser.feed(page)
+ del page
+ listing = parser.get_anchors()
+ else:
+ raise Exception, "Unable to get listing: %s %s" % (rc,msg)
+ elif protocol in ["ftp"]:
+ if address[-1] == '/':
+ olddir = conn.pwd()
+ conn.cwd(address)
+ listing = conn.nlst()
+ conn.cwd(olddir)
+ del olddir
+ else:
+ listing = conn.nlst(address)
+ else:
+ raise TypeError, "Unknown protocol. '%s'" % protocol
+
+ if not keepconnection:
+ conn.close()
+
+ return listing
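+
+# Usage sketch (hypothetical binhost URL):
+#
+#	listing = dir_get_list("http://binhost.example.org/packages/All/")
+#	# For HTTP(S) this scrapes the anchors from the index page via
+#	# ParseLinks; for FTP it is the result of NLST on the directory.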
+
+def file_get_metadata(baseurl,conn=None, chunk_size=3000):
+ """(baseurl[,connection]) -- Takes a base url to connect to and read from.
+	URL should be in the form <proto>://<site>[:port]<path>
+ Connection is used for persistent connection instances."""
+
+ if not conn:
+ keepconnection = 0
+ else:
+ keepconnection = 1
+
+ conn,protocol,address,params,headers = create_conn(baseurl, conn)
+
+ if protocol in ["http","https"]:
+ headers["Range"] = "bytes=-"+str(chunk_size)
+ data,rc,msg = make_http_request(conn, address, params, headers)
+ elif protocol in ["ftp"]:
+ data,rc,msg = make_ftp_request(conn, address, -chunk_size)
+ else:
+ raise TypeError, "Unknown protocol. '%s'" % protocol
+
+ if data:
+ xpaksize = xpak.decodeint(data[-8:-4])
+ if (xpaksize+8) > chunk_size:
+ myid = file_get_metadata(baseurl, conn, (xpaksize+8))
+ if not keepconnection:
+ conn.close()
+ return myid
+ else:
+ xpak_data = data[len(data)-(xpaksize+8):-8]
+ del data
+
+ myid = xpak.xsplit_mem(xpak_data)
+ if not myid:
+ myid = None,None
+ del xpak_data
+ else:
+ myid = None,None
+
+ if not keepconnection:
+ conn.close()
+
+ return myid
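+
+# Usage sketch (hypothetical package URL). Only the trailing chunk_size
+# bytes are fetched, since the xpak metadata segment sits at the end of a
+# .tbz2 file and its length is encoded in the last 8 bytes:
+#
+#	myid = file_get_metadata(
+#		"http://binhost.example.org/packages/All/foo-1.0.tbz2", conn)
+#	# myid is the pair returned by xpak.xsplit_mem(), or (None, None)
+#	# if no metadata could be read.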
+
+
+def file_get(baseurl,dest,conn=None,fcmd=None):
+ """(baseurl,dest,fcmd=) -- Takes a base url to connect to and read from.
+	URL should be in the form <proto>://[user[:pass]@]<site>[:port]<path>"""
+
+ if not fcmd:
+ return file_get_lib(baseurl,dest,conn)
+
+ fcmd = fcmd.replace("${DISTDIR}",dest)
+ fcmd = fcmd.replace("${URI}", baseurl)
+ fcmd = fcmd.replace("${FILE}", os.path.basename(baseurl))
+ mysplit = fcmd.split()
+ mycmd = mysplit[0]
+ myargs = [os.path.basename(mycmd)]+mysplit[1:]
+ mypid=os.fork()
+ if mypid == 0:
+ try:
+ os.execv(mycmd,myargs)
+ except OSError:
+ pass
+ sys.stderr.write("!!! Failed to spawn fetcher.\n")
+ sys.stderr.flush()
+ os._exit(1)
+ retval=os.waitpid(mypid,0)[1]
+ if (retval & 0xff) == 0:
+ retval = retval >> 8
+ else:
+ sys.stderr.write("Spawned processes caught a signal.\n")
+ sys.exit(1)
+ if retval != 0:
+ sys.stderr.write("Fetcher exited with a failure condition.\n")
+ return 0
+ return 1
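+
+# Usage sketch with an external fetcher (paths are hypothetical; the
+# ${DISTDIR}, ${URI} and ${FILE} tokens are substituted as shown above):
+#
+#	file_get("http://binhost.example.org/packages/All/foo-1.0.tbz2",
+#		"/usr/portage/packages/All",
+#		fcmd="/usr/bin/wget -q -O ${DISTDIR}/${FILE} ${URI}")
+#	# Returns 1 on success and 0 if the fetcher exits non-zero.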
+
+def file_get_lib(baseurl,dest,conn=None):
+ """(baseurl[,connection]) -- Takes a base url to connect to and read from.
+	URL should be in the form <proto>://<site>[:port]<path>
+ Connection is used for persistent connection instances."""
+
+ if not conn:
+ keepconnection = 0
+ else:
+ keepconnection = 1
+
+ conn,protocol,address,params,headers = create_conn(baseurl, conn)
+
+	sys.stderr.write("Fetching '"+str(os.path.basename(address))+"'\n")
+ if protocol in ["http","https"]:
+ data,rc,msg = make_http_request(conn, address, params, headers, dest=dest)
+ elif protocol in ["ftp"]:
+ data,rc,msg = make_ftp_request(conn, address, dest=dest)
+ else:
+ raise TypeError, "Unknown protocol. '%s'" % protocol
+
+ if not keepconnection:
+ conn.close()
+
+ return rc
+
+
+def dir_get_metadata(baseurl, conn=None, chunk_size=3000, verbose=1, usingcache=1, makepickle=None):
+	"""(baseurl,conn,chunk_size,verbose) --
+	Fetch the remote metadata.idx index for the given binhost, fill in any
+	missing or stale entries by reading the xpak segment of the individual
+	.tbz2 files, and return the resulting {filename: metadata} dict."""
+ if not conn:
+ keepconnection = 0
+ else:
+ keepconnection = 1
+
+ if makepickle is None:
+ makepickle = "/var/cache/edb/metadata.idx.most_recent"
+
+ conn,protocol,address,params,headers = create_conn(baseurl, conn)
+
+ filedict = {}
+
+ try:
+ metadatafile = open("/var/cache/edb/remote_metadata.pickle")
+ metadata = cPickle.load(metadatafile)
+ sys.stderr.write("Loaded metadata pickle.\n")
+ metadatafile.close()
+ except (cPickle.UnpicklingError, OSError, IOError, EOFError):
+ metadata = {}
+ if not metadata.has_key(baseurl):
+ metadata[baseurl]={}
+ if not metadata[baseurl].has_key("indexname"):
+ metadata[baseurl]["indexname"]=""
+ if not metadata[baseurl].has_key("timestamp"):
+ metadata[baseurl]["timestamp"]=0
+ if not metadata[baseurl].has_key("unmodified"):
+ metadata[baseurl]["unmodified"]=0
+ if not metadata[baseurl].has_key("data"):
+ metadata[baseurl]["data"]={}
+
+ filelist = dir_get_list(baseurl, conn)
+ tbz2list = match_in_array(filelist, suffix=".tbz2")
+ metalist = match_in_array(filelist, prefix="metadata.idx")
+ del filelist
+
+ # Determine if our metadata file is current.
+ metalist.sort()
+ metalist.reverse() # makes the order new-to-old.
+ havecache=0
+ for mfile in metalist:
+ if usingcache and \
+ ((metadata[baseurl]["indexname"] != mfile) or \
+ (metadata[baseurl]["timestamp"] < int(time.time()-(60*60*24)))):
+ # Try to download new cache until we succeed on one.
+ data=""
+ for trynum in [1,2,3]:
+ mytempfile = tempfile.TemporaryFile()
+ try:
+ file_get(baseurl+"/"+mfile, mytempfile, conn)
+ if mytempfile.tell() > len(data):
+ mytempfile.seek(0)
+ data = mytempfile.read()
+ except ValueError, e:
+ sys.stderr.write("--- "+str(e)+"\n")
+ if trynum < 3:
+ sys.stderr.write("Retrying...\n")
+ mytempfile.close()
+ continue
+ if match_in_array([mfile],suffix=".gz"):
+ sys.stderr.write("gzip'd\n")
+ try:
+ import gzip
+ mytempfile.seek(0)
+ gzindex = gzip.GzipFile(mfile[:-3],'rb',9,mytempfile)
+ data = gzindex.read()
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ mytempfile.close()
+ sys.stderr.write("!!! Failed to use gzip: "+str(e)+"\n")
+ mytempfile.close()
+ try:
+ metadata[baseurl]["data"] = cPickle.loads(data)
+ del data
+ metadata[baseurl]["indexname"] = mfile
+ metadata[baseurl]["timestamp"] = int(time.time())
+ metadata[baseurl]["modified"] = 0 # It's not, right after download.
+ sys.stderr.write("Pickle loaded.\n")
+ break
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ sys.stderr.write("!!! Failed to read data from index: "+str(mfile)+"\n")
+ sys.stderr.write("!!! "+str(e)+"\n")
+ try:
+ metadatafile = open("/var/cache/edb/remote_metadata.pickle", "w+")
+ cPickle.dump(metadata,metadatafile)
+ metadatafile.close()
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ sys.stderr.write("!!! Failed to write binary metadata to disk!\n")
+ sys.stderr.write("!!! "+str(e)+"\n")
+ break
+ # We may have metadata... now we run through the tbz2 list and check.
+ sys.stderr.write(yellow("cache miss: 'x'")+" --- "+green("cache hit: 'o'")+"\n")
+ binpkg_filenames = set()
+ for x in tbz2list:
+ x = os.path.basename(x)
+ binpkg_filenames.add(x)
+		if not metadata[baseurl]["data"].has_key(x):
+ sys.stderr.write(yellow("x"))
+ metadata[baseurl]["modified"] = 1
+ myid = None
+ for retry in xrange(3):
+ try:
+ myid = file_get_metadata(
+ "/".join((baseurl.rstrip("/"), x.lstrip("/"))),
+ conn, chunk_size)
+ break
+ except httplib.BadStatusLine:
+ # Sometimes this error is thrown from conn.getresponse() in
+ # make_http_request(). The docstring for this error in
+ # httplib.py says "Presumably, the server closed the
+ # connection before sending a valid response".
+ conn, protocol, address, params, headers = create_conn(
+ baseurl)
+
+ if myid and myid[0]:
+ metadata[baseurl]["data"][x] = make_metadata_dict(myid)
+ elif verbose:
+ sys.stderr.write(red("!!! Failed to retrieve metadata on: ")+str(x)+"\n")
+ else:
+ sys.stderr.write(green("o"))
+ # Cleanse stale cache for files that don't exist on the server anymore.
+ stale_cache = set(metadata[baseurl]["data"]).difference(binpkg_filenames)
+ if stale_cache:
+ for x in stale_cache:
+ del metadata[baseurl]["data"][x]
+ metadata[baseurl]["modified"] = 1
+ del stale_cache
+ del binpkg_filenames
+ sys.stderr.write("\n")
+
+ try:
+ if metadata[baseurl].has_key("modified") and metadata[baseurl]["modified"]:
+ metadata[baseurl]["timestamp"] = int(time.time())
+ metadatafile = open("/var/cache/edb/remote_metadata.pickle", "w+")
+ cPickle.dump(metadata,metadatafile)
+ metadatafile.close()
+ if makepickle:
+ metadatafile = open(makepickle, "w")
+ cPickle.dump(metadata[baseurl]["data"],metadatafile)
+ metadatafile.close()
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ sys.stderr.write("!!! Failed to write binary metadata to disk!\n")
+ sys.stderr.write("!!! "+str(e)+"\n")
+
+ if not keepconnection:
+ conn.close()
+
+ return metadata[baseurl]["data"]
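+
+# Usage sketch (hypothetical binhost). Returns a {tbz2 filename: metadata}
+# dict, reusing /var/cache/edb/remote_metadata.pickle while the cached
+# index is less than a day old:
+#
+#	metadata = dir_get_metadata("http://binhost.example.org/packages/All")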
diff --git a/pym/portage/gpg.py b/pym/portage/gpg.py
new file mode 100644
index 00000000..04ed6004
--- /dev/null
+++ b/pym/portage/gpg.py
@@ -0,0 +1,149 @@
+# portage_gpg.py -- core Portage functionality
+# Copyright 2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+
+import os
+import copy
+import types
+import commands
+import portage_exception
+import portage_checksum
+
+GPG_BINARY = "/usr/bin/gpg"
+GPG_OPTIONS = " --lock-never --no-random-seed-file --no-greeting --no-sig-cache "
+GPG_VERIFY_FLAGS = " --verify "
+GPG_KEYDIR = " --homedir '%s' "
+GPG_KEYRING = " --keyring '%s' "
+
+UNTRUSTED = 0
+EXISTS = UNTRUSTED + 1
+MARGINAL = EXISTS + 1
+TRUSTED = MARGINAL + 1
+
+def fileStats(filepath):
+ mya = []
+ for x in os.stat(filepath):
+ mya.append(x)
+ mya.append(portage_checksum.perform_checksum(filepath))
+ return mya
+
+
+class FileChecker:
+ def __init__(self,keydir=None,keyring=None,requireSignedRing=False,minimumTrust=EXISTS):
+ self.minimumTrust = TRUSTED # Default we require trust. For rings.
+ self.keydir = None
+ self.keyring = None
+ self.keyringPath = None
+ self.keyringStats = None
+ self.keyringIsTrusted = False
+
+ if (keydir != None):
+ # Verify that the keydir is valid.
+ if type(keydir) != types.StringType:
+ raise portage_exception.InvalidDataType, "keydir argument: %s" % keydir
+ if not os.path.isdir(keydir):
+ raise portage_exception.DirectoryNotFound, "keydir: %s" % keydir
+ self.keydir = copy.deepcopy(keydir)
+
+ if (keyring != None):
+ # Verify that the keyring is a valid filename and exists.
+ if type(keyring) != types.StringType:
+ raise portage_exception.InvalidDataType, "keyring argument: %s" % keyring
+ if keyring.find("/") != -1:
+ raise portage_exception.InvalidData, "keyring: %s" % keyring
+ pathname = ""
+ if keydir:
+ pathname = keydir + "/" + keyring
+ if not os.path.isfile(pathname):
+ raise portage_exception.FileNotFound, "keyring missing: %s (dev.gentoo.org/~carpaski/gpg/)" % pathname
+
+		keyringPath = None
+		if keydir and keyring:
+			keyringPath = keydir + "/" + keyring
+
+		if requireSignedRing and not keyringPath:
+			raise portage_exception.MissingParameter
+
+		if keyringPath:
+			self.keyringStats = fileStats(keyringPath)
+			self.minimumTrust = TRUSTED
+			if not self.verify(keyringPath, keyringPath+".asc"):
+				self.keyringIsTrusted = False
+				if requireSignedRing:
+					raise portage_exception.InvalidSignature, "Required keyring verification: "+keyringPath
+			else:
+				self.keyringIsTrusted = True
+
+			self.keyring = copy.deepcopy(keyring)
+			self.keyringPath = self.keydir+"/"+self.keyring
+
+		self.minimumTrust = minimumTrust
+
+ def _verifyKeyring(self):
+ if self.keyringStats and self.keyringPath:
+ new_stats = fileStats(self.keyringPath)
+ if new_stats != self.keyringStats:
+ raise portage_exception.SecurityViolation, "GPG keyring changed!"
+
+ def verify(self, filename, sigfile=None):
+ """Uses minimumTrust to determine if it is Valid/True or Invalid/False"""
+ self._verifyKeyring()
+
+ if not os.path.isfile(filename):
+ raise portage_exception.FileNotFound, filename
+
+ if sigfile and not os.path.isfile(sigfile):
+ raise portage_exception.FileNotFound, sigfile
+
+ if self.keydir and not os.path.isdir(self.keydir):
+ raise portage_exception.DirectoryNotFound, filename
+
+ if self.keyringPath:
+ if not os.path.isfile(self.keyringPath):
+ raise portage_exception.FileNotFound, self.keyringPath
+
+ if not os.path.isfile(filename):
+ raise portage_exception.CommandNotFound, filename
+
+ command = GPG_BINARY + GPG_VERIFY_FLAGS + GPG_OPTIONS
+ if self.keydir:
+ command += GPG_KEYDIR % (self.keydir)
+ if self.keyring:
+ command += GPG_KEYRING % (self.keyring)
+
+ if sigfile:
+ command += " '"+sigfile+"'"
+ command += " '"+filename+"'"
+
+ result,output = commands.getstatusoutput(command)
+
+ signal = result & 0xff
+ result = (result >> 8)
+
+ if signal:
+		raise portage_exception.SignalCaught, "Signal: %d" % (signal)
+
+ trustLevel = UNTRUSTED
+ if result == 0:
+ trustLevel = TRUSTED
+ #if output.find("WARNING") != -1:
+ # trustLevel = MARGINAL
+ if output.find("BAD") != -1:
+ raise portage_exception.InvalidSignature, filename
+ elif result == 1:
+ trustLevel = EXISTS
+ if output.find("BAD") != -1:
+ raise portage_exception.InvalidSignature, filename
+ elif result == 2:
+ trustLevel = UNTRUSTED
+ if output.find("could not be verified") != -1:
+ raise portage_exception.MissingSignature, filename
+ if output.find("public key not found") != -1:
+ if self.keyringIsTrusted: # We trust the ring, but not the key specifically.
+ trustLevel = MARGINAL
+ else:
+ raise portage_exception.InvalidSignature, filename+" (Unknown Signature)"
+ else:
+ raise portage_exception.UnknownCondition, "GPG returned unknown result: %d" % (result)
+
+ if trustLevel >= self.minimumTrust:
+ return True
+ return False
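+
+# Usage sketch (the keydir/keyring paths are hypothetical):
+#
+#	checker = FileChecker(keydir="/etc/portage/gpg",
+#		keyring="trustedkeys.gpg", minimumTrust=MARGINAL)
+#	ok = checker.verify("/usr/portage/app-misc/foo/Manifest",
+#		"/usr/portage/app-misc/foo/Manifest.asc")
+#	# True only if gpg reports a trust level >= minimumTrust.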
diff --git a/pym/portage/localization.py b/pym/portage/localization.py
new file mode 100644
index 00000000..59ccea71
--- /dev/null
+++ b/pym/portage/localization.py
@@ -0,0 +1,21 @@
+# portage_localization.py -- Code to manage/help portage localization.
+# Copyright 2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+
+# We define this to make the transition easier for us.
+def _(mystr):
+ return mystr
+
+
+def localization_example():
+ # Dict references allow translators to rearrange word order.
+ print _("You can use this string for translating.")
+ print _("Strings can be formatted with %(mystr)s like this.") % {"mystr": "VALUES"}
+
+ a_value = "value.of.a"
+ b_value = 123
+ c_value = [1,2,3,4]
+ print _("A: %(a)s -- B: %(b)s -- C: %(c)s") % {"a":a_value,"b":b_value,"c":c_value}
+
diff --git a/pym/portage/locks.py b/pym/portage/locks.py
new file mode 100644
index 00000000..28042e2f
--- /dev/null
+++ b/pym/portage/locks.py
@@ -0,0 +1,312 @@
+# portage: Lock management code
+# Copyright 2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+
+import errno, os, stat, time, types
+from portage_exception import InvalidData, DirectoryNotFound, FileNotFound
+from portage_data import portage_gid
+from portage_util import writemsg
+from portage_localization import _
+
+HARDLINK_FD = -2
+
+def lockdir(mydir):
+ return lockfile(mydir,wantnewlockfile=1)
+def unlockdir(mylock):
+ return unlockfile(mylock)
+
+def lockfile(mypath,wantnewlockfile=0,unlinkfile=0):
+	"""Creates a lockfile for the given path. With wantnewlockfile set, the
+	lock is taken on a separate file named '.'+basename+'.portage_lockfile'
+	in the same directory as the given path."""
+ import fcntl
+
+ if not mypath:
+ raise InvalidData, "Empty path given"
+
+ if type(mypath) == types.StringType and mypath[-1] == '/':
+ mypath = mypath[:-1]
+
+ if type(mypath) == types.FileType:
+ mypath = mypath.fileno()
+ if type(mypath) == types.IntType:
+ lockfilename = mypath
+ wantnewlockfile = 0
+ unlinkfile = 0
+ elif wantnewlockfile:
+ base, tail = os.path.split(mypath)
+ lockfilename = os.path.join(base, "." + tail + ".portage_lockfile")
+ del base, tail
+ unlinkfile = 1
+ else:
+ lockfilename = mypath
+
+ if type(mypath) == types.StringType:
+ if not os.path.exists(os.path.dirname(mypath)):
+ raise DirectoryNotFound, os.path.dirname(mypath)
+ if not os.path.exists(lockfilename):
+ old_mask=os.umask(000)
+ myfd = os.open(lockfilename, os.O_CREAT|os.O_RDWR,0660)
+ try:
+ if os.stat(lockfilename).st_gid != portage_gid:
+ os.chown(lockfilename,os.getuid(),portage_gid)
+ except OSError, e:
+			if e.errno == errno.ENOENT: # No such file or directory
+				return lockfile(mypath,wantnewlockfile,unlinkfile)
+			else:
+				writemsg("Cannot chown a lockfile. This could cause inconvenience later.\n")
+ os.umask(old_mask)
+ else:
+ myfd = os.open(lockfilename, os.O_CREAT|os.O_RDWR,0660)
+
+ elif type(mypath) == types.IntType:
+ myfd = mypath
+
+ else:
+ raise ValueError, "Unknown type passed in '%s': '%s'" % (type(mypath),mypath)
+
+	# Try for a non-blocking lock. If it's already held, print a message
+	# that we're waiting on the lockfile, then fall back to a blocking attempt.
+ locking_method = fcntl.lockf
+ try:
+ fcntl.lockf(myfd,fcntl.LOCK_EX|fcntl.LOCK_NB)
+ except IOError, e:
+ if "errno" not in dir(e):
+ raise
+ if e.errno == errno.EAGAIN:
+ # resource temp unavailable; eg, someone beat us to the lock.
+ if type(mypath) == types.IntType:
+ print "waiting for lock on fd %i" % myfd
+ else:
+ print "waiting for lock on %s" % lockfilename
+ # try for the exclusive lock now.
+ fcntl.lockf(myfd,fcntl.LOCK_EX)
+ elif e.errno == errno.ENOLCK:
+ # We're not allowed to lock on this FS.
+ os.close(myfd)
+ link_success = False
+ if lockfilename == str(lockfilename):
+ if wantnewlockfile:
+ try:
+ if os.stat(lockfilename)[stat.ST_NLINK] == 1:
+ os.unlink(lockfilename)
+ except OSError:
+ pass
+ link_success = hardlink_lockfile(lockfilename)
+ if not link_success:
+ raise
+ locking_method = None
+ myfd = HARDLINK_FD
+ else:
+ raise
+
+
+ if type(lockfilename) == types.StringType and \
+ myfd != HARDLINK_FD and os.fstat(myfd).st_nlink == 0:
+ # The file was deleted on us... Keep trying to make one...
+ os.close(myfd)
+ writemsg("lockfile recurse\n",1)
+ lockfilename,myfd,unlinkfile,locking_method = lockfile(mypath,wantnewlockfile,unlinkfile)
+
+ writemsg(str((lockfilename,myfd,unlinkfile))+"\n",1)
+ return (lockfilename,myfd,unlinkfile,locking_method)
+
+def unlockfile(mytuple):
+ import fcntl
+
+	#XXX: Compatibility hack.
+ if len(mytuple) == 3:
+ lockfilename,myfd,unlinkfile = mytuple
+ locking_method = fcntl.flock
+ elif len(mytuple) == 4:
+ lockfilename,myfd,unlinkfile,locking_method = mytuple
+ else:
+ raise InvalidData
+
+ if(myfd == HARDLINK_FD):
+ unhardlink_lockfile(lockfilename)
+ return True
+
+ # myfd may be None here due to myfd = mypath in lockfile()
+ if type(lockfilename) == types.StringType and not os.path.exists(lockfilename):
+ writemsg("lockfile does not exist '%s'\n" % lockfilename,1)
+ if myfd is not None:
+ os.close(myfd)
+ return False
+
+ try:
+ if myfd is None:
+ myfd = os.open(lockfilename, os.O_WRONLY,0660)
+ unlinkfile = 1
+ locking_method(myfd,fcntl.LOCK_UN)
+ except OSError:
+ if type(lockfilename) == types.StringType:
+ os.close(myfd)
+ raise IOError, "Failed to unlock file '%s'\n" % lockfilename
+
+ try:
+ # This sleep call was added to allow other processes that are
+ # waiting for a lock to be able to grab it before it is deleted.
+ # lockfile() already accounts for this situation, however, and
+		# the sleep here adds more time than is saved overall, so it is
+		# commented out until it is proved necessary.
+ #time.sleep(0.0001)
+ if unlinkfile:
+ locking_method(myfd,fcntl.LOCK_EX|fcntl.LOCK_NB)
+ # We won the lock, so there isn't competition for it.
+ # We can safely delete the file.
+ writemsg("Got the lockfile...\n",1)
+ if os.fstat(myfd).st_nlink == 1:
+ os.unlink(lockfilename)
+ writemsg("Unlinked lockfile...\n",1)
+ locking_method(myfd,fcntl.LOCK_UN)
+ else:
+ writemsg("lockfile does not exist '%s'\n" % lockfilename,1)
+ os.close(myfd)
+ return False
+ except Exception, e:
+ writemsg("Failed to get lock... someone took it.\n",1)
+ writemsg(str(e)+"\n",1)
+
+ # why test lockfilename? because we may have been handed an
+ # fd originally, and the caller might not like having their
+ # open fd closed automatically on them.
+ if type(lockfilename) == types.StringType:
+ os.close(myfd)
+
+ return True
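+
+# Usage sketch (hypothetical path). The 4-tuple returned by lockfile()
+# is handed back verbatim to unlockfile():
+#
+#	mylock = lockfile("/var/db/pkg", wantnewlockfile=1)
+#	try:
+#		pass # ... critical section ...
+#	finally:
+#		unlockfile(mylock)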
+
+
+
+
+def hardlock_name(path):
+ return path+".hardlock-"+os.uname()[1]+"-"+str(os.getpid())
+
+def hardlink_is_mine(link,lock):
+ try:
+ return os.stat(link).st_nlink == 2
+ except OSError:
+ return False
+
+def hardlink_lockfile(lockfilename, max_wait=14400):
+	"""Does the NFS hardlink shuffle to ensure locking on the disk.
+	We create a PRIVATE lockfile that is just a placeholder on the disk.
+	Then we HARDLINK the real lockfile to that private file.
+	If our file has 2 references, then we have the lock. :)
+	Otherwise we lather, rinse, and repeat.
+	We default to a 4 hour timeout.
+	"""
+
+ start_time = time.time()
+ myhardlock = hardlock_name(lockfilename)
+ reported_waiting = False
+
+ while(time.time() < (start_time + max_wait)):
+ # We only need it to exist.
+ myfd = os.open(myhardlock, os.O_CREAT|os.O_RDWR,0660)
+ os.close(myfd)
+
+ if not os.path.exists(myhardlock):
+ raise FileNotFound, _("Created lockfile is missing: %(filename)s") % {"filename":myhardlock}
+
+		try:
+			os.link(myhardlock, lockfilename)
+		except OSError:
+			pass
+
+ if hardlink_is_mine(myhardlock, lockfilename):
+ # We have the lock.
+ if reported_waiting:
+ print
+ return True
+
+ if reported_waiting:
+ writemsg(".")
+ else:
+ reported_waiting = True
+ print
+ print "Waiting on (hardlink) lockfile: (one '.' per 3 seconds)"
+ print "This is a feature to prevent distfiles corruption."
+ print "/usr/lib/portage/bin/clean_locks can fix stuck locks."
+ print "Lockfile: " + lockfilename
+ time.sleep(3)
+
+ os.unlink(myhardlock)
+ return False
+
+def unhardlink_lockfile(lockfilename):
+ myhardlock = hardlock_name(lockfilename)
+ if hardlink_is_mine(myhardlock, lockfilename):
+ # Make sure not to touch lockfilename unless we really have a lock.
+ try:
+ os.unlink(lockfilename)
+ except OSError:
+ pass
+ try:
+ os.unlink(myhardlock)
+ except OSError:
+ pass
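+
+# Usage sketch (hypothetical distfile lock). The hardlink variant is the
+# fallback used when fcntl locking fails with ENOLCK, e.g. on some NFS
+# mounts:
+#
+#	name = "/usr/portage/distfiles/.foo.tar.gz.portage_lockfile"
+#	if hardlink_lockfile(name):
+#		pass # ... fetch ...
+#		unhardlink_lockfile(name)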
+
+def hardlock_cleanup(path, remove_all_locks=False):
+ mypid = str(os.getpid())
+ myhost = os.uname()[1]
+ mydl = os.listdir(path)
+
+ results = []
+ mycount = 0
+
+ mylist = {}
+ for x in mydl:
+ if os.path.isfile(path+"/"+x):
+ parts = x.split(".hardlock-")
+ if len(parts) == 2:
+ filename = parts[0]
+ hostpid = parts[1].split("-")
+ host = "-".join(hostpid[:-1])
+ pid = hostpid[-1]
+
+ if not mylist.has_key(filename):
+ mylist[filename] = {}
+ if not mylist[filename].has_key(host):
+ mylist[filename][host] = []
+ mylist[filename][host].append(pid)
+
+ mycount += 1
+
+
+ results.append("Found %(count)s locks" % {"count":mycount})
+
+ for x in mylist.keys():
+ if mylist[x].has_key(myhost) or remove_all_locks:
+ mylockname = hardlock_name(path+"/"+x)
+ if hardlink_is_mine(mylockname, path+"/"+x) or \
+ not os.path.exists(path+"/"+x) or \
+ remove_all_locks:
+ for y in mylist[x].keys():
+ for z in mylist[x][y]:
+ filename = path+"/"+x+".hardlock-"+y+"-"+z
+ if filename == mylockname:
+ continue
+ try:
+ # We're sweeping through, unlinking everyone's locks.
+ os.unlink(filename)
+ results.append(_("Unlinked: ") + filename)
+ except OSError:
+ pass
+ try:
+ os.unlink(path+"/"+x)
+ results.append(_("Unlinked: ") + path+"/"+x)
+ os.unlink(mylockname)
+ results.append(_("Unlinked: ") + mylockname)
+ except OSError:
+ pass
+ else:
+ try:
+ os.unlink(mylockname)
+ results.append(_("Unlinked: ") + mylockname)
+ except OSError:
+ pass
+
+ return results
+
diff --git a/pym/portage/mail.py b/pym/portage/mail.py
new file mode 100644
index 00000000..99ed77fd
--- /dev/null
+++ b/pym/portage/mail.py
@@ -0,0 +1,89 @@
+# portage_mail.py -- core Portage functionality
+# Copyright 1998-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: portage.py 3483 2006-06-10 21:40:40Z genone $
+
+import portage_exception, socket, smtplib, os, sys, time
+from email.MIMEText import MIMEText as TextMessage
+from email.MIMEMultipart import MIMEMultipart as MultipartMessage
+from email.MIMEBase import MIMEBase as BaseMessage
+
+def create_message(sender, recipient, subject, body, attachments=None):
+ if attachments == None:
+ mymessage = TextMessage(body)
+ else:
+ mymessage = MultipartMessage()
+ mymessage.attach(TextMessage(body))
+ for x in attachments:
+ if isinstance(x, BaseMessage):
+ mymessage.attach(x)
+ elif isinstance(x, str):
+ mymessage.attach(TextMessage(x))
+ else:
+ raise portage_exception.PortageException("Can't handle type of attachment: %s" % type(x))
+
+ mymessage.set_unixfrom(sender)
+ mymessage["To"] = recipient
+ mymessage["From"] = sender
+ mymessage["Subject"] = subject
+ mymessage["Date"] = time.strftime("%a, %d %b %Y %H:%M:%S %z")
+
+ return mymessage
+
+def send_mail(mysettings, message):
+ mymailhost = "localhost"
+ mymailport = 25
+ mymailuser = ""
+ mymailpasswd = ""
+ myrecipient = "root@localhost"
+
+ # Syntax for PORTAGE_ELOG_MAILURI (if defined):
+	# address [[user:passwd@]mailserver[:port]]
+	# where address: recipient address
+ # user: username for smtp auth (defaults to none)
+ # passwd: password for smtp auth (defaults to none)
+ # mailserver: smtp server that should be used to deliver the mail (defaults to localhost)
+ # alternatively this can also be the absolute path to a sendmail binary if you don't want to use smtp
+ # port: port to use on the given smtp server (defaults to 25, values > 100000 indicate that starttls should be used on (port-100000))
+ if " " in mysettings["PORTAGE_ELOG_MAILURI"]:
+ myrecipient, mymailuri = mysettings["PORTAGE_ELOG_MAILURI"].split()
+ if "@" in mymailuri:
+ myauthdata, myconndata = mymailuri.rsplit("@", 1)
+ try:
+ mymailuser,mymailpasswd = myauthdata.split(":")
+ except ValueError:
+ print "!!! invalid SMTP AUTH configuration, trying unauthenticated ..."
+ else:
+ myconndata = mymailuri
+ if ":" in myconndata:
+ mymailhost,mymailport = myconndata.split(":")
+ else:
+ mymailhost = myconndata
+ else:
+ myrecipient = mysettings["PORTAGE_ELOG_MAILURI"]
+
+ myfrom = message.get("From")
+
+ # user wants to use a sendmail binary instead of smtp
+ if mymailhost[0] == os.sep and os.path.exists(mymailhost):
+ fd = os.popen(mymailhost+" -f "+myfrom+" "+myrecipient, "w")
+ fd.write(message.as_string())
+ if fd.close() != None:
+ sys.stderr.write("!!! %s returned with a non-zero exit code. This generally indicates an error.\n" % mymailhost)
+ else:
+ try:
+ if int(mymailport) > 100000:
+ myconn = smtplib.SMTP(mymailhost, int(mymailport) - 100000)
+ myconn.starttls()
+ else:
+ myconn = smtplib.SMTP(mymailhost, mymailport)
+ if mymailuser != "" and mymailpasswd != "":
+ myconn.login(mymailuser, mymailpasswd)
+ myconn.sendmail(myfrom, myrecipient, message.as_string())
+ myconn.quit()
+		except smtplib.SMTPException, e:
+			raise portage_exception.PortageException("!!! An error occurred while trying to send logmail:\n"+str(e))
+		except socket.error, e:
+			raise portage_exception.PortageException("!!! A network error occurred while trying to send logmail:\n"+str(e)+"\nAre you sure you configured PORTAGE_ELOG_MAILURI correctly?")
+ return
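+
+# Example PORTAGE_ELOG_MAILURI values matching the syntax above (the
+# hostnames and credentials are hypothetical):
+#
+#	root@localhost
+#	admin@example.org user:secret@mail.example.org:587
+#	admin@example.org mail.example.org:100465 (starttls on port 465)
+#	admin@example.org /usr/sbin/sendmail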
+
diff --git a/pym/portage/manifest.py b/pym/portage/manifest.py
new file mode 100644
index 00000000..e621606c
--- /dev/null
+++ b/pym/portage/manifest.py
@@ -0,0 +1,618 @@
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: $
+
+import errno, os, sets
+if not hasattr(__builtins__, "set"):
+ from sets import Set as set
+
+import portage_exception, portage_versions, portage_const
+from portage_checksum import *
+from portage_exception import *
+from portage_util import write_atomic
+
+class FileNotInManifestException(PortageException):
+ pass
+
+def manifest2AuxfileFilter(filename):
+ filename = filename.strip(os.sep)
+ mysplit = filename.split(os.path.sep)
+ if "CVS" in mysplit:
+ return False
+ for x in mysplit:
+ if x.startswith("."):
+ return False
+ return not filename.startswith("digest-")
+
+def manifest2MiscfileFilter(filename):
+ filename = filename.strip(os.sep)
+ return not (filename in ["CVS", ".svn", "files", "Manifest"] or filename.endswith(".ebuild"))
+
+def guessManifestFileType(filename):
+ """ Perform a best effort guess of which type the given filename is, avoid using this if possible """
+ if filename.startswith("files" + os.sep + "digest-"):
+ return None
+ if filename.startswith("files" + os.sep):
+ return "AUX"
+ elif filename.endswith(".ebuild"):
+ return "EBUILD"
+ elif filename in ["ChangeLog", "metadata.xml"]:
+ return "MISC"
+ else:
+ return "DIST"
+
+def parseManifest2(mysplit):
+ myentry = None
+ if len(mysplit) > 4 and mysplit[0] in portage_const.MANIFEST2_IDENTIFIERS:
+ mytype = mysplit[0]
+ myname = mysplit[1]
+ mysize = int(mysplit[2])
+ myhashes = dict(zip(mysplit[3::2], mysplit[4::2]))
+ myhashes["size"] = mysize
+ myentry = Manifest2Entry(type=mytype, name=myname, hashes=myhashes)
+ return myentry
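+
+# A Manifest2 line as consumed above is "type name size" followed by
+# hashname/value pairs, e.g. (the digest is a placeholder):
+#
+#	DIST foo-1.0.tar.bz2 63542 MD5 d41d8cd98f00b204e9800998ecf8427e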
+
+def parseManifest1(mysplit):
+ myentry = None
+ if len(mysplit) == 4 and mysplit[0] in ["size"] + portage_const.MANIFEST1_HASH_FUNCTIONS:
+ myname = mysplit[2]
+ mytype = None
+ mytype = guessManifestFileType(myname)
+ if mytype == "AUX":
+ if myname.startswith("files" + os.path.sep):
+ myname = myname[6:]
+ mysize = int(mysplit[3])
+ myhashes = {mysplit[0]: mysplit[1]}
+ myhashes["size"] = mysize
+ myentry = Manifest1Entry(type=mytype, name=myname, hashes=myhashes)
+ return myentry
+
+class ManifestEntry(object):
+ __slots__ = ("type", "name", "hashes")
+ def __init__(self, **kwargs):
+ for k, v in kwargs.iteritems():
+ setattr(self, k, v)
+ def __cmp__(self, other):
+ if str(self) == str(other):
+ return 0
+ return 1
+
+class Manifest1Entry(ManifestEntry):
+ def __str__(self):
+ myhashkeys = self.hashes.keys()
+ for hashkey in myhashkeys:
+ if hashkey != "size":
+ break
+ hashvalue = self.hashes[hashkey]
+ myname = self.name
+ if self.type == "AUX" and not myname.startswith("files" + os.sep):
+ myname = os.path.join("files", myname)
+ return " ".join([hashkey, str(hashvalue), myname, str(self.hashes["size"])])
+
+class Manifest2Entry(ManifestEntry):
+ def __str__(self):
+ myline = " ".join([self.type, self.name, str(self.hashes["size"])])
+ myhashkeys = self.hashes.keys()
+ myhashkeys.remove("size")
+ myhashkeys.sort()
+ for h in myhashkeys:
+ myline += " " + h + " " + str(self.hashes[h])
+ return myline
+
+class Manifest(object):
+ parsers = (parseManifest2, parseManifest1)
+ def __init__(self, pkgdir, distdir, fetchlist_dict=None,
+ manifest1_compat=True, from_scratch=False):
+		""" Create a new Manifest instance for the package in pkgdir
+		and add compatibility entries for old portage versions if manifest1_compat == True.
+		Do not parse the Manifest file if from_scratch == True (only for internal use).
+ The fetchlist_dict parameter is required only for generation of
+ a Manifest (not needed for parsing and checking sums)."""
+ self.pkgdir = pkgdir.rstrip(os.sep) + os.sep
+ self.fhashdict = {}
+ self.hashes = portage_const.MANIFEST2_HASH_FUNCTIONS[:]
+ self.hashes.append("size")
+ if manifest1_compat:
+ self.hashes.extend(portage_const.MANIFEST1_HASH_FUNCTIONS)
+ self.hashes = sets.Set(self.hashes)
+ for t in portage_const.MANIFEST2_IDENTIFIERS:
+ self.fhashdict[t] = {}
+ if not from_scratch:
+ self._read()
+ self.compat = manifest1_compat
+ if fetchlist_dict != None:
+ self.fetchlist_dict = fetchlist_dict
+ else:
+ self.fetchlist_dict = {}
+ self.distdir = distdir
+ self.guessType = guessManifestFileType
+
+ def getFullname(self):
+ """ Returns the absolute path to the Manifest file for this instance """
+ return os.path.join(self.pkgdir, "Manifest")
+
+ def getDigests(self):
+		""" Compatibility function for old digest/manifest code, returns dict of filename:{hashfunction:hashvalue} """
+ rval = {}
+ for t in portage_const.MANIFEST2_IDENTIFIERS:
+ rval.update(self.fhashdict[t])
+ return rval
+
+ def getTypeDigests(self, ftype):
+ """ Similar to getDigests(), but restricted to files of the given type. """
+ return self.fhashdict[ftype]
+
+ def _readDigests(self, myhashdict=None):
+ """ Parse old style digest files for this Manifest instance """
+ if myhashdict is None:
+ myhashdict = {}
+ try:
+ for d in os.listdir(os.path.join(self.pkgdir, "files")):
+ if d.startswith("digest-"):
+ self._readManifest(os.path.join(self.pkgdir, "files", d), mytype="DIST",
+ myhashdict=myhashdict)
+ except (IOError, OSError), e:
+ if e.errno == errno.ENOENT:
+ pass
+ else:
+ raise
+ return myhashdict
+
+ def _readManifest(self, file_path, myhashdict=None, **kwargs):
+ """Parse a manifest or an old style digest. If myhashdict is given
+		then data will be added to it. Otherwise, a new dict will be created
+ and returned."""
+ try:
+ fd = open(file_path, "r")
+ if myhashdict is None:
+ myhashdict = {}
+ self._parseDigests(fd, myhashdict=myhashdict, **kwargs)
+ fd.close()
+ return myhashdict
+ except (OSError, IOError), e:
+ if e.errno == errno.ENOENT:
+ raise FileNotFound(file_path)
+ else:
+ raise
+
+ def _read(self):
+ """ Parse Manifest file for this instance """
+ try:
+ self._readManifest(self.getFullname(), myhashdict=self.fhashdict)
+ except FileNotFound:
+ pass
+ self._readDigests(myhashdict=self.fhashdict)
+
+
+ def _parseManifestLines(self, mylines):
+ """Parse manifest lines and return a list of manifest entries."""
+ for myline in mylines:
+ myentry = None
+ mysplit = myline.split()
+ for parser in self.parsers:
+ myentry = parser(mysplit)
+ if myentry is not None:
+ yield myentry
+ break # go to the next line
+
+ def _parseDigests(self, mylines, myhashdict=None, mytype=None):
+ """Parse manifest entries and store the data in myhashdict. If mytype
+ is specified, it will override the type for all parsed entries."""
+ if myhashdict is None:
+ myhashdict = {}
+ for myentry in self._parseManifestLines(mylines):
+ if mytype is None:
+ myentry_type = myentry.type
+ else:
+ myentry_type = mytype
+ myhashdict.setdefault(myentry_type, {})
+ myhashdict[myentry_type].setdefault(myentry.name, {})
+ myhashdict[myentry_type][myentry.name].update(myentry.hashes)
+ return myhashdict
+
+ def _writeDigests(self, force=False):
+ """ Create old style digest files for this Manifest instance """
+ cpvlist = [os.path.join(self._pkgdir_category(), x[:-7]) for x in os.listdir(self.pkgdir) if x.endswith(".ebuild")]
+ rval = []
+ try:
+ os.makedirs(os.path.join(self.pkgdir, "files"))
+ except OSError, oe:
+ if oe.errno == errno.EEXIST:
+ pass
+ else:
+ raise
+ for cpv in cpvlist:
+ dname = os.path.join(self.pkgdir, "files", "digest-%s" % self._catsplit(cpv)[1])
+ distlist = self._getCpvDistfiles(cpv)
+ missing_digests = set()
+ for f in distlist:
+ if f not in self.fhashdict["DIST"] or len(self.fhashdict["DIST"][f]) == 0:
+ missing_digests.add(f)
+ if missing_digests:
+ # This allows us to force remove of stale digests for the
+ # ebuild --force digest option.
+ distlist = [f for f in distlist if f not in missing_digests]
+ update_digest = True
+ if not force:
+ try:
+ f = open(dname, "r")
+ old_data = self._parseDigests(f)
+ f.close()
+ if len(old_data) == 1 and "DIST" in old_data:
+ new_data = self._getDigestData(distlist)
+ if "DIST" in new_data:
+ for myfile in new_data["DIST"]:
+ for hashname in \
+ new_data["DIST"][myfile].keys():
+ if hashname != "size" and hashname not in \
+ portage_const.MANIFEST1_HASH_FUNCTIONS:
+ del new_data["DIST"][myfile][hashname]
+ if new_data["DIST"] == old_data["DIST"]:
+ update_digest = False
+ except (IOError, OSError), e:
+ if errno.ENOENT == e.errno:
+ pass
+ else:
+ raise
+ if update_digest:
+ mylines = self._createDigestLines1(distlist, self.fhashdict)
+ if mylines:
+ mylines = "\n".join(mylines) + "\n"
+ else:
+ mylines = ""
+ write_atomic(dname, mylines)
+ rval.append(dname)
+ return rval
+
+ def _getDigestData(self, distlist):
+ """create a hash dict for a specific list of files"""
+ myhashdict = {}
+ for myname in distlist:
+ for mytype in self.fhashdict:
+ if myname in self.fhashdict[mytype]:
+ myhashdict.setdefault(mytype, {})
+ myhashdict[mytype].setdefault(myname, {})
+ myhashdict[mytype][myname].update(self.fhashdict[mytype][myname])
+ return myhashdict
+
+ def _createDigestLines1(self, distlist, myhashdict):
+ """ Create an old style digest file."""
+ mylines = []
+ myfiles = myhashdict["DIST"].keys()
+ myfiles.sort()
+ for f in myfiles:
+ if f in distlist:
+ myhashkeys = myhashdict["DIST"][f].keys()
+ myhashkeys.sort()
+ for h in myhashkeys:
+ if h not in portage_const.MANIFEST1_HASH_FUNCTIONS:
+ continue
+ myline = " ".join([h, str(myhashdict["DIST"][f][h]), f, str(myhashdict["DIST"][f]["size"])])
+ mylines.append(myline)
+ return mylines
+
+ def _addDigestsToManifest(self, digests, fd):
+ """ Add entries for old style digest files to Manifest file """
+ mylines = []
+ for dname in digests:
+ myhashes = perform_multiple_checksums(dname, portage_const.MANIFEST1_HASH_FUNCTIONS+["size"])
+ for h in myhashes:
+ mylines.append((" ".join([h, str(myhashes[h]), os.path.join("files", os.path.basename(dname)), str(myhashes["size"])])))
+ fd.write("\n".join(mylines))
+ fd.write("\n")
+
+ def _createManifestEntries(self):
+ mytypes = self.fhashdict.keys()
+ mytypes.sort()
+ for t in mytypes:
+ myfiles = self.fhashdict[t].keys()
+ myfiles.sort()
+ for f in myfiles:
+ myentry = Manifest2Entry(
+ type=t, name=f, hashes=self.fhashdict[t][f].copy())
+ myhashkeys = myentry.hashes.keys()
+ for h in myhashkeys:
+ if h not in ["size"] + portage_const.MANIFEST2_HASH_FUNCTIONS:
+ del myentry.hashes[h]
+ yield myentry
+ if self.compat and t != "DIST":
+ mysize = self.fhashdict[t][f]["size"]
+ myhashes = self.fhashdict[t][f]
+ for h in myhashkeys:
+ if h not in portage_const.MANIFEST1_HASH_FUNCTIONS:
+ continue
+ yield Manifest1Entry(
+ type=t, name=f, hashes={"size":mysize, h:myhashes[h]})
+
+ if self.compat:
+ cvp_list = self.fetchlist_dict.keys()
+ cvp_list.sort()
+ for cpv in cvp_list:
+ digest_path = os.path.join("files", "digest-%s" % self._catsplit(cpv)[1])
+ dname = os.path.join(self.pkgdir, digest_path)
+ try:
+ myhashes = perform_multiple_checksums(dname, portage_const.MANIFEST1_HASH_FUNCTIONS+["size"])
+ myhashkeys = myhashes.keys()
+ myhashkeys.sort()
+ for h in myhashkeys:
+ if h in portage_const.MANIFEST1_HASH_FUNCTIONS:
+ yield Manifest1Entry(type="AUX", name=digest_path,
+ hashes={"size":myhashes["size"], h:myhashes[h]})
+ except FileNotFound:
+ pass
+
+ def write(self, sign=False, force=False):
+ """ Write Manifest instance to disk, optionally signing it """
+ try:
+ if self.compat:
+ self._writeDigests()
+ myentries = list(self._createManifestEntries())
+ update_manifest = True
+ if not force:
+ try:
+ f = open(self.getFullname(), "r")
+ oldentries = list(self._parseManifestLines(f))
+ f.close()
+ if len(oldentries) == len(myentries):
+ update_manifest = False
+ for i in xrange(len(oldentries)):
+ if oldentries[i] != myentries[i]:
+ update_manifest = True
+ break
+ except (IOError, OSError), e:
+ if e.errno == errno.ENOENT:
+ pass
+ else:
+ raise
+ if update_manifest:
+ fd = open(self.getFullname(), "w")
+ for myentry in myentries:
+ fd.write("%s\n" % str(myentry))
+ fd.close()
+ if sign:
+ self.sign()
+ except (IOError, OSError), e:
+ if e.errno == errno.EACCES:
+ raise PermissionDenied(str(e))
+ raise
+
+ def sign(self):
+ """ Sign the Manifest """
+ raise NotImplementedError()
+
+ def validateSignature(self):
+ """ Validate signature on Manifest """
+ raise NotImplementedError()
+
+ def addFile(self, ftype, fname, hashdict=None, ignoreMissing=False):
+ """ Add entry to Manifest optionally using hashdict to avoid recalculation of hashes """
+ if ftype == "AUX" and not fname.startswith("files/"):
+ fname = os.path.join("files", fname)
+ if not os.path.exists(self.pkgdir+fname) and not ignoreMissing:
+ raise FileNotFound(fname)
+ if not ftype in portage_const.MANIFEST2_IDENTIFIERS:
+ raise InvalidDataType(ftype)
+ if ftype == "AUX" and fname.startswith("files"):
+ fname = fname[6:]
+ self.fhashdict[ftype][fname] = {}
+ if hashdict != None:
+ self.fhashdict[ftype][fname].update(hashdict)
+ if not portage_const.MANIFEST2_REQUIRED_HASH in self.fhashdict[ftype][fname]:
+ self.updateFileHashes(ftype, fname, checkExisting=False, ignoreMissing=ignoreMissing)
+
+ def removeFile(self, ftype, fname):
+ """ Remove given entry from Manifest """
+ del self.fhashdict[ftype][fname]
+
+ def hasFile(self, ftype, fname):
+		""" Return whether the Manifest contains an entry for the given type,filename pair """
+ return (fname in self.fhashdict[ftype])
+
+ def findFile(self, fname):
+ """ Return entrytype of the given file if present in Manifest or None if not present """
+ for t in portage_const.MANIFEST2_IDENTIFIERS:
+ if fname in self.fhashdict[t]:
+ return t
+ return None
+
+ def create(self, checkExisting=False, assumeDistHashesSometimes=False,
+ assumeDistHashesAlways=False, requiredDistfiles=[]):
+ """ Recreate this Manifest from scratch. This will not use any
+ existing checksums unless assumeDistHashesSometimes or
+ assumeDistHashesAlways is true (assumeDistHashesSometimes will only
+ cause DIST checksums to be reused if the file doesn't exist in
+ DISTDIR). The requiredDistfiles parameter specifies a list of
+ distfiles to raise a FileNotFound exception for (if no file or existing
+ checksums are available), and defaults to all distfiles when not
+ specified."""
+ if checkExisting:
+ self.checkAllHashes()
+ if assumeDistHashesSometimes or assumeDistHashesAlways:
+ distfilehashes = self.fhashdict["DIST"]
+ else:
+ distfilehashes = {}
+ self.__init__(self.pkgdir, self.distdir,
+ fetchlist_dict=self.fetchlist_dict, from_scratch=True)
+ for pkgdir, pkgdir_dirs, pkgdir_files in os.walk(self.pkgdir):
+ break
+ for f in pkgdir_files:
+ if f.endswith(".ebuild"):
+ mytype = "EBUILD"
+ elif manifest2MiscfileFilter(f):
+ mytype = "MISC"
+ else:
+ continue
+ self.fhashdict[mytype][f] = perform_multiple_checksums(self.pkgdir+f, self.hashes)
+ recursive_files = []
+ cut_len = len(os.path.join(self.pkgdir, "files") + os.sep)
+ for parentdir, dirs, files in os.walk(os.path.join(self.pkgdir, "files")):
+ for f in files:
+ full_path = os.path.join(parentdir, f)
+ recursive_files.append(full_path[cut_len:])
+ for f in recursive_files:
+ if not manifest2AuxfileFilter(f):
+ continue
+ self.fhashdict["AUX"][f] = perform_multiple_checksums(
+ os.path.join(self.pkgdir, "files", f.lstrip(os.sep)), self.hashes)
+ cpvlist = [os.path.join(self._pkgdir_category(), x[:-7]) for x in os.listdir(self.pkgdir) if x.endswith(".ebuild")]
+ distlist = set()
+ for cpv in cpvlist:
+ distlist.update(self._getCpvDistfiles(cpv))
+ if requiredDistfiles is None:
+ # This allows us to force removal of stale digests for the
+ # ebuild --force digest option (no distfiles are required).
+ requiredDistfiles = set()
+ elif len(requiredDistfiles) == 0:
+ # repoman passes in an empty list, which implies that all distfiles
+ # are required.
+ requiredDistfiles = distlist.copy()
+ for f in distlist:
+ fname = os.path.join(self.distdir, f)
+ mystat = None
+ try:
+ mystat = os.stat(fname)
+ except OSError:
+ pass
+ if f in distfilehashes and \
+ ((assumeDistHashesSometimes and mystat is None) or \
+ (assumeDistHashesAlways and mystat is None) or \
+ (assumeDistHashesAlways and mystat is not None and \
+ len(distfilehashes[f]) == len(self.hashes) and \
+ distfilehashes[f]["size"] == mystat.st_size)):
+ self.fhashdict["DIST"][f] = distfilehashes[f]
+ else:
+ try:
+ self.fhashdict["DIST"][f] = perform_multiple_checksums(fname, self.hashes)
+ except FileNotFound:
+ if f in requiredDistfiles:
+ raise
+
+ def _pkgdir_category(self):
+ return self.pkgdir.rstrip(os.sep).split(os.sep)[-2]
+
+ def _getAbsname(self, ftype, fname):
+ if ftype == "DIST":
+ absname = os.path.join(self.distdir, fname)
+ elif ftype == "AUX":
+ absname = os.path.join(self.pkgdir, "files", fname)
+ else:
+ absname = os.path.join(self.pkgdir, fname)
+ return absname
+
+ def checkAllHashes(self, ignoreMissingFiles=False):
+ for t in portage_const.MANIFEST2_IDENTIFIERS:
+ self.checkTypeHashes(t, ignoreMissingFiles=ignoreMissingFiles)
+
+ def checkTypeHashes(self, idtype, ignoreMissingFiles=False):
+ for f in self.fhashdict[idtype]:
+ self.checkFileHashes(idtype, f, ignoreMissing=ignoreMissingFiles)
+
+ def checkFileHashes(self, ftype, fname, ignoreMissing=False):
+ myhashes = self.fhashdict[ftype][fname]
+ try:
+ ok,reason = verify_all(self._getAbsname(ftype, fname), self.fhashdict[ftype][fname])
+ if not ok:
+ raise DigestException(tuple([self._getAbsname(ftype, fname)]+list(reason)))
+ return ok, reason
+ except FileNotFound, e:
+ if not ignoreMissing:
+ raise
+ return False, "File Not Found: '%s'" % str(e)
+
+ def checkCpvHashes(self, cpv, checkDistfiles=True, onlyDistfiles=False, checkMiscfiles=False):
+ """ check the hashes for all files associated to the given cpv, include all
+ AUX files and optionally all MISC files. """
+ if not onlyDistfiles:
+ self.checkTypeHashes("AUX", ignoreMissingFiles=False)
+ if checkMiscfiles:
+ self.checkTypeHashes("MISC", ignoreMissingFiles=False)
+ ebuildname = "%s.ebuild" % self._catsplit(cpv)[1]
+ self.checkFileHashes("EBUILD", ebuildname, ignoreMissing=False)
+ if checkDistfiles or onlyDistfiles:
+ for f in self._getCpvDistfiles(cpv):
+ self.checkFileHashes("DIST", f, ignoreMissing=False)
+
+ def _getCpvDistfiles(self, cpv):
+ """ Get a list of all DIST files associated to the given cpv """
+ return self.fetchlist_dict[cpv]
+
+ def getDistfilesSize(self, fetchlist):
+ total_bytes = 0
+ for f in fetchlist:
+ total_bytes += int(self.fhashdict["DIST"][f]["size"])
+ return total_bytes
+
+ def updateFileHashes(self, ftype, fname, checkExisting=True, ignoreMissing=True, reuseExisting=False):
+ """ Regenerate hashes for the given file """
+ if checkExisting:
+ self.checkFileHashes(ftype, fname, ignoreMissing=ignoreMissing)
+ if not ignoreMissing and not self.fhashdict[ftype].has_key(fname):
+ raise FileNotInManifestException(fname)
+ if not self.fhashdict[ftype].has_key(fname):
+ self.fhashdict[ftype][fname] = {}
+ myhashkeys = list(self.hashes)
+ if reuseExisting:
+ for k in [h for h in self.fhashdict[ftype][fname] if h in myhashkeys]:
+ myhashkeys.remove(k)
+ myhashes = perform_multiple_checksums(self._getAbsname(ftype, fname), myhashkeys)
+ self.fhashdict[ftype][fname].update(myhashes)
+
+ def updateTypeHashes(self, idtype, checkExisting=False, ignoreMissingFiles=True):
+ """ Regenerate all hashes for all files of the given type """
+		for fname in self.fhashdict[idtype]:
+			self.updateFileHashes(idtype, fname, checkExisting,
+				ignoreMissing=ignoreMissingFiles)
+
+ def updateAllHashes(self, checkExisting=False, ignoreMissingFiles=True):
+ """ Regenerate all hashes for all files in this Manifest. """
+		for idtype in portage_const.MANIFEST2_IDENTIFIERS:
+			self.updateTypeHashes(idtype, checkExisting=checkExisting,
+				ignoreMissingFiles=ignoreMissingFiles)
+
+ def updateCpvHashes(self, cpv, ignoreMissingFiles=True):
+ """ Regenerate all hashes associated to the given cpv (includes all AUX and MISC
+ files)."""
+ self.updateTypeHashes("AUX", ignoreMissingFiles=ignoreMissingFiles)
+ self.updateTypeHashes("MISC", ignoreMissingFiles=ignoreMissingFiles)
+ ebuildname = "%s.ebuild" % self._catsplit(cpv)[1]
+		self.updateFileHashes("EBUILD", ebuildname, ignoreMissing=ignoreMissingFiles)
+		for f in self._getCpvDistfiles(cpv):
+			self.updateFileHashes("DIST", f, ignoreMissing=ignoreMissingFiles)
+
+ def updateHashesGuessType(self, fname, *args, **kwargs):
+ """ Regenerate hashes for the given file (guesses the type and then
+ calls updateFileHashes)."""
+ mytype = self.guessType(fname)
+ if mytype == "AUX":
+ fname = fname[len("files" + os.sep):]
+ elif mytype is None:
+ return
+ myrealtype = self.findFile(fname)
+ if myrealtype is not None:
+ mytype = myrealtype
+ return self.updateFileHashes(mytype, fname, *args, **kwargs)
+
+ def getFileData(self, ftype, fname, key):
+ """ Return the value of a specific (type,filename,key) triple, mainly useful
+ to get the size for distfiles."""
+ return self.fhashdict[ftype][fname][key]
+
+ def getVersions(self):
+ """ Returns a list of manifest versions present in the manifest file. """
+ rVal = []
+ mfname = self.getFullname()
+ if not os.path.exists(mfname):
+ return rVal
+ myfile = open(mfname, "r")
+ lines = myfile.readlines()
+ myfile.close()
+ for l in lines:
+ mysplit = l.split()
+ if len(mysplit) == 4 and mysplit[0] in portage_const.MANIFEST1_HASH_FUNCTIONS and not 1 in rVal:
+ rVal.append(1)
+ elif len(mysplit) > 4 and mysplit[0] in portage_const.MANIFEST2_IDENTIFIERS and ((len(mysplit) - 3) % 2) == 0 and not 2 in rVal:
+ rVal.append(2)
+ return rVal
+
+ def _catsplit(self, pkg_key):
+ """Split a category and package, returning a list of [cat, pkg].
+ This is compatible with portage.catsplit()"""
+ return pkg_key.split("/", 1)
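+
+# Usage sketch (hypothetical tree paths): regenerate and write the
+# Manifest for a single package directory.
+#
+#	m = Manifest("/usr/portage/app-misc/foo", "/usr/portage/distfiles",
+#		fetchlist_dict={"app-misc/foo-1.0": ["foo-1.0.tar.bz2"]})
+#	m.create(assumeDistHashesSometimes=True)
+#	m.write()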
diff --git a/pym/portage/news.py b/pym/portage/news.py
new file mode 100644
index 00000000..b54261d9
--- /dev/null
+++ b/pym/portage/news.py
@@ -0,0 +1,268 @@
+# portage: news management code
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+from portage_const import INCREMENTALS, PROFILE_PATH, NEWS_LIB_PATH
+from portage import config, vartree, vardbapi, portdbapi
+from portage_util import ensure_dirs, apply_permissions
+from portage_data import portage_gid
+from portage_locks import lockfile, unlockfile, lockdir, unlockdir
+from portage_exception import FileNotFound
+import os, re
+
+class NewsManager(object):
+ """
+ This object manages GLEP 42 style news items. It will cache news items
+ that have previously shown up and notify users when there are relevant news
+ items that apply to their packages that the user has not previously read.
+
+ Creating a news manager requires:
+ root - typically ${ROOT} see man make.conf and man emerge for details
+ NEWS_PATH - path to news items; usually $REPODIR/metadata/news
+ UNREAD_PATH - path to the news.repoid.unread file; this helps us track news items
+
+ """
+
+ TIMESTAMP_FILE = "news-timestamp"
+
+ def __init__( self, root, NEWS_PATH, UNREAD_PATH, LANGUAGE_ID='en' ):
+ self.NEWS_PATH = NEWS_PATH
+ self.UNREAD_PATH = UNREAD_PATH
+ self.TIMESTAMP_PATH = os.path.join( root, NEWS_LIB_PATH, NewsManager.TIMESTAMP_FILE )
+ self.target_root = root
+ self.LANGUAGE_ID = LANGUAGE_ID
+ self.config = config( config_root = os.environ.get("PORTAGE_CONFIGROOT", "/"),
+ target_root = root, config_incrementals = INCREMENTALS)
+ self.vdb = vardbapi( settings = self.config, root = root,
+ vartree = vartree( root = root, settings = self.config ) )
+ self.portdb = portdbapi( porttree_root = self.config["PORTDIR"], mysettings = self.config )
+
+ # Ensure that the unread path exists and is writable.
+ dirmode = 02070
+ modemask = 02
+ ensure_dirs(self.UNREAD_PATH, mode=dirmode, mask=modemask, gid=portage_gid)
+
+ def updateItems( self, repoid ):
+ """
+ Figure out which news items from NEWS_PATH are both unread and relevant to
+ the user (according to the GLEP 42 standards of relevancy). Then add these
+ items into the news.repoid.unread file.
+ """
+
+ repos = self.portdb.getRepositories()
+ if repoid not in repos:
+ raise ValueError("Invalid repoID: %s" % repoid)
+
+ timestamp_file = self.TIMESTAMP_PATH + repoid
+ if os.path.exists(timestamp_file):
+ # Make sure the timestamp has correct permissions.
+			apply_permissions( filename=timestamp_file,
+				uid=int(self.config["PORTAGE_INST_UID"]), gid=portage_gid, mode=0664 )
+ timestamp = os.stat(timestamp_file).st_mtime
+ else:
+ timestamp = 0
+
+ path = os.path.join( self.portdb.getRepositoryPath( repoid ), self.NEWS_PATH )
+ newsdir_lock = None
+ try:
+ newsdir_lock = lockdir( self.portdb.getRepositoryPath(repoid) )
+ # Skip reading news for repoid if the news dir does not exist. Requested by
+ # NightMorph :)
+ if not os.path.exists( path ):
+ return None
+ news = os.listdir( path )
+ updates = []
+ for item in news:
+ try:
+ file = os.path.join( path, item, item + "." + self.LANGUAGE_ID + ".txt")
+ tmp = NewsItem( file , timestamp )
+ except TypeError:
+ continue
+
+				if tmp.isRelevant( profile=os.readlink(PROFILE_PATH), config=self.config, vardb=self.vdb):
+ updates.append( tmp )
+ finally:
+ if newsdir_lock:
+ unlockdir(newsdir_lock)
+
+ del path
+
+ path = os.path.join( self.UNREAD_PATH, "news-" + repoid + ".unread" )
+		unread_lock = lockfile( path )
+		try:
+			if not os.path.exists( path ):
+				# Create the file if it does not exist.
+				open( path, "w" )
+			# Make sure the unread file has the correct permissions.
+			apply_permissions( filename=path,
+				uid=int(self.config["PORTAGE_INST_UID"]), gid=portage_gid, mode=0664 )
+			unread_file = open( path, "a" )
+
+ for item in updates:
+ unread_file.write( item.path + "\n" )
+ unread_file.close()
+ finally:
+ unlockfile(unread_lock)
+
+ # Touch the timestamp file
+ f = open(timestamp_file, "w")
+ f.close()
+
+ def getUnreadItems( self, repoid, update=False ):
+ """
+ Determine if there are unread relevant items in news.repoid.unread.
+ If there are unread items return their number.
+ If update is specified, updateNewsItems( repoid ) will be called to
+ check for new items.
+ """
+
+ if update:
+ self.updateItems( repoid )
+
+ unreadfile = os.path.join( self.UNREAD_PATH, "news-"+ repoid +".unread" )
+		unread_lock = None
+		try:
+			try:
+				unread_lock = lockfile(unreadfile)
+ # Set correct permissions on the news-repoid.unread file
+ apply_permissions( filename=unreadfile,
+ uid=int(self.config["PORTAGE_INST_UID"]), gid=portage_gid, mode=0664 )
+
+ if os.path.exists( unreadfile ):
+ unread = open( unreadfile ).readlines()
+ if len(unread):
+ return len(unread)
+ except FileNotFound:
+ pass # unread file may not exist
+ finally:
+ if unread_lock:
+ unlockfile(unread_lock)
+
+_installedRE = re.compile("Display-If-Installed:(.*)\n")
+_profileRE = re.compile("Display-If-Profile:(.*)\n")
+_keywordRE = re.compile("Display-If-Keyword:(.*)\n")
+
+class NewsItem(object):
+ """
+ This class encapsulates a GLEP 42 style news item.
+	Its purpose is to wrap parsing of these news items such that portage can determine
+ whether a particular item is 'relevant' or not. This requires parsing the item
+ and determining 'relevancy restrictions'; these include "Display if Installed" or
+ "display if arch: x86" and so forth.
+
+ Creation of a news item involves passing in the path to the particular news item.
+
+ """
+
+ def __init__( self, path, cache_mtime = 0 ):
+ """
+		For a given news item, we only want it if its path is a file and its
+		mtime is newer than the cached timestamp.
+ """
+ if not os.path.isfile( path ):
+ raise TypeError
+ if not os.stat( path ).st_mtime > cache_mtime:
+ raise TypeError
+ self.path = path
+ self._parsed = False
+
+ def isRelevant( self, vardb, config, profile ):
+ """
+		This function takes a dict of keyword arguments; one should pass in any
+		objects needed to do lookups (like what keywords we are on, what profile,
+		and a vardb so we can look at installed packages).
+ Each restriction will pluck out the items that are required for it to match
+ or raise a ValueError exception if the required object is not present.
+ """
+
+ if not len(self.restrictions):
+ return True # no restrictions to match means everyone should see it
+
+ kwargs = { 'vardb' : vardb,
+ 'config' : config,
+ 'profile' : profile }
+
+ for restriction in self.restrictions:
+ if restriction.checkRestriction( **kwargs ):
+ return True
+
+ return False # No restrictions were met; thus we aren't relevant :(
+
+ def parse( self ):
+ lines = open(self.path).readlines()
+ self.restrictions = []
+ for line in lines:
+			#Optimization to avoid regex matches on lines that
+ #will never match
+ if not line.startswith("D"):
+ continue
+ restricts = { _installedRE : DisplayInstalledRestriction,
+ _profileRE : DisplayProfileRestriction,
+ _keywordRE : DisplayKeywordRestriction }
+ for regex, restriction in restricts.iteritems():
+ match = regex.match(line)
+ if match:
+ self.restrictions.append( restriction( match.groups()[0].strip() ) )
+ continue
+ self._parsed = True
+
+ def __getattr__( self, attr ):
+ if not self._parsed:
+ self.parse()
+		try:
+			return self.__dict__[attr]
+		except KeyError:
+			# __getattr__ must raise AttributeError rather than KeyError
+			# so that hasattr() and friends behave correctly.
+			raise AttributeError(attr)
+
+class DisplayRestriction(object):
+ """
+ A base restriction object representing a restriction of display.
+	News items may have 'relevancy restrictions' that limit the systems
+	they apply to. In this case we need a manner of figuring out if
+	a particular item is relevant or not. If any of its restrictions
+	are met, then it is displayed.
+ """
+
+ def checkRestriction( self, **kwargs ):
+		raise NotImplementedError("Derived class should override this method")
+
+class DisplayProfileRestriction(DisplayRestriction):
+ """
+ A profile restriction where a particular item shall only be displayed
+ if the user is running a specific profile.
+ """
+
+ def __init__( self, profile ):
+ self.profile = profile
+
+ def checkRestriction( self, **kwargs ):
+ if self.profile == kwargs['profile']:
+ return True
+ return False
+
+class DisplayKeywordRestriction(DisplayRestriction):
+ """
+ A keyword restriction where a particular item shall only be displayed
+ if the user is running a specific keyword.
+ """
+
+ def __init__( self, keyword ):
+ self.keyword = keyword
+
+ def checkRestriction( self, **kwargs ):
+ if kwargs['config']["ARCH"] == self.keyword:
+ return True
+ return False
+
+class DisplayInstalledRestriction(DisplayRestriction):
+ """
+ An Installation restriction where a particular item shall only be displayed
+ if the user has that item installed.
+ """
+
+ def __init__( self, cpv ):
+ self.cpv = cpv
+
+ def checkRestriction( self, **kwargs ):
+ vdb = kwargs['vardb']
+ if vdb.match( self.cpv ):
+ return True
+ return False
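+
+# A minimal usage sketch, assuming a GLEP 42 item exists at the hypothetical
+# path below and that vdb/cfg are pre-built vardb and config objects:
+#
+#   >>> item = NewsItem("/usr/portage/metadata/news/2007-01-01-foo/2007-01-01-foo.en.txt")
+#   >>> item.isRelevant(vardb=vdb, config=cfg, profile="default-linux/x86/2006.1")
+#   True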
diff --git a/pym/portage/output.py b/pym/portage/output.py
new file mode 100644
index 00000000..62ec975f
--- /dev/null
+++ b/pym/portage/output.py
@@ -0,0 +1,393 @@
+# Copyright 1998-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+__docformat__ = "epytext"
+
+import commands,errno,os,re,shlex,sys
+from portage_const import COLOR_MAP_FILE
+from portage_util import writemsg
+from portage_exception import PortageException, ParseError, PermissionDenied, FileNotFound
+
+havecolor=1
+dotitles=1
+
+esc_seq = "\x1b["
+
+g_attr = {}
+g_attr["normal"] = 0
+
+g_attr["bold"] = 1
+g_attr["faint"] = 2
+g_attr["standout"] = 3
+g_attr["underline"] = 4
+g_attr["blink"] = 5
+g_attr["overline"] = 6 # Why is overline actually useful?
+g_attr["reverse"] = 7
+g_attr["invisible"] = 8
+
+g_attr["no-attr"] = 22
+g_attr["no-standout"] = 23
+g_attr["no-underline"] = 24
+g_attr["no-blink"] = 25
+g_attr["no-overline"] = 26
+g_attr["no-reverse"] = 27
+# 28 isn't defined?
+# 29 isn't defined?
+g_attr["black"] = 30
+g_attr["red"] = 31
+g_attr["green"] = 32
+g_attr["yellow"] = 33
+g_attr["blue"] = 34
+g_attr["magenta"] = 35
+g_attr["cyan"] = 36
+g_attr["white"] = 37
+# 38 isn't defined?
+g_attr["default"] = 39
+g_attr["bg_black"] = 40
+g_attr["bg_red"] = 41
+g_attr["bg_green"] = 42
+g_attr["bg_yellow"] = 43
+g_attr["bg_blue"] = 44
+g_attr["bg_magenta"] = 45
+g_attr["bg_cyan"] = 46
+g_attr["bg_white"] = 47
+g_attr["bg_default"] = 49
+
+
+# make_seq("blue", "black", "normal")
+def color(fg, bg="default", attr=["normal"]):
+ mystr = esc_seq[:] + "%02d" % g_attr[fg]
+ for x in [bg]+attr:
+ mystr += ";%02d" % g_attr[x]
+ return mystr+"m"
+
+
+
+codes={}
+codes["reset"] = esc_seq + "39;49;00m"
+
+codes["bold"] = esc_seq + "01m"
+codes["faint"] = esc_seq + "02m"
+codes["standout"] = esc_seq + "03m"
+codes["underline"] = esc_seq + "04m"
+codes["blink"] = esc_seq + "05m"
+codes["overline"] = esc_seq + "06m" # Who made this up? Seriously.
+
+ansi_color_codes = []
+for x in xrange(30, 38):
+ ansi_color_codes.append("%im" % x)
+ ansi_color_codes.append("%i;01m" % x)
+
+rgb_ansi_colors = ['0x000000', '0x555555', '0xAA0000', '0xFF5555', '0x00AA00',
+ '0x55FF55', '0xAA5500', '0xFFFF55', '0x0000AA', '0x5555FF', '0xAA00AA',
+ '0xFF55FF', '0x00AAAA', '0x55FFFF', '0xAAAAAA', '0xFFFFFF']
+
+for x in xrange(len(rgb_ansi_colors)):
+ codes[rgb_ansi_colors[x]] = esc_seq + ansi_color_codes[x]
+
+del x
+
+codes["black"] = codes["0x000000"]
+codes["darkgray"] = codes["0x555555"]
+
+codes["red"] = codes["0xFF5555"]
+codes["darkred"] = codes["0xAA0000"]
+
+codes["green"] = codes["0x55FF55"]
+codes["darkgreen"] = codes["0x00AA00"]
+
+codes["yellow"] = codes["0xFFFF55"]
+codes["brown"] = codes["0xAA5500"]
+
+codes["blue"] = codes["0x5555FF"]
+codes["darkblue"] = codes["0x0000AA"]
+
+codes["fuchsia"] = codes["0xFF55FF"]
+codes["purple"] = codes["0xAA00AA"]
+
+codes["turquoise"] = codes["0x55FFFF"]
+codes["teal"] = codes["0x00AAAA"]
+
+codes["white"] = codes["0xFFFFFF"]
+codes["lightgray"] = codes["0xAAAAAA"]
+
+codes["darkteal"] = codes["turquoise"]
+codes["darkyellow"] = codes["brown"]
+codes["fuscia"] = codes["fuchsia"]
+codes["white"] = codes["bold"]
+
+# Colors from /sbin/functions.sh
+codes["GOOD"] = codes["green"]
+codes["WARN"] = codes["yellow"]
+codes["BAD"] = codes["red"]
+codes["HILITE"] = codes["teal"]
+codes["BRACKET"] = codes["blue"]
+
+# Portage functions
+codes["INFORM"] = codes["darkgreen"]
+codes["UNMERGE_WARN"] = codes["red"]
+codes["MERGE_LIST_PROGRESS"] = codes["yellow"]
+
+def parse_color_map():
+ myfile = COLOR_MAP_FILE
+ ansi_code_pattern = re.compile("^[0-9;]*m$")
+ def strip_quotes(token, quotes):
+ if token[0] in quotes and token[0] == token[-1]:
+ token = token[1:-1]
+ return token
+ try:
+ s = shlex.shlex(open(myfile))
+ s.wordchars = s.wordchars + ";" # for ansi codes
+ d = {}
+ while True:
+ k, o, v = s.get_token(), s.get_token(), s.get_token()
+ if k is s.eof:
+ break
+ if o != "=":
+ raise ParseError("%s%s'%s'" % (s.error_leader(myfile, s.lineno), "expected '=' operator: ", o))
+ k = strip_quotes(k, s.quotes)
+ v = strip_quotes(v, s.quotes)
+ if ansi_code_pattern.match(v):
+ codes[k] = esc_seq + v
+ else:
+ if v in codes:
+ codes[k] = codes[v]
+ else:
+ raise ParseError("%s%s'%s'" % (s.error_leader(myfile, s.lineno), "Undefined: ", v))
+ except (IOError, OSError), e:
+ if e.errno == errno.ENOENT:
+ raise FileNotFound(myfile)
+ elif e.errno == errno.EACCES:
+ raise PermissionDenied(myfile)
+ raise
+
+try:
+ parse_color_map()
+except FileNotFound, e:
+ pass
+except PortageException, e:
+ writemsg("%s\n" % str(e))
+
+def nc_len(mystr):
+	# esc_seq ends with "[", which opens a character class here: the
+	# combined pattern is "\x1b[^m]+m", stripping ANSI color sequences
+	# such as "\x1b[32m" before measuring the length.
+	tmp = re.sub(esc_seq + "^m]+m", "", mystr)
+ return len(tmp)
+
+def xtermTitle(mystr, raw=False):
+ if havecolor and dotitles and os.environ.has_key("TERM") and sys.stderr.isatty():
+ myt=os.environ["TERM"]
+ legal_terms = ["xterm","Eterm","aterm","rxvt","screen","kterm","rxvt-unicode","gnome"]
+ for term in legal_terms:
+ if myt.startswith(term):
+ if not raw:
+ mystr = "\x1b]0;%s\x07" % mystr
+ sys.stderr.write(mystr)
+ sys.stderr.flush()
+ break
+
+default_xterm_title = None
+
+def xtermTitleReset():
+ global default_xterm_title
+ if default_xterm_title is None:
+ prompt_command = os.getenv('PROMPT_COMMAND')
+ if prompt_command == "":
+ default_xterm_title = ""
+ elif prompt_command is not None:
+ default_xterm_title = commands.getoutput(prompt_command)
+ else:
+ pwd = os.getenv('PWD','')
+ home = os.getenv('HOME', '')
+ if home != '' and pwd.startswith(home):
+ pwd = '~' + pwd[len(home):]
+ default_xterm_title = '\x1b]0;%s@%s:%s\x07' % (
+ os.getenv('LOGNAME', ''), os.getenv('HOSTNAME', '').split('.', 1)[0], pwd)
+ xtermTitle(default_xterm_title, raw=True)
+
+def notitles():
+ "turn off title setting"
+	global dotitles
+	dotitles=0
+
+def nocolor():
+ "turn off colorization"
+ global havecolor
+ havecolor=0
+
+def resetColor():
+ return codes["reset"]
+
+def colorize(color_key, text):
+ global havecolor
+ if havecolor:
+ return codes[color_key] + text + codes["reset"]
+ else:
+ return text
+
+compat_functions_colors = ["bold","white","teal","turquoise","darkteal",
+ "fuscia","fuchsia","purple","blue","darkblue","green","darkgreen","yellow",
+ "brown","darkyellow","red","darkred"]
+
+def create_color_func(color_key):
+ def derived_func(*args):
+ newargs = list(args)
+ newargs.insert(0, color_key)
+ return colorize(*newargs)
+ return derived_func
+
+for c in compat_functions_colors:
+ setattr(sys.modules[__name__], c, create_color_func(c))
+
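+# Example: the loop above generates a module-level helper per color name,
+# so the following two calls return the same escaped string:
+#
+#   >>> green("done") == colorize("green", "done")
+#   True
+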
+class EOutput:
+ """
+ Performs fancy terminal formatting for status and informational messages.
+
+ The provided methods produce identical terminal output to the eponymous
+ functions in the shell script C{/sbin/functions.sh} and also accept
+ identical parameters.
+
+ This is not currently a drop-in replacement however, as the output-related
+ functions in C{/sbin/functions.sh} are oriented for use mainly by system
+ init scripts and ebuilds and their output can be customized via certain
+ C{RC_*} environment variables (see C{/etc/conf.d/rc}). B{EOutput} is not
+ customizable in this manner since it's intended for more general uses.
+ Likewise, no logging is provided.
+
+ @ivar quiet: Specifies if output should be silenced.
+ @type quiet: BooleanType
+ @ivar term_columns: Width of terminal in characters. Defaults to the value
+ specified by the shell's C{COLUMNS} variable, else to the queried tty
+ size, else to C{80}.
+ @type term_columns: IntType
+ """
+
+ def __init__(self):
+ self.__last_e_cmd = ""
+ self.__last_e_len = 0
+ self.quiet = False
+ columns = 0
+ try:
+ columns = int(os.getenv("COLUMNS", 0))
+ except ValueError:
+ pass
+ if columns <= 0:
+ try:
+ columns = int(commands.getoutput(
+ 'set -- `stty size 2>/dev/null` ; echo "$2"'))
+ except ValueError:
+ pass
+ if columns <= 0:
+ columns = 80
+ self.term_columns = columns
+
+ def __eend(self, caller, errno, msg):
+ if errno == 0:
+ status_brackets = colorize("BRACKET", "[ ") + colorize("GOOD", "ok") + colorize("BRACKET", " ]")
+ else:
+ status_brackets = colorize("BRACKET", "[ ") + colorize("BAD", "!!") + colorize("BRACKET", " ]")
+ if msg:
+ if caller == "eend":
+ self.eerror(msg[0])
+ elif caller == "ewend":
+ self.ewarn(msg[0])
+ if self.__last_e_cmd != "ebegin":
+ self.__last_e_len = 0
+ print "%*s%s" % ((self.term_columns - self.__last_e_len - 6), "", status_brackets)
+ sys.stdout.flush()
+
+ def ebegin(self, msg):
+ """
+ Shows a message indicating the start of a process.
+
+ @param msg: A very brief (shorter than one line) description of the
+ starting process.
+ @type msg: StringType
+ """
+ msg += " ..."
+ if not self.quiet:
+ self.einfon(msg)
+ self.__last_e_len = len(msg) + 4
+ self.__last_e_cmd = "ebegin"
+
+ def eend(self, errno, *msg):
+ """
+ Indicates the completion of a process, optionally displaying a message
+ via L{eerror} if the process's exit status isn't C{0}.
+
+ @param errno: A standard UNIX C{errno} code returned by processes upon
+ exit.
+ @type errno: IntType
+ @param msg: I{(optional)} An error message, typically a standard UNIX
+ error string corresponding to C{errno}.
+ @type msg: StringType
+ """
+ if not self.quiet:
+ self.__eend("eend", errno, msg)
+ self.__last_e_cmd = "eend"
+
+ def eerror(self, msg):
+ """
+ Shows an error message.
+
+ @param msg: A very brief (shorter than one line) error message.
+ @type msg: StringType
+ """
+ if not self.quiet:
+ if self.__last_e_cmd == "ebegin": print
+ print colorize("BAD", " * ") + msg
+ sys.stdout.flush()
+ self.__last_e_cmd = "eerror"
+
+ def einfo(self, msg):
+ """
+ Shows an informative message terminated with a newline.
+
+ @param msg: A very brief (shorter than one line) informative message.
+ @type msg: StringType
+ """
+ if not self.quiet:
+ if self.__last_e_cmd == "ebegin": print
+ print colorize("GOOD", " * ") + msg
+ sys.stdout.flush()
+ self.__last_e_cmd = "einfo"
+
+ def einfon(self, msg):
+ """
+ Shows an informative message terminated without a newline.
+
+ @param msg: A very brief (shorter than one line) informative message.
+ @type msg: StringType
+ """
+ if not self.quiet:
+ if self.__last_e_cmd == "ebegin": print
+			print colorize("GOOD", " * ") + msg,
+ sys.stdout.flush()
+ self.__last_e_cmd = "einfon"
+
+ def ewarn(self, msg):
+ """
+ Shows a warning message.
+
+ @param msg: A very brief (shorter than one line) warning message.
+ @type msg: StringType
+ """
+ if not self.quiet:
+ if self.__last_e_cmd == "ebegin": print
+ print colorize("WARN", " * ") + msg
+ sys.stdout.flush()
+ self.__last_e_cmd = "ewarn"
+
+ def ewend(self, errno, *msg):
+ """
+ Indicates the completion of a process, optionally displaying a message
+ via L{ewarn} if the process's exit status isn't C{0}.
+
+ @param errno: A standard UNIX C{errno} code returned by processes upon
+ exit.
+ @type errno: IntType
+ @param msg: I{(optional)} A warning message, typically a standard UNIX
+ error string corresponding to C{errno}.
+ @type msg: StringType
+ """
+ if not self.quiet:
+ self.__eend("ewend", errno, msg)
+ self.__last_e_cmd = "ewend"
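+
+# A short usage sketch (output goes to stdout; the width comes from the
+# COLUMNS variable or the tty):
+#
+#   >>> out = EOutput()
+#   >>> out.ebegin("Syncing portage tree")
+#   >>> out.eend(0)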
diff --git a/pym/portage/selinux.py b/pym/portage/selinux.py
new file mode 100644
index 00000000..e4d80fa1
--- /dev/null
+++ b/pym/portage/selinux.py
@@ -0,0 +1,8 @@
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: $
+
+import selinux
+from selinux import is_selinux_enabled
+from selinux_aux import setexec, secure_symlink, secure_rename, \
+ secure_copy, secure_mkdir, getcontext, get_sid, get_lsid
diff --git a/pym/portage/update.py b/pym/portage/update.py
new file mode 100644
index 00000000..1a2a1d88
--- /dev/null
+++ b/pym/portage/update.py
@@ -0,0 +1,224 @@
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: $
+
+import errno, os, re, sys
+
+from portage_util import ConfigProtect, grabfile, new_protect_filename, \
+ normalize_path, write_atomic, writemsg
+from portage_exception import DirectoryNotFound, PortageException
+from portage_versions import ververify
+from portage_dep import dep_getkey, get_operator, isvalidatom, isjustname
+from portage_const import USER_CONFIG_PATH, WORLD_FILE
+
+ignored_dbentries = ("CONTENTS", "environment.bz2")
+
+def update_dbentry(update_cmd, mycontent):
+ if update_cmd[0] == "move":
+ old_value, new_value = update_cmd[1], update_cmd[2]
+ if mycontent.count(old_value):
+			old_value = re.escape(old_value)
+ mycontent = re.sub(old_value+"(:|$|\\s)", new_value+"\\1", mycontent)
+ def myreplace(matchobj):
+ if ververify(matchobj.group(2)):
+ return "%s-%s" % (new_value, matchobj.group(2))
+ else:
+ return "".join(matchobj.groups())
+ mycontent = re.sub("(%s-)(\\S*)" % old_value, myreplace, mycontent)
+ elif update_cmd[0] == "slotmove" and get_operator(update_cmd[1]) is None:
+ pkg, origslot, newslot = update_cmd[1:]
+ old_value = "%s:%s" % (pkg, origslot)
+ if mycontent.count(old_value):
+ old_value = re.escape(old_value)
+ new_value = "%s:%s" % (pkg, newslot)
+ mycontent = re.sub(old_value+"($|\\s)", new_value+"\\1", mycontent)
+ return mycontent
+
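+# Example: applying a "move" update to raw dbentry content; both plain
+# atoms and versioned entries are rewritten:
+#
+#   >>> update_dbentry(["move", "app-misc/foo", "app-misc/bar"],
+#   ...     "app-misc/foo-1.0 dev-libs/baz-2.1\n")
+#   'app-misc/bar-1.0 dev-libs/baz-2.1\n'
+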
+def update_dbentries(update_iter, mydata):
+ """Performs update commands and returns a
+ dict containing only the updated items."""
+ updated_items = {}
+ for k, mycontent in mydata.iteritems():
+ if k not in ignored_dbentries:
+ orig_content = mycontent
+ for update_cmd in update_iter:
+ mycontent = update_dbentry(update_cmd, mycontent)
+ if mycontent != orig_content:
+ updated_items[k] = mycontent
+ return updated_items
+
+def fixdbentries(update_iter, dbdir):
+ """Performs update commands which result in search and replace operations
+ for each of the files in dbdir (excluding CONTENTS and environment.bz2).
+ Returns True when actual modifications are necessary and False otherwise."""
+ mydata = {}
+ for myfile in [f for f in os.listdir(dbdir) if f not in ignored_dbentries]:
+ file_path = os.path.join(dbdir, myfile)
+ f = open(file_path, "r")
+ mydata[myfile] = f.read()
+ f.close()
+ updated_items = update_dbentries(update_iter, mydata)
+ for myfile, mycontent in updated_items.iteritems():
+ file_path = os.path.join(dbdir, myfile)
+ write_atomic(file_path, mycontent)
+ return len(updated_items) > 0
+
+def grab_updates(updpath, prev_mtimes=None):
+ """Returns all the updates from the given directory as a sorted list of
+ tuples, each containing (file_path, statobj, content). If prev_mtimes is
+ given then only updates with differing mtimes are considered."""
+ try:
+ mylist = os.listdir(updpath)
+ except OSError, oe:
+ if oe.errno == errno.ENOENT:
+ raise DirectoryNotFound(updpath)
+ raise
+ if prev_mtimes is None:
+ prev_mtimes = {}
+ # validate the file name (filter out CVS directory, etc...)
+ mylist = [myfile for myfile in mylist if len(myfile) == 7 and myfile[1:3] == "Q-"]
+ if len(mylist) == 0:
+ return []
+
+ # update names are mangled to make them sort properly
+ mylist = [myfile[3:]+"-"+myfile[:2] for myfile in mylist]
+ mylist.sort()
+ mylist = [myfile[5:]+"-"+myfile[:4] for myfile in mylist]
+
+ update_data = []
+ for myfile in mylist:
+ file_path = os.path.join(updpath, myfile)
+ mystat = os.stat(file_path)
+ if file_path not in prev_mtimes or \
+ long(prev_mtimes[file_path]) != long(mystat.st_mtime):
+ f = open(file_path)
+ content = f.read()
+ f.close()
+ update_data.append((file_path, mystat, content))
+ return update_data
+
+def parse_updates(mycontent):
+ """Valid updates are returned as a list of split update commands."""
+ myupd = []
+ errors = []
+ mylines = mycontent.splitlines()
+ for myline in mylines:
+ mysplit = myline.split()
+ if len(mysplit) == 0:
+ continue
+ if mysplit[0] not in ("move", "slotmove"):
+ errors.append("ERROR: Update type not recognized '%s'" % myline)
+ continue
+ if mysplit[0] == "move":
+ if len(mysplit) != 3:
+ errors.append("ERROR: Update command invalid '%s'" % myline)
+ continue
+			orig_value, new_value = mysplit[1], mysplit[2]
+			malformed = False
+			for cp in (orig_value, new_value):
+				if not (isvalidatom(cp) and isjustname(cp)):
+					errors.append(
+						"ERROR: Malformed update entry '%s'" % myline)
+					malformed = True
+					break
+			if malformed:
+				# A plain 'continue' in the loop above would only affect the
+				# inner loop and let the entry reach myupd.append() below.
+				continue
+ if mysplit[0] == "slotmove":
+ if len(mysplit)!=4:
+ errors.append("ERROR: Update command invalid '%s'" % myline)
+ continue
+ pkg, origslot, newslot = mysplit[1], mysplit[2], mysplit[3]
+ if not isvalidatom(pkg):
+ errors.append("ERROR: Malformed update entry '%s'" % myline)
+ continue
+
+ # The list of valid updates is filtered by continue statements above.
+ myupd.append(mysplit)
+ return myupd, errors
+
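+# Example: well-formed commands are returned split into words, and anything
+# else is reported in the errors list:
+#
+#   >>> parse_updates("move app-misc/foo app-misc/bar\n")
+#   ([['move', 'app-misc/foo', 'app-misc/bar']], [])
+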
+def update_config_files(config_root, protect, protect_mask, update_iter):
+ """Perform global updates on /etc/portage/package.* and the world file.
+ config_root - location of files to update
+ protect - list of paths from CONFIG_PROTECT
+ protect_mask - list of paths from CONFIG_PROTECT_MASK
+ update_iter - list of update commands as returned from parse_updates()"""
+ config_root = normalize_path(config_root)
+ update_files = {}
+ file_contents = {}
+ myxfiles = ["package.mask", "package.unmask", \
+ "package.keywords", "package.use"]
+ myxfiles += [os.path.join("profile", x) for x in myxfiles]
+ abs_user_config = os.path.join(config_root,
+ USER_CONFIG_PATH.lstrip(os.path.sep))
+ recursivefiles = []
+ for x in myxfiles:
+ config_file = os.path.join(abs_user_config, x)
+ if os.path.isdir(config_file):
+ for parent, dirs, files in os.walk(config_file):
+ for y in dirs:
+ if y.startswith("."):
+ dirs.remove(y)
+ for y in files:
+ if y.startswith("."):
+ continue
+ recursivefiles.append(
+ os.path.join(parent, y)[len(abs_user_config) + 1:])
+ else:
+ recursivefiles.append(x)
+ myxfiles = recursivefiles
+ for x in myxfiles:
+ try:
+ myfile = open(os.path.join(abs_user_config, x),"r")
+ file_contents[x] = myfile.readlines()
+ myfile.close()
+ except IOError:
+ if file_contents.has_key(x):
+ del file_contents[x]
+ continue
+ worldlist = grabfile(os.path.join(config_root, WORLD_FILE))
+
+ for update_cmd in update_iter:
+ if update_cmd[0] == "move":
+ old_value, new_value = update_cmd[1], update_cmd[2]
+ #update world entries:
+ for x in range(0,len(worldlist)):
+ #update world entries, if any.
+ worldlist[x] = \
+ dep_transform(worldlist[x], old_value, new_value)
+
+ #update /etc/portage/packages.*
+ for x in file_contents:
+ for mypos in range(0,len(file_contents[x])):
+ line = file_contents[x][mypos]
+ if line[0] == "#" or not line.strip():
+ continue
+ myatom = line.split()[0]
+ if myatom.startswith("-"):
+ # package.mask supports incrementals
+ myatom = myatom[1:]
+ if not isvalidatom(myatom):
+ continue
+ key = dep_getkey(myatom)
+ if key == old_value:
+ file_contents[x][mypos] = \
+ line.replace(old_value, new_value)
+ update_files[x] = 1
+ sys.stdout.write("p")
+ sys.stdout.flush()
+
+ write_atomic(os.path.join(config_root, WORLD_FILE), "\n".join(worldlist))
+
+ protect_obj = ConfigProtect(
+ config_root, protect, protect_mask)
+ for x in update_files:
+ updating_file = os.path.join(abs_user_config, x)
+ if protect_obj.isprotected(updating_file):
+ updating_file = new_protect_filename(updating_file)
+ try:
+ write_atomic(updating_file, "".join(file_contents[x]))
+ except PortageException, e:
+ writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
+			writemsg("!!! An error occurred while updating a config file:" + \
+ " '%s'\n" % updating_file, noiselevel=-1)
+ continue
+
+def dep_transform(mydep, oldkey, newkey):
+ if dep_getkey(mydep) == oldkey:
+ return mydep.replace(oldkey, newkey, 1)
+ return mydep
diff --git a/pym/portage/util.py b/pym/portage/util.py
new file mode 100644
index 00000000..cc5a566b
--- /dev/null
+++ b/pym/portage/util.py
@@ -0,0 +1,1037 @@
+# Copyright 2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+from portage_exception import PortageException, FileNotFound, \
+ OperationNotPermitted, PermissionDenied, ReadOnlyFileSystem
+import portage_exception
+from portage_dep import isvalidatom
+
+import os, errno, shlex, stat, string, sys
+try:
+ import cPickle
+except ImportError:
+ import pickle as cPickle
+
+if not hasattr(__builtins__, "set"):
+ from sets import Set as set
+
+noiselimit = 0
+
+def writemsg(mystr,noiselevel=0,fd=None):
+ """Prints out warning and debug messages based on the noiselimit setting"""
+ global noiselimit
+ if fd is None:
+ fd = sys.stderr
+ if noiselevel <= noiselimit:
+ fd.write(mystr)
+ fd.flush()
+
+def writemsg_stdout(mystr,noiselevel=0):
+	"""Prints messages to stdout based on the noiselimit setting"""
+ writemsg(mystr, noiselevel=noiselevel, fd=sys.stdout)
+
+def normalize_path(mypath):
+ """
+ os.path.normpath("//foo") returns "//foo" instead of "/foo"
+ We dislike this behavior so we create our own normpath func
+ to fix it.
+ """
+ if mypath.startswith(os.path.sep):
+ # posixpath.normpath collapses 3 or more leading slashes to just 1.
+ return os.path.normpath(2*os.path.sep + mypath)
+ else:
+ return os.path.normpath(mypath)
+
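+# Example:
+#
+#   >>> normalize_path("//foo//bar/")
+#   '/foo/bar'
+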
+def grabfile(myfilename, compat_level=0, recursive=0):
+ """This function grabs the lines in a file, normalizes whitespace and returns lines in a list; if a line
+ begins with a #, it is ignored, as are empty lines"""
+
+ mylines=grablines(myfilename, recursive)
+ newlines=[]
+ for x in mylines:
+ #the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
+ #into single spaces.
+ myline=" ".join(x.split())
+ if not len(myline):
+ continue
+ if myline[0]=="#":
+ # Check if we have a compat-level string. BC-integration data.
+ # '##COMPAT==>N<==' 'some string attached to it'
+ mylinetest = myline.split("<==",1)
+ if len(mylinetest) == 2:
+ myline_potential = mylinetest[1]
+ mylinetest = mylinetest[0].split("##COMPAT==>")
+ if len(mylinetest) == 2:
+ if compat_level >= int(mylinetest[1]):
+ # It's a compat line, and the key matches.
+ newlines.append(myline_potential)
+ continue
+ else:
+ continue
+ newlines.append(myline)
+ return newlines
+
+def map_dictlist_vals(func,myDict):
+ """Performs a function on each value of each key in a dictlist.
+ Returns a new dictlist."""
+ new_dl = {}
+ for key in myDict.keys():
+ new_dl[key] = []
+ new_dl[key] = map(func,myDict[key])
+ return new_dl
+
+def stack_dictlist(original_dicts, incremental=0, incrementals=[], ignore_none=0):
+ """
+ Stacks an array of dict-types into one array. Optionally merging or
+ overwriting matching key/value pairs for the dict[key]->list.
+ Returns a single dict. Higher index in lists is preferenced.
+
+ Example usage:
+ >>> from portage_util import stack_dictlist
+ >>> print stack_dictlist( [{'a':'b'},{'x':'y'}])
+ >>> {'a':'b','x':'y'}
+ >>> print stack_dictlist( [{'a':'b'},{'a':'c'}], incremental = True )
+ >>> {'a':['b','c'] }
+ >>> a = {'KEYWORDS':['x86','alpha']}
+ >>> b = {'KEYWORDS':['-x86']}
+ >>> print stack_dictlist( [a,b] )
+ >>> { 'KEYWORDS':['x86','alpha','-x86']}
+ >>> print stack_dictlist( [a,b], incremental=True)
+ >>> { 'KEYWORDS':['alpha'] }
+ >>> print stack_dictlist( [a,b], incrementals=['KEYWORDS'])
+ >>> { 'KEYWORDS':['alpha'] }
+
+	@param original_dicts: a list of (dictionary objects or None)
+	@type original_dicts: list
+	@param incremental: True or False depending on whether new keys should overwrite
+	   keys which already exist.
+	@type incremental: boolean
+	@param incrementals: a list of items that should be incremental (-foo removes foo from
+	   the returned dict).
+	@type incrementals: list
+	@param ignore_none: appears to be ignored, but was probably used long ago.
+	@type ignore_none: boolean
+
+ """
+ final_dict = {}
+ for mydict in original_dicts:
+ if mydict is None:
+ continue
+ for y in mydict.keys():
+ if not y in final_dict:
+ final_dict[y] = []
+
+ for thing in mydict[y]:
+ if thing:
+ if incremental or y in incrementals:
+ if thing == "-*":
+ final_dict[y] = []
+ continue
+ elif thing.startswith("-"):
+ try:
+ final_dict[y].remove(thing[1:])
+ except ValueError:
+ pass
+ continue
+ if thing not in final_dict[y]:
+ final_dict[y].append(thing)
+ if y in final_dict and not final_dict[y]:
+ del final_dict[y]
+ return final_dict
+
+def stack_dicts(dicts, incremental=0, incrementals=[], ignore_none=0):
+ """Stacks an array of dict-types into one array. Optionally merging or
+ overwriting matching key/value pairs for the dict[key]->string.
+ Returns a single dict."""
+ final_dict = None
+ for mydict in dicts:
+ if mydict is None:
+ if ignore_none:
+ continue
+ else:
+ return None
+ if final_dict is None:
+ final_dict = {}
+ for y in mydict.keys():
+ if mydict[y]:
+ if final_dict.has_key(y) and (incremental or (y in incrementals)):
+ final_dict[y] += " "+mydict[y][:]
+ else:
+ final_dict[y] = mydict[y][:]
+ mydict[y] = " ".join(mydict[y].split()) # Remove extra spaces.
+ return final_dict
+
+def stack_lists(lists, incremental=1):
+ """Stacks an array of list-types into one array. Optionally removing
+ distinct values using '-value' notation. Higher index is preferenced.
+
+ all elements must be hashable."""
+
+ new_list = {}
+ for x in lists:
+ for y in filter(None, x):
+ if incremental:
+ if y == "-*":
+ new_list.clear()
+ elif y.startswith("-"):
+ new_list.pop(y[1:], None)
+ else:
+ new_list[y] = True
+ else:
+ new_list[y] = True
+ return new_list.keys()
+
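+# Example (result order is arbitrary, since a dict is used internally):
+#
+#   >>> stack_lists([["x86", "ppc"], ["-x86", "amd64"]])
+#   ['ppc', 'amd64']
+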
+def grabdict(myfilename, juststrings=0, empty=0, recursive=0, incremental=1):
+ """
+ This function grabs the lines in a file, normalizes whitespace and returns lines in a dictionary
+
+ @param myfilename: file to process
+ @type myfilename: string (path)
+ @param juststrings: only return strings
+ @type juststrings: Boolean (integer)
+	@param empty: Keep lines that have a key but no values
+ @type empty: Boolean (integer)
+ @param recursive: Recursively grab ( support for /etc/portage/package.keywords/* and friends )
+ @type recursive: Boolean (integer)
+ @param incremental: Append to the return list, don't overwrite
+ @type incremental: Boolean (integer)
+ @rtype: Dictionary
+ @returns:
+ 1. Returns the lines in a file in a dictionary, for example:
+ 'sys-apps/portage x86 amd64 ppc'
+ would return
+		{ "sys-apps/portage" : [ 'x86', 'amd64', 'ppc' ] }
+ the line syntax is key : [list of values]
+ """
+ newdict={}
+ for x in grablines(myfilename, recursive):
+ #the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
+ #into single spaces.
+ if x[0] == "#":
+ continue
+ myline=x.split()
+ if len(myline) < 2 and empty == 0:
+ continue
+ if len(myline) < 1 and empty == 1:
+ continue
+ if incremental:
+ newdict.setdefault(myline[0], []).extend(myline[1:])
+ else:
+ newdict[myline[0]] = myline[1:]
+ if juststrings:
+ for k, v in newdict.iteritems():
+ newdict[k] = " ".join(v)
+ return newdict
+
+def grabdict_package(myfilename, juststrings=0, recursive=0):
+ pkgs=grabdict(myfilename, juststrings, empty=1, recursive=recursive)
+ # We need to call keys() here in order to avoid the possibility of
+ # "RuntimeError: dictionary changed size during iteration"
+ # when an invalid atom is deleted.
+ for x in pkgs.keys():
+ if not isvalidatom(x):
+ del(pkgs[x])
+ writemsg("--- Invalid atom in %s: %s\n" % (myfilename, x),
+ noiselevel=-1)
+ return pkgs
+
+def grabfile_package(myfilename, compatlevel=0, recursive=0):
+ pkgs=grabfile(myfilename, compatlevel, recursive=recursive)
+ for x in range(len(pkgs)-1, -1, -1):
+ pkg = pkgs[x]
+ if pkg[0] == "-":
+ pkg = pkg[1:]
+		if pkg[0] == "*": # Kill this so we can deal with the "packages" file too
+ pkg = pkg[1:]
+ if not isvalidatom(pkg):
+ writemsg("--- Invalid atom in %s: %s\n" % (myfilename, pkgs[x]),
+ noiselevel=-1)
+ del(pkgs[x])
+ return pkgs
+
+def grablines(myfilename,recursive=0):
+ mylines=[]
+ if recursive and os.path.isdir(myfilename):
+ if myfilename in ["RCS", "CVS", "SCCS"]:
+ return mylines
+ dirlist = os.listdir(myfilename)
+ dirlist.sort()
+ for f in dirlist:
+ if not f.startswith(".") and not f.endswith("~"):
+ mylines.extend(grablines(
+ os.path.join(myfilename, f), recursive))
+ else:
+ try:
+ myfile = open(myfilename, "r")
+ mylines = myfile.readlines()
+ myfile.close()
+ except IOError:
+ pass
+ return mylines
+
+def writedict(mydict,myfilename,writekey=True):
+ """Writes out a dict to a file; writekey=0 mode doesn't write out
+ the key and assumes all values are strings, not lists."""
+ myfile = None
+ try:
+ myfile = atomic_ofstream(myfilename)
+ if not writekey:
+ for x in mydict.values():
+ myfile.write(x+"\n")
+ else:
+ for x in mydict.keys():
+ myfile.write("%s %s\n" % (x, " ".join(mydict[x])))
+ myfile.close()
+ except IOError:
+ if myfile is not None:
+ myfile.abort()
+ return 0
+ return 1
+
+def getconfig(mycfg, tolerant=0, allow_sourcing=False, expand=True):
+ mykeys={}
+ try:
+ f=open(mycfg,'r')
+ except IOError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ return None
+ try:
+ lex = shlex.shlex(f, posix=True)
+ lex.wordchars=string.digits+string.letters+"~!@#$%*_\:;?,./-+{}"
+ lex.quotes="\"'"
+ if allow_sourcing:
+ lex.source="source"
+ while 1:
+ key=lex.get_token()
+ if key == "export":
+ key = lex.get_token()
+ if key is None:
+ #normal end of file
+				break
+ equ=lex.get_token()
+ if (equ==''):
+ #unexpected end of file
+ #lex.error_leader(self.filename,lex.lineno)
+ if not tolerant:
+ writemsg("!!! Unexpected end of config file: variable "+str(key)+"\n",
+ noiselevel=-1)
+ raise Exception("ParseError: Unexpected EOF: "+str(mycfg)+": on/before line "+str(lex.lineno))
+ else:
+ return mykeys
+ elif (equ!='='):
+ #invalid token
+ #lex.error_leader(self.filename,lex.lineno)
+ if not tolerant:
+ writemsg("!!! Invalid token (not \"=\") "+str(equ)+"\n",
+ noiselevel=-1)
+ raise Exception("ParseError: Invalid token (not '='): "+str(mycfg)+": line "+str(lex.lineno))
+ else:
+ return mykeys
+ val=lex.get_token()
+ if val is None:
+ #unexpected end of file
+ #lex.error_leader(self.filename,lex.lineno)
+ if not tolerant:
+ writemsg("!!! Unexpected end of config file: variable "+str(key)+"\n",
+ noiselevel=-1)
+ raise portage_exception.CorruptionError("ParseError: Unexpected EOF: "+str(mycfg)+": line "+str(lex.lineno))
+ else:
+ return mykeys
+ if expand:
+ mykeys[key] = varexpand(val, mykeys)
+ else:
+ mykeys[key] = val
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ raise portage_exception.ParseError(str(e)+" in "+mycfg)
+ return mykeys
+
+#cache expansions of constant strings
+cexpand={}
+def varexpand(mystring,mydict={}):
+ newstring = cexpand.get(" "+mystring, None)
+ if newstring is not None:
+ return newstring
+
+	# New variable expansion code. Removes quotes, handles \n, etc.
+	# This code is used by the configfile code, as well as others (parser).
+	# This would be a good bunch of code to port to C.
+ numvars=0
+ mystring=" "+mystring
+ #in single, double quotes
+ insing=0
+ indoub=0
+ pos=1
+ newstring=" "
+ while (pos<len(mystring)):
+ if (mystring[pos]=="'") and (mystring[pos-1]!="\\"):
+ if (indoub):
+ newstring=newstring+"'"
+ else:
+ insing=not insing
+ pos=pos+1
+ continue
+ elif (mystring[pos]=='"') and (mystring[pos-1]!="\\"):
+ if (insing):
+ newstring=newstring+'"'
+ else:
+ indoub=not indoub
+ pos=pos+1
+ continue
+ if (not insing):
+ #expansion time
+ if (mystring[pos]=="\n"):
+ #convert newlines to spaces
+ newstring=newstring+" "
+ pos=pos+1
+ elif (mystring[pos]=="\\"):
+ #backslash expansion time
+ if (pos+1>=len(mystring)):
+ newstring=newstring+mystring[pos]
+ break
+ else:
+ a=mystring[pos+1]
+ pos=pos+2
+ if a=='a':
+ newstring=newstring+chr(007)
+ elif a=='b':
+ newstring=newstring+chr(010)
+ elif a=='e':
+ newstring=newstring+chr(033)
+ elif (a=='f') or (a=='n'):
+ newstring=newstring+chr(012)
+ elif a=='r':
+ newstring=newstring+chr(015)
+ elif a=='t':
+ newstring=newstring+chr(011)
+ elif a=='v':
+ newstring=newstring+chr(013)
+ elif a!='\n':
+ #remove backslash only, as bash does: this takes care of \\ and \' and \" as well
+ newstring=newstring+mystring[pos-1:pos]
+ continue
+ elif (mystring[pos]=="$") and (mystring[pos-1]!="\\"):
+ pos=pos+1
+ if mystring[pos]=="{":
+ pos=pos+1
+ braced=True
+ else:
+ braced=False
+ myvstart=pos
+ validchars=string.ascii_letters+string.digits+"_"
+ while mystring[pos] in validchars:
+ if (pos+1)>=len(mystring):
+ if braced:
+ cexpand[mystring]=""
+ return ""
+ else:
+ pos=pos+1
+ break
+ pos=pos+1
+ myvarname=mystring[myvstart:pos]
+ if braced:
+ if mystring[pos]!="}":
+ cexpand[mystring]=""
+ return ""
+ else:
+ pos=pos+1
+ if len(myvarname)==0:
+ cexpand[mystring]=""
+ return ""
+ numvars=numvars+1
+ if mydict.has_key(myvarname):
+ newstring=newstring+mydict[myvarname]
+ else:
+ newstring=newstring+mystring[pos]
+ pos=pos+1
+ else:
+ newstring=newstring+mystring[pos]
+ pos=pos+1
+ if numvars==0:
+ cexpand[mystring]=newstring[1:]
+ return newstring[1:]
+
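+# Example:
+#
+#   >>> varexpand("${PORTDIR}/profiles", {"PORTDIR": "/usr/portage"})
+#   '/usr/portage/profiles'
+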
+def pickle_write(data,filename,debug=0):
+ import os
+ try:
+ myf=open(filename,"w")
+ cPickle.dump(data,myf,-1)
+ myf.flush()
+ myf.close()
+ writemsg("Wrote pickle: "+str(filename)+"\n",1)
+		os.chmod(filename,0664)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ return 0
+ return 1
+
+def pickle_read(filename,default=None,debug=0):
+ import os
+ if not os.access(filename, os.R_OK):
+ writemsg("pickle_read(): File not readable. '"+filename+"'\n",1)
+ return default
+ data = None
+ try:
+ myf = open(filename)
+ mypickle = cPickle.Unpickler(myf)
+ mypickle.find_global = None
+ data = mypickle.load()
+ myf.close()
+ del mypickle,myf
+ writemsg("pickle_read(): Loaded pickle. '"+filename+"'\n",1)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ writemsg("!!! Failed to load pickle: "+str(e)+"\n",1)
+ data = default
+ return data
+
+def dump_traceback(msg, noiselevel=1):
+ import sys, traceback
+ info = sys.exc_info()
+ if not info[2]:
+ stack = traceback.extract_stack()[:-1]
+ error = None
+ else:
+ stack = traceback.extract_tb(info[2])
+ error = str(info[1])
+ writemsg("\n====================================\n", noiselevel=noiselevel)
+ writemsg("%s\n\n" % msg, noiselevel=noiselevel)
+ for line in traceback.format_list(stack):
+ writemsg(line, noiselevel=noiselevel)
+ if error:
+ writemsg(error+"\n", noiselevel=noiselevel)
+ writemsg("====================================\n\n", noiselevel=noiselevel)
+
+def unique_array(s):
+ """lifted from python cookbook, credit: Tim Peters
+ Return a list of the elements in s in arbitrary order, sans duplicates"""
+ n = len(s)
+ # assume all elements are hashable, if so, it's linear
+ try:
+ return list(set(s))
+ except TypeError:
+ pass
+
+ # so much for linear. abuse sort.
+ try:
+ t = list(s)
+ t.sort()
+ except TypeError:
+ pass
+ else:
+ assert n > 0
+ last = t[0]
+ lasti = i = 1
+ while i < n:
+ if t[i] != last:
+ t[lasti] = last = t[i]
+ lasti += 1
+ i += 1
+ return t[:lasti]
+
+ # blah. back to original portage.unique_array
+ u = []
+ for x in s:
+ if x not in u:
+ u.append(x)
+ return u
+
+def apply_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1,
+ stat_cached=None, follow_links=True):
+ """Apply user, group, and mode bits to a file if the existing bits do not
+ already match. The default behavior is to force an exact match of mode
+ bits. When mask=0 is specified, mode bits on the target file are allowed
+ to be a superset of the mode argument (via logical OR). When mask>0, the
+ mode bits that the target file is allowed to have are restricted via
+ logical XOR.
+ Returns True if the permissions were modified and False otherwise."""
+
+ modified = False
+
+ if stat_cached is None:
+ try:
+ if follow_links:
+ stat_cached = os.stat(filename)
+ else:
+ stat_cached = os.lstat(filename)
+ except OSError, oe:
+ func_call = "stat('%s')" % filename
+ if oe.errno == errno.EPERM:
+ raise OperationNotPermitted(func_call)
+ elif oe.errno == errno.EACCES:
+ raise PermissionDenied(func_call)
+ elif oe.errno == errno.ENOENT:
+ raise FileNotFound(filename)
+ else:
+ raise
+
+ if (uid != -1 and uid != stat_cached.st_uid) or \
+ (gid != -1 and gid != stat_cached.st_gid):
+ try:
+ if follow_links:
+ os.chown(filename, uid, gid)
+ else:
+ import portage_data
+ portage_data.lchown(filename, uid, gid)
+ modified = True
+ except OSError, oe:
+ func_call = "chown('%s', %i, %i)" % (filename, uid, gid)
+ if oe.errno == errno.EPERM:
+ raise OperationNotPermitted(func_call)
+ elif oe.errno == errno.EACCES:
+ raise PermissionDenied(func_call)
+ elif oe.errno == errno.EROFS:
+ raise ReadOnlyFileSystem(func_call)
+ elif oe.errno == errno.ENOENT:
+ raise FileNotFound(filename)
+ else:
+ raise
+
+ new_mode = -1
+ st_mode = stat_cached.st_mode & 07777 # protect from unwanted bits
+ if mask >= 0:
+ if mode == -1:
+ mode = 0 # Don't add any mode bits when mode is unspecified.
+ else:
+ mode = mode & 07777
+ if (mode & st_mode != mode) or \
+ ((mask ^ st_mode) & st_mode != st_mode):
+ new_mode = mode | st_mode
+ new_mode = (mask ^ new_mode) & new_mode
+ elif mode != -1:
+ mode = mode & 07777 # protect from unwanted bits
+ if mode != st_mode:
+ new_mode = mode
+
+ # The chown system call may clear S_ISUID and S_ISGID
+ # bits, so those bits are restored if necessary.
+ if modified and new_mode == -1 and \
+ (st_mode & stat.S_ISUID or st_mode & stat.S_ISGID):
+ if mode == -1:
+ new_mode = st_mode
+ else:
+ mode = mode & 07777
+ if mask >= 0:
+ new_mode = mode | st_mode
+ new_mode = (mask ^ new_mode) & new_mode
+ else:
+ new_mode = mode
+ if not (new_mode & stat.S_ISUID or new_mode & stat.S_ISGID):
+ new_mode = -1
+
+ if not follow_links and stat.S_ISLNK(stat_cached.st_mode):
+ # Mode doesn't matter for symlinks.
+ new_mode = -1
+
+ if new_mode != -1:
+ try:
+ os.chmod(filename, new_mode)
+ modified = True
+ except OSError, oe:
+ func_call = "chmod('%s', %s)" % (filename, oct(new_mode))
+ if oe.errno == errno.EPERM:
+ raise OperationNotPermitted(func_call)
+ elif oe.errno == errno.EACCES:
+ raise PermissionDenied(func_call)
+ elif oe.errno == errno.EROFS:
+ raise ReadOnlyFileSystem(func_call)
+ elif oe.errno == errno.ENOENT:
+ raise FileNotFound(filename)
+ raise
+ return modified
+
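+# Example sketch (requires sufficient privileges; the path and gid below are
+# hypothetical). The return value is True only if any bits were changed:
+#
+#   >>> apply_permissions("/var/log/emerge.log", uid=-1, gid=250, mode=0660)
+#   True
+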
+def apply_stat_permissions(filename, newstat, **kwargs):
+ """A wrapper around apply_secpass_permissions that gets
+ uid, gid, and mode from a stat object"""
+ return apply_secpass_permissions(filename, uid=newstat.st_uid, gid=newstat.st_gid,
+ mode=newstat.st_mode, **kwargs)
+
+def apply_recursive_permissions(top, uid=-1, gid=-1,
+ dirmode=-1, dirmask=-1, filemode=-1, filemask=-1, onerror=None):
+ """A wrapper around apply_secpass_permissions that applies permissions
+ recursively. If optional argument onerror is specified, it should be a
+ function; it will be called with one argument, a PortageException instance.
+ Returns True if all permissions are applied and False if some are left
+ unapplied."""
+
+ if onerror is None:
+ # Default behavior is to dump errors to stderr so they won't
+ # go unnoticed. Callers can pass in a quiet instance.
+ def onerror(e):
+ if isinstance(e, OperationNotPermitted):
+ writemsg("Operation Not Permitted: %s\n" % str(e),
+ noiselevel=-1)
+ elif isinstance(e, FileNotFound):
+ writemsg("File Not Found: '%s'\n" % str(e), noiselevel=-1)
+ else:
+ raise
+
+ all_applied = True
+ for dirpath, dirnames, filenames in os.walk(top):
+ try:
+ applied = apply_secpass_permissions(dirpath,
+ uid=uid, gid=gid, mode=dirmode, mask=dirmask)
+ if not applied:
+ all_applied = False
+ except PortageException, e:
+ all_applied = False
+ onerror(e)
+
+ for name in filenames:
+ try:
+ applied = apply_secpass_permissions(os.path.join(dirpath, name),
+ uid=uid, gid=gid, mode=filemode, mask=filemask)
+ if not applied:
+ all_applied = False
+ except PortageException, e:
+ all_applied = False
+ onerror(e)
+ return all_applied
+
+def apply_secpass_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1,
+ stat_cached=None, follow_links=True):
+ """A wrapper around apply_permissions that uses secpass and simple
+ logic to apply as much of the permissions as possible without
+ generating an obviously avoidable permission exception. Despite
+ attempts to avoid an exception, it's possible that one will be raised
+ anyway, so be prepared.
+ Returns True if all permissions are applied and False if some are left
+ unapplied."""
+
+ if stat_cached is None:
+ try:
+ if follow_links:
+ stat_cached = os.stat(filename)
+ else:
+ stat_cached = os.lstat(filename)
+ except OSError, oe:
+ func_call = "stat('%s')" % filename
+ if oe.errno == errno.EPERM:
+ raise OperationNotPermitted(func_call)
+ elif oe.errno == errno.EACCES:
+ raise PermissionDenied(func_call)
+ elif oe.errno == errno.ENOENT:
+ raise FileNotFound(filename)
+ else:
+ raise
+
+ all_applied = True
+
+ import portage_data # not imported globally because of circular dep
+ if portage_data.secpass < 2:
+
+ if uid != -1 and \
+ uid != stat_cached.st_uid:
+ all_applied = False
+ uid = -1
+
+ if gid != -1 and \
+ gid != stat_cached.st_gid and \
+ gid not in os.getgroups():
+ all_applied = False
+ gid = -1
+
+ apply_permissions(filename, uid=uid, gid=gid, mode=mode, mask=mask,
+ stat_cached=stat_cached, follow_links=follow_links)
+ return all_applied
+
+class atomic_ofstream(file):
+ """Write a file atomically via os.rename(). Atomic replacement prevents
+ interprocess interference and prevents corruption of the target
+ file when the write is interrupted (for example, when an 'out of space'
+ error occurs)."""
+
+ def __init__(self, filename, mode='w', follow_links=True, **kargs):
+ """Opens a temporary filename.pid in the same directory as filename."""
+ self._aborted = False
+
+ if follow_links:
+ canonical_path = os.path.realpath(filename)
+ self._real_name = canonical_path
+ tmp_name = "%s.%i" % (canonical_path, os.getpid())
+ try:
+ super(atomic_ofstream, self).__init__(tmp_name, mode=mode, **kargs)
+ return
+ except (OSError, IOError), e:
+ if canonical_path == filename:
+ raise
+ writemsg("!!! Failed to open file: '%s'\n" % tmp_name,
+ noiselevel=-1)
+ writemsg("!!! %s\n" % str(e), noiselevel=-1)
+
+ self._real_name = filename
+ tmp_name = "%s.%i" % (filename, os.getpid())
+ super(atomic_ofstream, self).__init__(tmp_name, mode=mode, **kargs)
+
+ def close(self):
+ """Closes the temporary file, copies permissions (if possible),
+ and performs the atomic replacement via os.rename(). If the abort()
+ method has been called, then the temp file is closed and removed."""
+ if not self.closed:
+ try:
+ super(atomic_ofstream, self).close()
+ if not self._aborted:
+ try:
+ apply_stat_permissions(self.name, os.stat(self._real_name))
+ except OperationNotPermitted:
+ pass
+ except FileNotFound:
+ pass
+ except OSError, oe: # from the above os.stat call
+ if oe.errno in (errno.ENOENT, errno.EPERM):
+ pass
+ else:
+ raise
+ os.rename(self.name, self._real_name)
+ finally:
+ # Make sure we cleanup the temp file
+ # even if an exception is raised.
+ try:
+ os.unlink(self.name)
+ except OSError, oe:
+ pass
+
+ def abort(self):
+ """If an error occurs while writing the file, the user should
+ call this method in order to leave the target file unchanged.
+ This will call close() automatically."""
+ if not self._aborted:
+ self._aborted = True
+ self.close()
+
+ def __del__(self):
+		"""If the user does not explicitly call close(), it is
+ assumed that an error has occurred, so we abort()."""
+ if not self.closed:
+ self.abort()
+ # ensure destructor from the base class is called
+ base_destructor = getattr(super(atomic_ofstream, self), '__del__', None)
+ if base_destructor is not None:
+ base_destructor()
+
+def write_atomic(file_path, content):
+ f = None
+ try:
+ f = atomic_ofstream(file_path)
+ f.write(content)
+ f.close()
+ except (IOError, OSError), e:
+ if f:
+ f.abort()
+ func_call = "write_atomic('%s')" % file_path
+ if e.errno == errno.EPERM:
+ raise OperationNotPermitted(func_call)
+ elif e.errno == errno.EACCES:
+ raise PermissionDenied(func_call)
+ elif e.errno == errno.EROFS:
+ raise ReadOnlyFileSystem(func_call)
+ elif e.errno == errno.ENOENT:
+ raise FileNotFound(file_path)
+ else:
+ raise
+
+def ensure_dirs(dir_path, *args, **kwargs):
+ """Create a directory and call apply_permissions.
+ Returns True if a directory is created or the permissions needed to be
+ modified, and False otherwise."""
+
+ created_dir = False
+
+ try:
+ os.makedirs(dir_path)
+ created_dir = True
+ except OSError, oe:
+ func_call = "makedirs('%s')" % dir_path
+ if errno.EEXIST == oe.errno:
+ pass
+ elif oe.errno == errno.EPERM:
+ raise OperationNotPermitted(func_call)
+ elif oe.errno == errno.EACCES:
+ raise PermissionDenied(func_call)
+ elif oe.errno == errno.EROFS:
+ raise ReadOnlyFileSystem(func_call)
+ else:
+ raise
+ perms_modified = apply_permissions(dir_path, *args, **kwargs)
+ return created_dir or perms_modified
+
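+# Example sketch combining ensure_dirs() and write_atomic() (paths are
+# hypothetical; both calls may raise the permission exceptions above):
+#
+#   >>> ensure_dirs("/etc/portage", mode=0755)
+#   True
+#   >>> write_atomic("/etc/portage/package.keywords", "app-misc/foo ~x86\n")
+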
+class LazyItemsDict(dict):
+ """A mapping object that behaves like a standard dict except that it allows
+ for lazy initialization of values via callable objects. Lazy items can be
+ overwritten and deleted just as normal items."""
+ def __init__(self, initial_items=None):
+ dict.__init__(self)
+ self.lazy_items = {}
+ if initial_items is not None:
+ self.update(initial_items)
+ def addLazyItem(self, item_key, value_callable, *pargs, **kwargs):
+ """Add a lazy item for the given key. When the item is requested,
+ value_callable will be called with *pargs and **kwargs arguments."""
+ self.lazy_items[item_key] = (value_callable, pargs, kwargs)
+ # make it show up in self.keys(), etc...
+ dict.__setitem__(self, item_key, None)
+ def addLazySingleton(self, item_key, value_callable, *pargs, **kwargs):
+ """This is like addLazyItem except value_callable will only be called
+ a maximum of 1 time and the result will be cached for future requests."""
+ class SingletonItem(object):
+ def __init__(self, value_callable, *pargs, **kwargs):
+ self._callable = value_callable
+ self._pargs = pargs
+ self._kwargs = kwargs
+ self._called = False
+ def __call__(self):
+ if not self._called:
+ self._called = True
+ self._value = self._callable(*self._pargs, **self._kwargs)
+ return self._value
+ self.addLazyItem(item_key, SingletonItem(value_callable, *pargs, **kwargs))
+ def update(self, map_obj):
+ if isinstance(map_obj, LazyItemsDict):
+ for k in map_obj:
+ if k in map_obj.lazy_items:
+ dict.__setitem__(self, k, None)
+ else:
+ dict.__setitem__(self, k, map_obj[k])
+ self.lazy_items.update(map_obj.lazy_items)
+ else:
+ dict.update(self, map_obj)
+ def __getitem__(self, item_key):
+ if item_key in self.lazy_items:
+ value_callable, pargs, kwargs = self.lazy_items[item_key]
+ return value_callable(*pargs, **kwargs)
+ else:
+ return dict.__getitem__(self, item_key)
+ def __setitem__(self, item_key, value):
+ if item_key in self.lazy_items:
+ del self.lazy_items[item_key]
+ dict.__setitem__(self, item_key, value)
+ def __delitem__(self, item_key):
+ if item_key in self.lazy_items:
+ del self.lazy_items[item_key]
+ dict.__delitem__(self, item_key)
+
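+# Example: the callable runs only when the key is read, and
+# addLazySingleton() caches the first result (expensive_load is a
+# hypothetical zero-argument callable):
+#
+#   >>> d = LazyItemsDict()
+#   >>> d.addLazySingleton("settings", expensive_load)
+#   >>> d["settings"] is d["settings"]
+#   True
+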
+class ConfigProtect(object):
+ def __init__(self, myroot, protect_list, mask_list):
+ self.myroot = myroot
+ self.protect_list = protect_list
+ self.mask_list = mask_list
+ self.updateprotect()
+
+ def updateprotect(self):
+ """Update internal state for isprotected() calls. Nonexistent paths
+ are ignored."""
+ self.protect = []
+ self._dirs = set()
+ for x in self.protect_list:
+ ppath = normalize_path(
+ os.path.join(self.myroot, x.lstrip(os.path.sep)))
+ mystat = None
+ try:
+ if stat.S_ISDIR(os.stat(ppath).st_mode):
+ self._dirs.add(ppath)
+ self.protect.append(ppath)
+ except OSError:
+ # If it doesn't exist, there's no need to protect it.
+ pass
+
+ self.protectmask = []
+ for x in self.mask_list:
+ ppath = normalize_path(
+ os.path.join(self.myroot, x.lstrip(os.path.sep)))
+ mystat = None
+ try:
+				# Use lstat so that anything, even a broken symlink, can be
+				# protected.
+				if stat.S_ISDIR(os.lstat(ppath).st_mode):
+					self._dirs.add(ppath)
+				self.protectmask.append(ppath)
+				# Now use stat in case this is a symlink to a directory.
+ if stat.S_ISDIR(os.stat(ppath).st_mode):
+ self._dirs.add(ppath)
+ except OSError:
+ # If it doesn't exist, there's no need to mask it.
+ pass
+
+ def isprotected(self, obj):
+ """Returns True if obj is protected, False otherwise. The caller must
+ ensure that obj is normalized with a single leading slash. A trailing
+ slash is optional for directories."""
+ masked = 0
+ protected = 0
+ sep = os.path.sep
+ for ppath in self.protect:
+ if len(ppath) > masked and obj.startswith(ppath):
+ if ppath in self._dirs:
+ if obj != ppath and not obj.startswith(ppath + sep):
+ # /etc/foo does not match /etc/foobaz
+ continue
+ elif obj != ppath:
+ # force exact match when CONFIG_PROTECT lists a
+ # non-directory
+ continue
+ protected = len(ppath)
+ #config file management
+ for pmpath in self.protectmask:
+ if len(pmpath) >= protected and obj.startswith(pmpath):
+ if pmpath in self._dirs:
+ if obj != pmpath and \
+ not obj.startswith(pmpath + sep):
+ # /etc/foo does not match /etc/foobaz
+ continue
+ elif obj != pmpath:
+ # force exact match when CONFIG_PROTECT_MASK lists
+ # a non-directory
+ continue
+ #skip, it's in the mask
+ masked = len(pmpath)
+ return protected > masked
+
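+# Example sketch (assumes /etc exists; the protect lists are hypothetical;
+# masked paths lose their protection again):
+#
+#   >>> cp = ConfigProtect("/", ["/etc"], ["/etc/env.d"])
+#   >>> cp.isprotected("/etc/fstab")
+#   True
+#   >>> cp.isprotected("/etc/env.d/02locale")
+#   False
+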
+def new_protect_filename(mydest, newmd5=None):
+ """Resolves a config-protect filename for merging, optionally
+ using the last filename if the md5 matches.
+ (dest,md5) ==> 'string' --- path_to_target_filename
+ (dest) ==> ('next', 'highest') --- next_target and most-recent_target
+ """
+
+ # config protection filename format:
+ # ._cfg0000_foo
+ # 0123456789012
+ prot_num = -1
+ last_pfile = ""
+
+ if not os.path.exists(mydest):
+ return mydest
+
+ real_filename = os.path.basename(mydest)
+ real_dirname = os.path.dirname(mydest)
+ for pfile in os.listdir(real_dirname):
+ if pfile[0:5] != "._cfg":
+ continue
+ if pfile[10:] != real_filename:
+ continue
+ try:
+ new_prot_num = int(pfile[5:9])
+ if new_prot_num > prot_num:
+ prot_num = new_prot_num
+ last_pfile = pfile
+ except ValueError:
+ continue
+ prot_num = prot_num + 1
+
+ new_pfile = normalize_path(os.path.join(real_dirname,
+ "._cfg" + str(prot_num).zfill(4) + "_" + real_filename))
+ old_pfile = normalize_path(os.path.join(real_dirname, last_pfile))
+ if last_pfile and newmd5:
+ import portage_checksum
+ if portage_checksum.perform_md5(
+ os.path.join(real_dirname, last_pfile)) == newmd5:
+ return old_pfile
+ return new_pfile
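+
+# Example: for an existing /etc/foo with no previous ._cfg* siblings,
+# the next merge target would be:
+#
+#   >>> new_protect_filename("/etc/foo")
+#   '/etc/._cfg0000_foo'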
diff --git a/pym/portage/versions.py b/pym/portage/versions.py
new file mode 100644
index 00000000..63d69bac
--- /dev/null
+++ b/pym/portage/versions.py
@@ -0,0 +1,314 @@
+# portage_versions.py -- core Portage functionality
+# Copyright 1998-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+import re
+
+ver_regexp = re.compile("^(cvs\\.)?(\\d+)((\\.\\d+)*)([a-z]?)((_(pre|p|beta|alpha|rc)\\d*)*)(-r(\\d+))?$")
+suffix_regexp = re.compile("^(alpha|beta|rc|pre|p)(\\d*)$")
+suffix_value = {"pre": -2, "p": 0, "alpha": -4, "beta": -3, "rc": -1}
+endversion_keys = ["pre", "p", "alpha", "beta", "rc"]
+
+from portage_exception import InvalidData
+
+def ververify(myver, silent=1):
+ if ver_regexp.match(myver):
+ return 1
+ else:
+ if not silent:
+ print "!!! syntax error in version: %s" % myver
+ return 0
+
+vercmp_cache = {}
+def vercmp(ver1, ver2, silent=1):
+ """
+ Compare two versions
+ Example usage:
+ >>> from portage_versions import vercmp
+ >>> vercmp('1.0-r1','1.2-r3')
+ negative number
+ >>> vercmp('1.3','1.2-r3')
+ positive number
+ >>> vercmp('1.0_p3','1.0_p3')
+ 0
+
+	@param ver1: version to compare with (see ver_regexp in portage_versions.py)
+	@type ver1: string (example: "2.1.2-r3")
+	@param ver2: version to compare against (see ver_regexp in portage_versions.py)
+	@type ver2: string (example: "2.1.2_rc5")
+ @rtype: None or float
+ @return:
+ 1. positive if ver1 is greater than ver2
+ 2. negative if ver1 is less than ver2
+ 3. 0 if ver1 equals ver2
+ 4. None if ver1 or ver2 are invalid (see ver_regexp in portage_versions.py)
+ """
+
+ if ver1 == ver2:
+ return 0
+ mykey=ver1+":"+ver2
+ try:
+ return vercmp_cache[mykey]
+ except KeyError:
+ pass
+ match1 = ver_regexp.match(ver1)
+ match2 = ver_regexp.match(ver2)
+
+ # checking that the versions are valid
+ if not match1 or not match1.groups():
+ if not silent:
+ print "!!! syntax error in version: %s" % ver1
+ return None
+ if not match2 or not match2.groups():
+ if not silent:
+ print "!!! syntax error in version: %s" % ver2
+ return None
+
+ # shortcut for cvs ebuilds (new style)
+ if match1.group(1) and not match2.group(1):
+ vercmp_cache[mykey] = 1
+ return 1
+ elif match2.group(1) and not match1.group(1):
+ vercmp_cache[mykey] = -1
+ return -1
+
+ # building lists of the version parts before the suffix
+ # first part is simple
+ list1 = [int(match1.group(2))]
+ list2 = [int(match2.group(2))]
+
+ # this part would greatly benefit from a fixed-length version pattern
+ if len(match1.group(3)) or len(match2.group(3)):
+ vlist1 = match1.group(3)[1:].split(".")
+ vlist2 = match2.group(3)[1:].split(".")
+ for i in range(0, max(len(vlist1), len(vlist2))):
+			# Implicit .0 is given a value of -1, so that 1.0.0 > 1.0, since it
+ # would be ambiguous if two versions that aren't literally equal
+ # are given the same value (in sorting, for example).
+ if len(vlist1) <= i or len(vlist1[i]) == 0:
+ list1.append(-1)
+ list2.append(int(vlist2[i]))
+ elif len(vlist2) <= i or len(vlist2[i]) == 0:
+ list1.append(int(vlist1[i]))
+ list2.append(-1)
+ # Let's make life easy and use integers unless we're forced to use floats
+ elif (vlist1[i][0] != "0" and vlist2[i][0] != "0"):
+ list1.append(int(vlist1[i]))
+ list2.append(int(vlist2[i]))
+ # now we have to use floats so 1.02 compares correctly against 1.1
+ else:
+ list1.append(float("0."+vlist1[i]))
+ list2.append(float("0."+vlist2[i]))
+
+ # and now the final letter
+ if len(match1.group(5)):
+ list1.append(ord(match1.group(5)))
+ if len(match2.group(5)):
+ list2.append(ord(match2.group(5)))
+
+ for i in range(0, max(len(list1), len(list2))):
+ if len(list1) <= i:
+ vercmp_cache[mykey] = -1
+ return -1
+ elif len(list2) <= i:
+ vercmp_cache[mykey] = 1
+ return 1
+ elif list1[i] != list2[i]:
+ vercmp_cache[mykey] = list1[i] - list2[i]
+ return list1[i] - list2[i]
+
+ # main version is equal, so now compare the _suffix part
+ list1 = match1.group(6).split("_")[1:]
+ list2 = match2.group(6).split("_")[1:]
+
+ for i in range(0, max(len(list1), len(list2))):
+ if len(list1) <= i:
+ s1 = ("p","0")
+ else:
+ s1 = suffix_regexp.match(list1[i]).groups()
+ if len(list2) <= i:
+ s2 = ("p","0")
+ else:
+ s2 = suffix_regexp.match(list2[i]).groups()
+ if s1[0] != s2[0]:
+ return suffix_value[s1[0]] - suffix_value[s2[0]]
+ if s1[1] != s2[1]:
+			# s1[1] or s2[1] may be an empty string (bare suffix
+			# with no number); in that case, treat it as 0.
+ try: r1 = int(s1[1])
+ except ValueError: r1 = 0
+ try: r2 = int(s2[1])
+ except ValueError: r2 = 0
+ return r1 - r2
+
+	# the suffix part is equal, so finally check the revision
+ if match1.group(10):
+ r1 = int(match1.group(10))
+ else:
+ r1 = 0
+ if match2.group(10):
+ r2 = int(match2.group(10))
+ else:
+ r2 = 0
+ vercmp_cache[mykey] = r1 - r2
+ return r1 - r2
+
+def pkgcmp(pkg1, pkg2):
+ """
+ Compare 2 package versions created in pkgsplit format.
+
+ Example usage:
+ >>> from portage_versions import *
+ >>> pkgcmp(pkgsplit('test-1.0-r1'),pkgsplit('test-1.2-r3'))
+ -1
+ >>> pkgcmp(pkgsplit('test-1.3'),pkgsplit('test-1.2-r3'))
+ 1
+
+ @param pkg1: package to compare with
+ @type pkg1: list (example: ['test', '1.0', 'r1'])
+	@param pkg2: package to compare against
+ @type pkg2: list (example: ['test', '1.0', 'r1'])
+ @rtype: None or integer
+ @return:
+ 1. None if package names are not the same
+ 2. 1 if pkg1 is greater than pkg2
+ 3. -1 if pkg1 is less than pkg2
+ 4. 0 if pkg1 equals pkg2
+ """
+ if pkg1[0] != pkg2[0]:
+ return None
+ mycmp=vercmp(pkg1[1],pkg2[1])
+ if mycmp>0:
+ return 1
+ if mycmp<0:
+ return -1
+ r1=float(pkg1[2][1:])
+ r2=float(pkg2[2][1:])
+ if r1>r2:
+ return 1
+ if r2>r1:
+ return -1
+ return 0
+
+
+pkgcache={}
+
+def pkgsplit(mypkg,silent=1):
+ try:
+ if not pkgcache[mypkg]:
+ return None
+ return pkgcache[mypkg][:]
+ except KeyError:
+ pass
+ myparts=mypkg.split("-")
+
+ if len(myparts)<2:
+ if not silent:
+ print "!!! Name error in",mypkg+": missing a version or name part."
+ pkgcache[mypkg]=None
+ return None
+ for x in myparts:
+ if len(x)==0:
+ if not silent:
+ print "!!! Name error in",mypkg+": empty \"-\" part."
+ pkgcache[mypkg]=None
+ return None
+
+ #verify rev
+ revok=0
+ myrev=myparts[-1]
+ if len(myrev) and myrev[0]=="r":
+ try:
+ int(myrev[1:])
+ revok=1
+ except ValueError: # from int()
+ pass
+ if revok:
+ verPos = -2
+ revision = myparts[-1]
+ else:
+ verPos = -1
+ revision = "r0"
+
+ if ververify(myparts[verPos]):
+ if len(myparts)== (-1*verPos):
+ pkgcache[mypkg]=None
+ return None
+ else:
+ for x in myparts[:verPos]:
+ if ververify(x):
+ pkgcache[mypkg]=None
+ return None
+ #names can't have versiony looking parts
+ myval=["-".join(myparts[:verPos]),myparts[verPos],revision]
+ pkgcache[mypkg]=myval
+ return myval
+ else:
+ pkgcache[mypkg]=None
+ return None
+
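+# Usage sketch (follows from the splitting rules above):
+#   pkgsplit("foo-1.0-r1") -> ["foo", "1.0", "r1"]
+#   pkgsplit("foo-1.0")    -> ["foo", "1.0", "r0"]   (implicit r0)
+#   pkgsplit("foo")        -> None                   (no version part)
+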
+_valid_category = re.compile("^\w[\w-]*")
+
+catcache={}
+def catpkgsplit(mydata,silent=1):
+ """
+ Takes a Category/Package-Version-Rev and returns a list of each.
+
+ @param mydata: Data to split
+ @type mydata: string
+ @param silent: suppress error messages
+ @type silent: Boolean (integer)
+	@rtype: list
+ @return:
+ 1. If each exists, it returns [cat, pkgname, version, rev]
+	   2. If cat is not specified in mydata, cat will be "null"
+	   3. If rev does not exist it will be 'r0'
+ 4. If cat is invalid (specified but has incorrect syntax)
+ an InvalidData Exception will be thrown
+ """
+
+	# Categories may contain a-zA-Z0-9+_- but cannot start with -
+ global _valid_category
+ import portage_dep
+ try:
+ if not catcache[mydata]:
+ return None
+ return catcache[mydata][:]
+ except KeyError:
+ pass
+ mysplit=mydata.split("/")
+ p_split=None
+ if len(mysplit)==1:
+ retval=["null"]
+ p_split=pkgsplit(mydata,silent=silent)
+ elif len(mysplit)==2:
+ if portage_dep._dep_check_strict and \
+ not _valid_category.match(mysplit[0]):
+ raise InvalidData("Invalid category in %s" %mydata )
+ retval=[mysplit[0]]
+ p_split=pkgsplit(mysplit[1],silent=silent)
+ if not p_split:
+ catcache[mydata]=None
+ return None
+ retval.extend(p_split)
+ catcache[mydata]=retval
+ return retval
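+
+# Usage sketch:
+#   catpkgsplit("sys-apps/portage-2.1-r1") -> ["sys-apps", "portage", "2.1", "r1"]
+#   catpkgsplit("portage-2.1") -> ["null", "portage", "2.1", "r0"]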
+
+def catsplit(mydep):
+ return mydep.split("/", 1)
+
+def best(mymatches):
+ """Accepts None arguments; assumes matches are valid."""
+ if mymatches is None:
+ return ""
+ if not len(mymatches):
+ return ""
+ bestmatch = mymatches[0]
+ p2 = catpkgsplit(bestmatch)[1:]
+ for x in mymatches[1:]:
+ p1 = catpkgsplit(x)[1:]
+ if pkgcmp(p1, p2) > 0:
+ bestmatch = x
+ p2 = catpkgsplit(bestmatch)[1:]
+ return bestmatch
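+
+# Usage sketch: the highest version among matches of the same package wins, e.g.
+#   best(["cat/pkg-1.0", "cat/pkg-1.2-r1"]) -> "cat/pkg-1.2-r1"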
diff --git a/pym/portage/xpak.py b/pym/portage/xpak.py
new file mode 100644
index 00000000..b7ef582e
--- /dev/null
+++ b/pym/portage/xpak.py
@@ -0,0 +1,421 @@
+# Copyright 2001-2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id$
+
+
+# The format for a tbz2/xpak:
+#
+# tbz2: tar.bz2 + xpak + (xpak_offset) + "STOP"
+# xpak: "XPAKPACK" + (index_len) + (data_len) + index + data + "XPAKSTOP"
+# index: (pathname_len) + pathname + (data_offset) + (data_len)
+# index entries are concatenated end-to-end.
+# data: concatenated data chunks, end-to-end.
+#
+# [tarball]XPAKPACKIIIIDDDD[index][data]XPAKSTOPOOOOSTOP
+#
+# (integer) == encodeint(integer) ===> 4 characters (big-endian copy)
+# '+' means concatenate the fields ===> All chunks are strings
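+#
+# A worked example (hypothetical content): an xpak holding a single file "A"
+# with data "BB" would serialize, using encodeint() below, as:
+#   "XPAKPACK" + encodeint(13) + encodeint(2)           (13-byte index, 2-byte data)
+#   + encodeint(1) + "A" + encodeint(0) + encodeint(2)  (index entry for "A")
+#   + "BB" + "XPAKSTOP"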
+
+import sys,os,shutil,errno
+from stat import *
+
+def addtolist(mylist,curdir):
+ """(list, dir) --- Takes an array(list) and appends all files from dir down
+ the directory tree. Returns nothing. list is modified."""
+ for x in os.listdir("."):
+ if os.path.isdir(x):
+ os.chdir(x)
+ addtolist(mylist,curdir+x+"/")
+ os.chdir("..")
+ else:
+ if curdir+x not in mylist:
+ mylist.append(curdir+x)
+
+def encodeint(myint):
+ """Takes a 4 byte integer and converts it into a string of 4 characters.
+ Returns the characters in a string."""
+ part1=chr((myint >> 24 ) & 0x000000ff)
+ part2=chr((myint >> 16 ) & 0x000000ff)
+ part3=chr((myint >> 8 ) & 0x000000ff)
+ part4=chr(myint & 0x000000ff)
+ return part1+part2+part3+part4
+
+def decodeint(mystring):
+ """Takes a 4 byte string and converts it into a 4 byte integer.
+ Returns an integer."""
+ myint=0
+ myint=myint+ord(mystring[3])
+ myint=myint+(ord(mystring[2]) << 8)
+ myint=myint+(ord(mystring[1]) << 16)
+ myint=myint+(ord(mystring[0]) << 24)
+ return myint
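+
+# Example round trip: decodeint(encodeint(1000)) == 1000; encodeint(1)
+# yields "\x00\x00\x00\x01" (big-endian byte order).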
+
+def xpak(rootdir,outfile=None):
+ """(rootdir,outfile) -- creates an xpak segment of the directory 'rootdir'
+ and under the name 'outfile' if it is specified. Otherwise it returns the
+ xpak segment."""
+ try:
+ origdir=os.getcwd()
+ except SystemExit, e:
+ raise
+ except:
+ os.chdir("/")
+ origdir="/"
+ os.chdir(rootdir)
+ mylist=[]
+
+ addtolist(mylist,"")
+ mylist.sort()
+ mydata = {}
+ for x in mylist:
+ a = open(x, "r")
+ mydata[x] = a.read()
+ a.close()
+ os.chdir(origdir)
+
+ xpak_segment = xpak_mem(mydata)
+ if outfile:
+ outf = open(outfile, "w")
+ outf.write(xpak_segment)
+ outf.close()
+ else:
+ return xpak_segment
+
+def xpak_mem(mydata):
+ """Create an xpack segement from a map object."""
+ indexglob=""
+ indexpos=0
+ dataglob=""
+ datapos=0
+ for x, newglob in mydata.iteritems():
+ mydatasize=len(newglob)
+ indexglob=indexglob+encodeint(len(x))+x+encodeint(datapos)+encodeint(mydatasize)
+ indexpos=indexpos+4+len(x)+4+4
+ dataglob=dataglob+newglob
+ datapos=datapos+mydatasize
+ return "XPAKPACK" \
+ + encodeint(len(indexglob)) \
+ + encodeint(len(dataglob)) \
+ + indexglob \
+ + dataglob \
+ + "XPAKSTOP"
+
+def xsplit(infile):
+ """(infile) -- Splits the infile into two files.
+ 'infile.index' contains the index segment.
+	'infile.dat' contains the data segment."""
+ myfile=open(infile,"r")
+ mydat=myfile.read()
+ myfile.close()
+
+ splits = xsplit_mem(mydat)
+ if not splits:
+ return False
+
+ myfile=open(infile+".index","w")
+ myfile.write(splits[0])
+ myfile.close()
+ myfile=open(infile+".dat","w")
+ myfile.write(splits[1])
+ myfile.close()
+ return True
+
+def xsplit_mem(mydat):
+ if mydat[0:8]!="XPAKPACK":
+ return None
+ if mydat[-8:]!="XPAKSTOP":
+ return None
+ indexsize=decodeint(mydat[8:12])
+ datasize=decodeint(mydat[12:16])
+ return (mydat[16:indexsize+16], mydat[indexsize+16:-8])
+
+def getindex(infile):
+ """(infile) -- grabs the index segment from the infile and returns it."""
+ myfile=open(infile,"r")
+ myheader=myfile.read(16)
+ if myheader[0:8]!="XPAKPACK":
+ myfile.close()
+ return
+ indexsize=decodeint(myheader[8:12])
+ myindex=myfile.read(indexsize)
+ myfile.close()
+ return myindex
+
+def getboth(infile):
+ """(infile) -- grabs the index and data segments from the infile.
+ Returns an array [indexSegment,dataSegment]"""
+ myfile=open(infile,"r")
+ myheader=myfile.read(16)
+ if myheader[0:8]!="XPAKPACK":
+ myfile.close()
+ return
+ indexsize=decodeint(myheader[8:12])
+ datasize=decodeint(myheader[12:16])
+ myindex=myfile.read(indexsize)
+ mydata=myfile.read(datasize)
+ myfile.close()
+ return myindex, mydata
+
+def listindex(myindex):
+ """Print to the terminal the filenames listed in the indexglob passed in."""
+ for x in getindex_mem(myindex):
+ print x
+
+def getindex_mem(myindex):
+ """Returns the filenames listed in the indexglob passed in."""
+ myindexlen=len(myindex)
+ startpos=0
+ myret=[]
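+	# each index entry is 4 bytes of name length, the name itself, a 4-byte
+	# data offset and a 4-byte data length, hence the (mytestlen + 12) stride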
+ while ((startpos+8)<myindexlen):
+ mytestlen=decodeint(myindex[startpos:startpos+4])
+ myret=myret+[myindex[startpos+4:startpos+4+mytestlen]]
+ startpos=startpos+mytestlen+12
+ return myret
+
+def searchindex(myindex,myitem):
+ """(index,item) -- Finds the offset and length of the file 'item' in the
+ datasegment via the index 'index' provided."""
+ mylen=len(myitem)
+ myindexlen=len(myindex)
+ startpos=0
+ while ((startpos+8)<myindexlen):
+ mytestlen=decodeint(myindex[startpos:startpos+4])
+ if mytestlen==mylen:
+ if myitem==myindex[startpos+4:startpos+4+mytestlen]:
+ #found
+ datapos=decodeint(myindex[startpos+4+mytestlen:startpos+8+mytestlen]);
+ datalen=decodeint(myindex[startpos+8+mytestlen:startpos+12+mytestlen]);
+ return datapos, datalen
+ startpos=startpos+mytestlen+12
+
+def getitem(myid,myitem):
+ myindex=myid[0]
+ mydata=myid[1]
+ myloc=searchindex(myindex,myitem)
+ if not myloc:
+ return None
+ return mydata[myloc[0]:myloc[0]+myloc[1]]
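+
+# Usage sketch (assuming "pkg.xpak" is a bare xpak file on disk):
+#   myid = getboth("pkg.xpak")
+#   category = getitem(myid, "CATEGORY")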
+
+def xpand(myid,mydest):
+ myindex=myid[0]
+ mydata=myid[1]
+ try:
+ origdir=os.getcwd()
+ except SystemExit, e:
+ raise
+ except:
+ os.chdir("/")
+ origdir="/"
+ os.chdir(mydest)
+ myindexlen=len(myindex)
+ startpos=0
+ while ((startpos+8)<myindexlen):
+ namelen=decodeint(myindex[startpos:startpos+4])
+ datapos=decodeint(myindex[startpos+4+namelen:startpos+8+namelen]);
+ datalen=decodeint(myindex[startpos+8+namelen:startpos+12+namelen]);
+ myname=myindex[startpos+4:startpos+4+namelen]
+ dirname=os.path.dirname(myname)
+ if dirname:
+ if not os.path.exists(dirname):
+ os.makedirs(dirname)
+ mydat=open(myname,"w")
+ mydat.write(mydata[datapos:datapos+datalen])
+ mydat.close()
+ startpos=startpos+namelen+12
+ os.chdir(origdir)
+
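+# Usage sketch for the tbz2 class below (hypothetical path):
+#   t = tbz2("/path/to/pkg.tbz2")
+#   t.filelist()                 # names stored in the xpak index
+#   t.getfile("USE")             # one file from the data segment
+#   t.decompose("/tmp/pkg-meta") # unpack the xpak into a directory
+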
+class tbz2:
+ def __init__(self,myfile):
+ self.file=myfile
+ self.filestat=None
+ self.index=""
+ self.infosize=0
+ self.xpaksize=0
+ self.indexsize=None
+ self.datasize=None
+ self.indexpos=None
+ self.datapos=None
+ self.scan()
+
+ def decompose(self,datadir,cleanup=1):
+ """Alias for unpackinfo() --- Complement to recompose() but optionally
+ deletes the destination directory. Extracts the xpak from the tbz2 into
+ the directory provided. Raises IOError if scan() fails.
+		Returns result of unpackinfo()."""
+ if not self.scan():
+ raise IOError
+ if cleanup:
+ self.cleanup(datadir)
+ if not os.path.exists(datadir):
+ os.makedirs(datadir)
+ return self.unpackinfo(datadir)
+ def compose(self,datadir,cleanup=0):
+ """Alias for recompose()."""
+		return self.recompose(datadir,cleanup)
+ def recompose(self,datadir,cleanup=0):
+ """Creates an xpak segment from the datadir provided, truncates the tbz2
+ to the end of regular data if an xpak segment already exists, and adds
+ the new segment to the file with terminating info."""
+ xpdata = xpak(datadir)
+ self.recompose_mem(xpdata)
+ if cleanup:
+ self.cleanup(datadir)
+
+ def recompose_mem(self, xpdata):
+ self.scan() # Don't care about condition... We'll rewrite the data anyway.
+ myfile=open(self.file,"a+")
+ if not myfile:
+ raise IOError
+ myfile.seek(-self.xpaksize,2) # 0,2 or -0,2 just mean EOF.
+ myfile.truncate()
+ myfile.write(xpdata+encodeint(len(xpdata))+"STOP")
+ myfile.flush()
+ myfile.close()
+ return 1
+
+ def cleanup(self, datadir):
+ datadir_split = os.path.split(datadir)
+ if len(datadir_split) >= 2 and len(datadir_split[1]) > 0:
+ # This is potentially dangerous,
+ # thus the above sanity check.
+ try:
+ shutil.rmtree(datadir)
+ except OSError, oe:
+ if oe.errno == errno.ENOENT:
+ pass
+ else:
+ raise oe
+
+ def scan(self):
+ """Scans the tbz2 to locate the xpak segment and setup internal values.
+ This function is called by relevant functions already."""
+ try:
+ mystat=os.stat(self.file)
+ if self.filestat:
+ changed=0
+ for x in [ST_SIZE, ST_MTIME, ST_CTIME]:
+ if mystat[x] != self.filestat[x]:
+ changed=1
+ if not changed:
+ return 1
+ self.filestat=mystat
+ a=open(self.file,"r")
+ a.seek(-16,2)
+ trailer=a.read()
+ self.infosize=0
+ self.xpaksize=0
+ if trailer[-4:]!="STOP":
+ a.close()
+ return 0
+ if trailer[0:8]!="XPAKSTOP":
+ a.close()
+ return 0
+ self.infosize=decodeint(trailer[8:12])
+ self.xpaksize=self.infosize+8
+ a.seek(-(self.xpaksize),2)
+ header=a.read(16)
+ if header[0:8]!="XPAKPACK":
+ a.close()
+ return 0
+ self.indexsize=decodeint(header[8:12])
+ self.datasize=decodeint(header[12:16])
+ self.indexpos=a.tell()
+ self.index=a.read(self.indexsize)
+ self.datapos=a.tell()
+ a.close()
+ return 2
+ except SystemExit, e:
+ raise
+ except:
+ return 0
+
+ def filelist(self):
+ """Return an array of each file listed in the index."""
+ if not self.scan():
+ return None
+ return getindex_mem(self.index)
+
+ def getfile(self,myfile,mydefault=None):
+ """Finds 'myfile' in the data segment and returns it."""
+ if not self.scan():
+ return None
+ myresult=searchindex(self.index,myfile)
+ if not myresult:
+ return mydefault
+ a=open(self.file,"r")
+ a.seek(self.datapos+myresult[0],0)
+ myreturn=a.read(myresult[1])
+ a.close()
+ return myreturn
+
+ def getelements(self,myfile):
+ """A split/array representation of tbz2.getfile()"""
+ mydat=self.getfile(myfile)
+ if not mydat:
+ return []
+ return mydat.split()
+
+ def unpackinfo(self,mydest):
+ """Unpacks all the files from the dataSegment into 'mydest'."""
+ if not self.scan():
+ return 0
+ try:
+ origdir=os.getcwd()
+ except SystemExit, e:
+ raise
+ except:
+ os.chdir("/")
+ origdir="/"
+ a=open(self.file,"r")
+ if not os.path.exists(mydest):
+ os.makedirs(mydest)
+ os.chdir(mydest)
+ startpos=0
+ while ((startpos+8)<self.indexsize):
+ namelen=decodeint(self.index[startpos:startpos+4])
+ datapos=decodeint(self.index[startpos+4+namelen:startpos+8+namelen]);
+ datalen=decodeint(self.index[startpos+8+namelen:startpos+12+namelen]);
+ myname=self.index[startpos+4:startpos+4+namelen]
+ dirname=os.path.dirname(myname)
+ if dirname:
+ if not os.path.exists(dirname):
+ os.makedirs(dirname)
+ mydat=open(myname,"w")
+ a.seek(self.datapos+datapos)
+ mydat.write(a.read(datalen))
+ mydat.close()
+ startpos=startpos+namelen+12
+ a.close()
+ os.chdir(origdir)
+ return 1
+
+ def get_data(self):
+ """Returns all the files from the dataSegment as a map object."""
+ if not self.scan():
+ return 0
+ a = open(self.file, "r")
+ mydata = {}
+ startpos=0
+ while ((startpos+8)<self.indexsize):
+ namelen=decodeint(self.index[startpos:startpos+4])
+ datapos=decodeint(self.index[startpos+4+namelen:startpos+8+namelen]);
+ datalen=decodeint(self.index[startpos+8+namelen:startpos+12+namelen]);
+ myname=self.index[startpos+4:startpos+4+namelen]
+ a.seek(self.datapos+datapos)
+ mydata[myname] = a.read(datalen)
+ startpos=startpos+namelen+12
+ a.close()
+ return mydata
+
+ def getboth(self):
+ """Returns an array [indexSegment,dataSegment]"""
+ if not self.scan():
+ return None
+
+ a = open(self.file,"r")
+ a.seek(self.datapos)
+		mydata = a.read(self.datasize)
+ a.close()
+
+ return self.index, mydata
+
diff --git a/pym/portage_checksum.py b/pym/portage_checksum.py
index 7f1a89c8..90dfb342 100644..120000
--- a/pym/portage_checksum.py
+++ b/pym/portage_checksum.py
@@ -1,219 +1 @@
-# portage_checksum.py -- core Portage functionality
-# Copyright 1998-2004 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-# $Id$
-
-
-from portage_const import PRIVATE_PATH,PRELINK_BINARY,HASHING_BLOCKSIZE
-import os
-import errno
-import shutil
-import stat
-import portage_exception
-import portage_exec
-import portage_util
-import portage_locks
-import commands
-import sha
-
-
-# actual hash functions first
-
-#dict of all available hash functions
-hashfunc_map = {}
-
-# We _try_ to load this module. If it fails we do the slightly slower fallback.
-try:
- import fchksum
-
- def md5hash(filename):
- return fchksum.fmd5t(filename)
-
-except ImportError:
- import md5
- def md5hash(filename):
- return pyhash(filename, md5)
-hashfunc_map["MD5"] = md5hash
-
-def sha1hash(filename):
- return pyhash(filename, sha)
-hashfunc_map["SHA1"] = sha1hash
-
-# Keep pycrypto optional for now, there are no internal fallbacks for these
-try:
- import Crypto.Hash.SHA256
-
- def sha256hash(filename):
- return pyhash(filename, Crypto.Hash.SHA256)
- hashfunc_map["SHA256"] = sha256hash
-except ImportError:
- pass
-
-try:
- import Crypto.Hash.RIPEMD
-
- def rmd160hash(filename):
- return pyhash(filename, Crypto.Hash.RIPEMD)
- hashfunc_map["RMD160"] = rmd160hash
-except ImportError:
- pass
-
-def getsize(filename):
- size = os.stat(filename).st_size
- return (size, size)
-hashfunc_map["size"] = getsize
-
-# end actual hash functions
-
-prelink_capable = False
-if os.path.exists(PRELINK_BINARY):
- results = commands.getstatusoutput(PRELINK_BINARY+" --version > /dev/null 2>&1")
- if (results[0] >> 8) == 0:
- prelink_capable=1
- del results
-
-def perform_md5(x, calc_prelink=0):
- return perform_checksum(x, "MD5", calc_prelink)[0]
-
-def perform_all(x, calc_prelink=0):
- mydict = {}
- for k in hashfunc_map.keys():
-	mydict[k] = perform_checksum(x, k, calc_prelink)[0]
- return mydict
-
-def get_valid_checksum_keys():
- return hashfunc_map.keys()
-
-def verify_all(filename, mydict, calc_prelink=0, strict=0):
- """
- Verify all checksums against a file.
-
- @param filename: File to run the checksums against
- @type filename: String
- @param calc_prelink: Whether or not to reverse prelink before running the checksum
- @type calc_prelink: Integer
- @param strict: Enable/Disable strict checking (which stops exactly at a checksum failure and throws an exception)
- @type strict: Integer
- @rtype: Tuple
- @return: Result of the checks and possible message:
- 1) If size fails, False, and a tuple containing a message, the given size, and the actual size
- 2) If there is an os error, False, and a tuple containing the system error followed by 2 nulls
- 3) If a checksum fails, False and a tuple containing a message, the given hash, and the actual hash
- 4) If all checks succeed, return True and a fake reason
- """
- # Dict relates to single file only.
- # returns: (passed,reason)
- file_is_ok = True
- reason = "Reason unknown"
- try:
- mysize = os.stat(filename)[stat.ST_SIZE]
- if mydict["size"] != mysize:
- return False,("Filesize does not match recorded size", mysize, mydict["size"])
- except OSError, e:
- if e.errno == errno.ENOENT:
- raise portage_exception.FileNotFound(filename)
- return False, (str(e), None, None)
- for x in mydict.keys():
- if x == "size":
- continue
- elif x in hashfunc_map.keys():
- myhash = perform_checksum(filename, x, calc_prelink=calc_prelink)[0]
- if mydict[x] != myhash:
- if strict:
- raise portage_exception.DigestException, "Failed to verify '$(file)s' on checksum type '%(type)s'" % {"file":filename, "type":x}
- else:
- file_is_ok = False
- reason = (("Failed on %s verification" % x), myhash,mydict[x])
- break
- return file_is_ok,reason
-
-def pyhash(filename, hashobject):
- """
- Run a checksum against a file.
-
- @param filename: File to run the checksum against
- @type filename: String
-	@param hashobject: The hash object that will execute the checksum on the file
-	@type hashobject: Object
- @return: The hash and size of the data
- """
- f = open(filename, 'rb')
- blocksize = HASHING_BLOCKSIZE
- data = f.read(blocksize)
- size = 0L
- sum = hashobject.new()
- while data:
- sum.update(data)
- size = size + len(data)
- data = f.read(blocksize)
- f.close()
-
- return (sum.hexdigest(), size)
-
-def perform_checksum(filename, hashname="MD5", calc_prelink=0):
- """
- Run a specific checksum against a file.
-
- @param filename: File to run the checksum against
- @type filename: String
- @param hashname: The type of hash function to run
- @type hashname: String
- @param calc_prelink: Whether or not to reverse prelink before running the checksum
- @type calc_prelink: Integer
- @rtype: Tuple
- @return: The hash and size of the data
- """
- myfilename = filename[:]
- prelink_tmpfile = os.path.join("/", PRIVATE_PATH, "prelink-checksum.tmp." + str(os.getpid()))
- mylock = None
- try:
- if calc_prelink and prelink_capable:
- mylock = portage_locks.lockfile(prelink_tmpfile, wantnewlockfile=1)
- # Create non-prelinked temporary file to checksum.
- # Files rejected by prelink are summed in place.
- retval = portage_exec.spawn([PRELINK_BINARY, "--undo", "-o",
- prelink_tmpfile, filename], fd_pipes={})
- if retval == os.EX_OK:
- myfilename = prelink_tmpfile
- try:
- if hashname not in hashfunc_map:
- raise portage_exception.DigestException(hashname + \
- " hash function not available (needs dev-python/pycrypto)")
- myhash, mysize = hashfunc_map[hashname](myfilename)
- except (OSError, IOError), e:
- if e.errno == errno.ENOENT:
- raise portage_exception.FileNotFound(myfilename)
- raise
- if calc_prelink and prelink_capable:
- try:
- os.unlink(prelink_tmpfile)
- except OSError, e:
- if e.errno != errno.ENOENT:
- raise
- del e
- return myhash, mysize
- finally:
- if mylock:
- portage_locks.unlockfile(mylock)
-
-def perform_multiple_checksums(filename, hashes=["MD5"], calc_prelink=0):
- """
- Run a group of checksums against a file.
-
- @param filename: File to run the checksums against
- @type filename: String
- @param hashes: A list of checksum functions to run against the file
-	@type hashes: List
- @param calc_prelink: Whether or not to reverse prelink before running the checksum
- @type calc_prelink: Integer
- @rtype: Tuple
- @return: A dictionary in the form:
- return_value[hash_name] = (hash_result,size)
- for each given checksum
- """
- rVal = {}
- for x in hashes:
- if x not in hashfunc_map:
- raise portage_exception.DigestException, x+" hash function not available (needs dev-python/pycrypto)"
- rVal[x] = perform_checksum(filename, x, calc_prelink)[0]
- return rVal
+portage/checksum.py
\ No newline at end of file
diff --git a/pym/portage_const.py b/pym/portage_const.py
index e1af7cb4..aac7e46c 100644..120000
--- a/pym/portage_const.py
+++ b/pym/portage_const.py
@@ -1,65 +1 @@
-# portage: Constants
-# Copyright 1998-2004 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-# $Id$
-
-
-# ===========================================================================
-# START OF CONSTANTS -- START OF CONSTANTS -- START OF CONSTANTS -- START OF
-# ===========================================================================
-
-import os
-
-VDB_PATH = "var/db/pkg"
-PRIVATE_PATH = "var/lib/portage"
-CACHE_PATH = "/var/cache/edb"
-DEPCACHE_PATH = CACHE_PATH+"/dep"
-
-USER_CONFIG_PATH = "/etc/portage"
-MODULES_FILE_PATH = USER_CONFIG_PATH+"/modules"
-CUSTOM_PROFILE_PATH = USER_CONFIG_PATH+"/profile"
-
-#PORTAGE_BASE_PATH = "/usr/lib/portage"
-PORTAGE_BASE_PATH = os.path.join(os.sep, os.sep.join(__file__.split(os.sep)[:-2]))
-PORTAGE_BIN_PATH = PORTAGE_BASE_PATH+"/bin"
-PORTAGE_PYM_PATH = PORTAGE_BASE_PATH+"/pym"
-NEWS_LIB_PATH = "/var/lib/gentoo"
-PROFILE_PATH = "/etc/make.profile"
-LOCALE_DATA_PATH = PORTAGE_BASE_PATH+"/locale"
-
-EBUILD_SH_BINARY = PORTAGE_BIN_PATH+"/ebuild.sh"
-MISC_SH_BINARY = PORTAGE_BIN_PATH + "/misc-functions.sh"
-SANDBOX_BINARY = "/usr/bin/sandbox"
-BASH_BINARY = "/bin/bash"
-MOVE_BINARY = "/bin/mv"
-PRELINK_BINARY = "/usr/sbin/prelink"
-
-WORLD_FILE = PRIVATE_PATH + "/world"
-MAKE_CONF_FILE = "/etc/make.conf"
-MAKE_DEFAULTS_FILE = PROFILE_PATH + "/make.defaults"
-DEPRECATED_PROFILE_FILE = PROFILE_PATH+"/deprecated"
-USER_VIRTUALS_FILE = USER_CONFIG_PATH+"/virtuals"
-EBUILD_SH_ENV_FILE = USER_CONFIG_PATH+"/bashrc"
-INVALID_ENV_FILE = "/etc/spork/is/not/valid/profile.env"
-CUSTOM_MIRRORS_FILE = USER_CONFIG_PATH+"/mirrors"
-CONFIG_MEMORY_FILE = PRIVATE_PATH + "/config"
-COLOR_MAP_FILE = USER_CONFIG_PATH + "/color.map"
-
-REPO_NAME_FILE = "repo_name"
-REPO_NAME_LOC = "profiles" + "/" + REPO_NAME_FILE
-
-INCREMENTALS=["USE","USE_EXPAND","USE_EXPAND_HIDDEN","FEATURES","ACCEPT_KEYWORDS","ACCEPT_LICENSE","CONFIG_PROTECT_MASK","CONFIG_PROTECT","PRELINK_PATH","PRELINK_PATH_MASK"]
-EBUILD_PHASES = ["setup", "unpack", "compile", "test", "install",
- "preinst", "postinst", "prerm", "postrm", "other"]
-
-EAPI = 0
-
-HASHING_BLOCKSIZE = 32768
-MANIFEST1_HASH_FUNCTIONS = ["MD5","SHA256","RMD160"]
-MANIFEST2_HASH_FUNCTIONS = ["SHA1","SHA256","RMD160"]
-MANIFEST2_REQUIRED_HASH = "SHA1"
-
-MANIFEST2_IDENTIFIERS = ["AUX","MISC","DIST","EBUILD"]
-# ===========================================================================
-# END OF CONSTANTS -- END OF CONSTANTS -- END OF CONSTANTS -- END OF CONSTANT
-# ===========================================================================
+portage/const.py
\ No newline at end of file
diff --git a/pym/portage_data.py b/pym/portage_data.py
index 707c76b2..55990ebc 100644..120000
--- a/pym/portage_data.py
+++ b/pym/portage_data.py
@@ -1,126 +1 @@
-# portage_data.py -- Calculated/Discovered Data Values
-# Copyright 1998-2004 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-# $Id$
-
-if not hasattr(__builtins__, "set"):
- from sets import Set as set
-
-import os,sys,pwd,grp
-from portage_util import writemsg
-from output import green,red
-from output import create_color_func
-bad = create_color_func("BAD")
-
-ostype=os.uname()[0]
-
-lchown = None
-if ostype=="Linux" or ostype.lower().endswith("gnu"):
- userland="GNU"
- os.environ["XARGS"]="xargs -r"
-elif ostype == "Darwin":
- userland="Darwin"
- os.environ["XARGS"]="xargs"
- def lchown(*pos_args, **key_args):
- pass
-elif ostype.endswith("BSD") or ostype =="DragonFly":
- userland="BSD"
- os.environ["XARGS"]="xargs"
-else:
- writemsg(red("Operating system")+" \""+ostype+"\" "+red("currently unsupported. Exiting.")+"\n")
- sys.exit(1)
-
-if not lchown:
- if "lchown" in dir(os):
- # Included in python-2.3
- lchown = os.lchown
- else:
- try:
- import missingos
- lchown = missingos.lchown
- except ImportError:
-			def lchown(*pos_args, **key_args):
-				writemsg(red("!!!") + " It seems that os.lchown does not" + \
-					" exist. Please rebuild python.\n", noiselevel=-1)
-
-os.environ["USERLAND"]=userland
-
-def portage_group_warning():
- warn_prefix = bad("*** WARNING *** ")
- mylines = [
- "For security reasons, only system administrators should be",
- "allowed in the portage group. Untrusted users or processes",
- "can potentially exploit the portage group for attacks such as",
- "local privilege escalation."
- ]
- for x in mylines:
- writemsg(warn_prefix, noiselevel=-1)
- writemsg(x, noiselevel=-1)
- writemsg("\n", noiselevel=-1)
- writemsg("\n", noiselevel=-1)
-
-# Portage has 3 security levels that depend on the uid and gid of the main
-# process and are assigned according to the following table:
-#
-# Privileges secpass uid gid
-# normal 0 any any
-# group 1 any portage_gid
-# super 2 0 any
-#
-# If the "wheel" group does not exist then wheelgid falls back to 0.
-# If the "portage" group does not exist then portage_uid falls back to wheelgid.
-
-secpass=0
-
-uid=os.getuid()
-wheelgid=0
-
-if uid==0:
- secpass=2
-try:
- wheelgid=grp.getgrnam("wheel")[2]
-except KeyError:
- writemsg("portage initialization: your system doesn't have a 'wheel' group.\n")
- writemsg("Please fix this as it is a normal system requirement. 'wheel' is GID 10\n")
- writemsg("`emerge baselayout` and a config update with dispatch-conf, etc-update\n")
- writemsg("or cfg-update should remedy this problem.\n")
- pass
-
-#Discover the uid and gid of the portage user/group
-try:
- portage_uid=pwd.getpwnam("portage")[2]
- portage_gid=grp.getgrnam("portage")[2]
- if secpass < 1 and portage_gid in os.getgroups():
- secpass=1
-except KeyError:
- portage_uid=0
- portage_gid=0
- writemsg("\n")
- writemsg( red("portage: 'portage' user or group missing. Please update baselayout\n"))
- writemsg( red(" and merge portage user(250) and group(250) into your passwd\n"))
- writemsg( red(" and group files. Non-root compilation is disabled until then.\n"))
- writemsg( " Also note that non-root/wheel users will need to be added to\n")
- writemsg( " the portage group to do portage commands.\n")
- writemsg("\n")
- writemsg( " For the defaults, line 1 goes into passwd, and 2 into group.\n")
- writemsg(green(" portage:x:250:250:portage:/var/tmp/portage:/bin/false\n"))
- writemsg(green(" portage::250:portage\n"))
- writemsg("\n")
- portage_group_warning()
-
-userpriv_groups = [portage_gid]
-if secpass >= 2:
- # Get a list of group IDs for the portage user. Do not use grp.getgrall()
- # since it is known to trigger spurious SIGPIPE problems with nss_ldap.
- from commands import getstatusoutput
- mystatus, myoutput = getstatusoutput("id -G portage")
- if mystatus == os.EX_OK:
- for x in myoutput.split():
- try:
- userpriv_groups.append(int(x))
- except ValueError:
- pass
- del x
- userpriv_groups = list(set(userpriv_groups))
- del getstatusoutput, mystatus, myoutput
+portage/data.py
\ No newline at end of file
diff --git a/pym/portage_debug.py b/pym/portage_debug.py
index 2ee8bcf2..52af90d3 100644..120000
--- a/pym/portage_debug.py
+++ b/pym/portage_debug.py
@@ -1,115 +1 @@
-# Copyright 1999-2006 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-# $Header: $
-
-import os, sys, threading
-
-import portage_const
-from portage_util import writemsg
-
-def set_trace(on=True):
- if on:
- t = trace_handler()
- threading.settrace(t.event_handler)
- sys.settrace(t.event_handler)
- else:
- sys.settrace(None)
- threading.settrace(None)
-
-class trace_handler(object):
-
- def __init__(self):
- python_system_paths = []
- for x in sys.path:
- if os.path.basename(x).startswith("python2."):
- python_system_paths.append(x)
-
- self.ignore_prefixes = []
- for x in python_system_paths:
- self.ignore_prefixes.append(x + os.sep)
-
- self.trim_filename = prefix_trimmer(os.path.join(portage_const.PORTAGE_BASE_PATH, "pym") + os.sep).trim
- self.show_local_lines = False
- self.max_repr_length = 200
-
- def event_handler(self, *args):
- frame, event, arg = args
- if "line" == event:
- if self.show_local_lines:
- self.trace_line(*args)
- else:
- if not self.ignore_filename(frame.f_code.co_filename):
- self.trace_event(*args)
- return self.event_handler
-
- def trace_event(self, frame, event, arg):
- writemsg("%s line=%d name=%s event=%s %slocals=%s\n" % \
- (self.trim_filename(frame.f_code.co_filename),
- frame.f_lineno,
- frame.f_code.co_name,
- event,
- self.arg_repr(frame, event, arg),
- self.locals_repr(frame, event, arg)))
-
- def arg_repr(self, frame, event, arg):
- my_repr = None
- if "return" == event:
- my_repr = repr(arg)
- if len(my_repr) > self.max_repr_length:
- my_repr = "'omitted'"
- return "value=%s " % my_repr
- elif "exception" == event:
- my_repr = repr(arg[1])
- if len(my_repr) > self.max_repr_length:
- my_repr = "'omitted'"
- return "type=%s value=%s " % (arg[0], my_repr)
-
- return ""
-
- def trace_line(self, frame, event, arg):
- writemsg("%s line=%d\n" % (self.trim_filename(frame.f_code.co_filename), frame.f_lineno))
-
- def ignore_filename(self, filename):
- if filename:
- for x in self.ignore_prefixes:
- if filename.startswith(x):
- return True
- return False
-
- def locals_repr(self, frame, event, arg):
- """Create a representation of the locals dict that is suitable for
- tracing output."""
-
- my_locals = frame.f_locals.copy()
-
- # prevent unsafe __repr__ call on self when __init__ is called
- # (method calls aren't safe until after __init__ has completed).
- if frame.f_code.co_name == "__init__" and "self" in my_locals:
- my_locals["self"] = "omitted"
-
- # We omit items that will lead to unreasonable bloat of the trace
- # output (and resulting log file).
- for k, v in my_locals.iteritems():
- my_repr = repr(v)
- if len(my_repr) > self.max_repr_length:
- my_locals[k] = "omitted"
- return my_locals
-
-class prefix_trimmer(object):
- def __init__(self, prefix):
- self.prefix = prefix
- self.cut_index = len(prefix)
- self.previous = None
- self.previous_trimmed = None
-
- def trim(self, s):
- """Remove a prefix from the string and return the result.
- The previous result is automatically cached."""
- if s == self.previous:
- return self.previous_trimmed
- else:
- if s.startswith(self.prefix):
- self.previous_trimmed = s[self.cut_index:]
- else:
- self.previous_trimmed = s
- return self.previous_trimmed
+portage/debug.py
\ No newline at end of file
diff --git a/pym/portage_dep.py b/pym/portage_dep.py
index bf40452a..e16bb2a7 100644..120000
--- a/pym/portage_dep.py
+++ b/pym/portage_dep.py
@@ -1,646 +1 @@
-# deps.py -- Portage dependency resolution functions
-# Copyright 2003-2004 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-# $Id$
-
-
-# DEPEND SYNTAX:
-#
-# 'use?' only affects the immediately following word!
-# Nesting is the only legal way to form multiple '[!]use?' requirements.
-#
-# Where: 'a' and 'b' are use flags, and 'z' is a depend atom.
-#
-# "a? z" -- If 'a' in [use], then b is valid.
-# "a? ( z )" -- Syntax with parenthesis.
-# "a? b? z" -- Deprecated.
-# "a? ( b? z )" -- Valid
-# "a? ( b? ( z ) ) -- Valid
-#
-
-import re, sys, types
-import portage_exception
-from portage_exception import InvalidData
-from portage_versions import catpkgsplit, catsplit, pkgcmp, pkgsplit, ververify
-
-def cpvequal(cpv1, cpv2):
- split1 = catpkgsplit(cpv1)
- split2 = catpkgsplit(cpv2)
-
- if not split1 or not split2:
- raise portage_exception.PortageException("Invalid data '%s, %s', parameter was not a CPV" % (cpv1, cpv2))
-
- if split1[0] != split2[0]:
- return False
-
- return (pkgcmp(split1[1:], split2[1:]) == 0)
-
-def strip_empty(myarr):
- """
- Strip all empty elements from an array
-
- @param myarr: The list of elements
- @type myarr: List
- @rtype: Array
- @return: The array with empty elements removed
- """
- for x in range(len(myarr)-1, -1, -1):
- if not myarr[x]:
- del myarr[x]
- return myarr
-
-def paren_reduce(mystr,tokenize=1):
- """
- Take a string and convert all paren enclosed entities into sublists, optionally
-	further splitting the list elements by spaces.
-
- Example usage:
- >>> paren_reduce('foobar foo ( bar baz )',1)
- ['foobar', 'foo', ['bar', 'baz']]
- >>> paren_reduce('foobar foo ( bar baz )',0)
- ['foobar foo ', [' bar baz ']]
-
- @param mystr: The string to reduce
- @type mystr: String
-	@param tokenize: Split on spaces to produce further list breakdown
- @type tokenize: Integer
- @rtype: Array
- @return: The reduced string in an array
- """
- mylist = []
- while mystr:
- if ("(" not in mystr) and (")" not in mystr):
- freesec = mystr
- subsec = None
- tail = ""
- elif mystr[0] == ")":
- return [mylist,mystr[1:]]
- elif ("(" in mystr) and (mystr.index("(") < mystr.index(")")):
- freesec,subsec = mystr.split("(",1)
- subsec,tail = paren_reduce(subsec,tokenize)
- else:
- subsec,tail = mystr.split(")",1)
- if tokenize:
- subsec = strip_empty(subsec.split(" "))
- return [mylist+subsec,tail]
- return mylist+[subsec],tail
- mystr = tail
- if freesec:
- if tokenize:
- mylist = mylist + strip_empty(freesec.split(" "))
- else:
- mylist = mylist + [freesec]
- if subsec is not None:
- mylist = mylist + [subsec]
- return mylist
-
-def paren_enclose(mylist):
- """
- Convert a list to a string with sublists enclosed with parens.
-
- Example usage:
- >>> test = ['foobar','foo',['bar','baz']]
- >>> paren_enclose(test)
- 'foobar foo ( bar baz )'
-
- @param mylist: The list
- @type mylist: List
- @rtype: String
- @return: The paren enclosed string
- """
- mystrparts = []
- for x in mylist:
- if isinstance(x, list):
- mystrparts.append("( "+paren_enclose(x)+" )")
- else:
- mystrparts.append(x)
- return " ".join(mystrparts)
-
-# This is just for use by emerge so that it can enable a backward compatibility
-# mode in order to gracefully deal with installed packages that have invalid
-# atoms or dep syntax.
-_dep_check_strict = True
-
-def use_reduce(deparray, uselist=[], masklist=[], matchall=0, excludeall=[]):
- """
- Takes a paren_reduce'd array and reduces the use? conditionals out
- leaving an array with subarrays
-
- @param deparray: paren_reduce'd list of deps
- @type deparray: List
- @param uselist: List of use flags
- @type uselist: List
- @param masklist: List of masked flags
- @type masklist: List
- @param matchall: Resolve all conditional deps unconditionally. Used by repoman
- @type matchall: Integer
- @rtype: List
- @return: The use reduced depend array
- """
- # Quick validity checks
- for x in range(len(deparray)):
- if deparray[x] in ["||","&&"]:
- if len(deparray) - 1 == x or not isinstance(deparray[x+1], list):
- raise portage_exception.InvalidDependString(deparray[x]+" missing atom list in \""+paren_enclose(deparray)+"\"")
- if deparray and deparray[-1] and deparray[-1][-1] == "?":
- raise portage_exception.InvalidDependString("Conditional without target in \""+paren_enclose(deparray)+"\"")
-
- global _dep_check_strict
-
- mydeparray = deparray[:]
- rlist = []
- while mydeparray:
- head = mydeparray.pop(0)
-
- if type(head) == types.ListType:
- additions = use_reduce(head, uselist, masklist, matchall, excludeall)
- if additions:
- rlist.append(additions)
- elif rlist and rlist[-1] == "||":
- #XXX: Currently some DEPEND strings have || lists without default atoms.
- # raise portage_exception.InvalidDependString("No default atom(s) in \""+paren_enclose(deparray)+"\"")
- rlist.append([])
-
- else:
- if head[-1] == "?": # Use reduce next group on fail.
- # Pull any other use conditions and the following atom or list into a separate array
- newdeparray = [head]
- while isinstance(newdeparray[-1], str) and newdeparray[-1][-1] == "?":
- if mydeparray:
- newdeparray.append(mydeparray.pop(0))
- else:
- raise ValueError, "Conditional with no target."
-
- # Deprecation checks
- warned = 0
- if len(newdeparray[-1]) == 0:
- sys.stderr.write("Note: Empty target in string. (Deprecated)\n")
- warned = 1
- if len(newdeparray) != 2:
- sys.stderr.write("Note: Nested use flags without parenthesis (Deprecated)\n")
- warned = 1
- if warned:
- sys.stderr.write(" --> "+" ".join(map(str,[head]+newdeparray))+"\n")
-
- # Check that each flag matches
- ismatch = True
- for head in newdeparray[:-1]:
- head = head[:-1]
- if head[0] == "!":
- head_key = head[1:]
- if not matchall and head_key in uselist or \
- head_key in excludeall:
- ismatch = False
- break
- elif head not in masklist:
- if not matchall and head not in uselist:
- ismatch = False
- break
- else:
- ismatch = False
-
- # If they all match, process the target
- if ismatch:
- target = newdeparray[-1]
- if isinstance(target, list):
- additions = use_reduce(target, uselist, masklist, matchall, excludeall)
- if additions:
- rlist.append(additions)
- elif not _dep_check_strict:
- # The old deprecated behavior.
- rlist.append(target)
- else:
- raise portage_exception.InvalidDependString(
- "Conditional without parenthesis: '%s?'" % head)
-
- else:
- rlist += [head]
-
- return rlist
-
-
-def dep_opconvert(deplist):
- """
- Iterate recursively through a list of deps, if the
- dep is a '||' or '&&' operator, combine it with the
- list of deps that follows..
-
- Example usage:
- >>> test = ["blah", "||", ["foo", "bar", "baz"]]
- >>> dep_opconvert(test)
- ['blah', ['||', 'foo', 'bar', 'baz']]
-
- @param deplist: A list of deps to format
-	@type deplist: List
- @rtype: List
- @return:
- The new list with the new ordering
- """
-
- retlist = []
- x = 0
- while x != len(deplist):
- if isinstance(deplist[x], list):
- retlist.append(dep_opconvert(deplist[x]))
- elif deplist[x] == "||" or deplist[x] == "&&":
- retlist.append([deplist[x]] + dep_opconvert(deplist[x+1]))
- x += 1
- else:
- retlist.append(deplist[x])
- x += 1
- return retlist
-
-def get_operator(mydep):
- """
- Return the operator used in a depstring.
-
- Example usage:
- >>> from portage_dep import *
- >>> get_operator(">=test-1.0")
- '>='
-
- @param mydep: The dep string to check
- @type mydep: String
- @rtype: String
- @return: The operator. One of:
-		'~', '=', '>', '<', '=*', '>=', '<=', or None if no operator is present
- """
- if mydep[0] == "~":
- operator = "~"
- elif mydep[0] == "=":
- if mydep[-1] == "*":
- operator = "=*"
- else:
- operator = "="
- elif mydep[0] in "><":
- if len(mydep) > 1 and mydep[1] == "=":
- operator = mydep[0:2]
- else:
- operator = mydep[0]
- else:
- operator = None
-
- return operator
-
-_dep_getcpv_cache = {}
-
-def dep_getcpv(mydep):
- """
- Return the category-package-version with any operators/slot specifications stripped off
-
- Example usage:
- >>> dep_getcpv('>=media-libs/test-3.0')
- 'media-libs/test-3.0'
-
- @param mydep: The depstring
- @type mydep: String
- @rtype: String
- @return: The depstring with the operator removed
- """
- global _dep_getcpv_cache
- retval = _dep_getcpv_cache.get(mydep, None)
- if retval is not None:
- return retval
- mydep_orig = mydep
- if mydep and mydep[0] == "*":
- mydep = mydep[1:]
- if mydep and mydep[-1] == "*":
- mydep = mydep[:-1]
- if mydep and mydep[0] == "!":
- mydep = mydep[1:]
- if mydep[:2] in [">=", "<="]:
- mydep = mydep[2:]
- elif mydep[:1] in "=<>~":
- mydep = mydep[1:]
- colon = mydep.rfind(":")
- if colon != -1:
- mydep = mydep[:colon]
- _dep_getcpv_cache[mydep_orig] = mydep
- return mydep
-
-def dep_getslot(mydep):
- """
- Retrieve the slot on a depend.
-
- Example usage:
- >>> dep_getslot('app-misc/test:3')
- '3'
-
- @param mydep: The depstring to retrieve the slot of
- @type mydep: String
- @rtype: String
- @return: The slot
- """
- colon = mydep.rfind(":")
- if colon != -1:
- return mydep[colon+1:]
- return None
-
-_invalid_atom_chars_regexp = re.compile("[()|?]")
-
-def isvalidatom(atom, allow_blockers=False):
- """
- Check to see if a depend atom is valid
-
- Example usage:
- >>> isvalidatom('media-libs/test-3.0')
- 0
- >>> isvalidatom('>=media-libs/test-3.0')
- 1
-
- @param atom: The depend atom to check against
- @type atom: String
- @rtype: Integer
- @return: One of the following:
- 1) 0 if the atom is invalid
- 2) 1 if the atom is valid
- """
- global _invalid_atom_chars_regexp
- if _invalid_atom_chars_regexp.search(atom):
- return 0
- if allow_blockers and atom.startswith("!"):
- atom = atom[1:]
- try:
- mycpv_cps = catpkgsplit(dep_getcpv(atom))
- except InvalidData:
- return 0
- operator = get_operator(atom)
- if operator:
- if operator[0] in "<>" and atom[-1] == "*":
- return 0
- if mycpv_cps and mycpv_cps[0] != "null":
- # >=cat/pkg-1.0
- return 1
- else:
- # >=cat/pkg or >=pkg-1.0 (no category)
- return 0
- if mycpv_cps:
- # cat/pkg-1.0
- return 0
-
- if (len(atom.split('/')) == 2):
- # cat/pkg
- return 1
- else:
- return 0
-
-def isjustname(mypkg):
- """
- Checks to see if the depstring is only the package name (no version parts)
-
- Example usage:
- >>> isjustname('media-libs/test-3.0')
- 0
- >>> isjustname('test')
- 1
- >>> isjustname('media-libs/test')
- 1
-
- @param mypkg: The package atom to check
-	@type mypkg: String
- @rtype: Integer
- @return: One of the following:
- 1) 0 if the package string is not just the package name
- 2) 1 if it is
- """
- myparts = mypkg.split('-')
- for x in myparts:
- if ververify(x):
- return 0
- return 1
-
-iscache = {}
-
-def isspecific(mypkg):
- """
- Checks to see if a package is in category/package-version or package-version format,
- possibly returning a cached result.
-
- Example usage:
- >>> isspecific('media-libs/test')
- 0
- >>> isspecific('media-libs/test-3.0')
- 1
-
- @param mypkg: The package depstring to check against
- @type mypkg: String
- @rtype: Integer
- @return: One of the following:
- 1) 0 if the package string is not specific
- 2) 1 if it is
- """
- try:
- return iscache[mypkg]
- except KeyError:
- pass
- mysplit = mypkg.split("/")
- if not isjustname(mysplit[-1]):
- iscache[mypkg] = 1
- return 1
- iscache[mypkg] = 0
- return 0
-
-def dep_getkey(mydep):
- """
- Return the category/package-name of a depstring.
-
- Example usage:
- >>> dep_getkey('media-libs/test-3.0')
- 'media-libs/test'
-
- @param mydep: The depstring to retrieve the category/package-name of
- @type mydep: String
- @rtype: String
- @return: The package category/package-version
- """
- mydep = dep_getcpv(mydep)
- if mydep and isspecific(mydep):
- mysplit = catpkgsplit(mydep)
- if not mysplit:
- return mydep
- return mysplit[0] + "/" + mysplit[1]
- else:
- return mydep
-
-def match_to_list(mypkg, mylist):
- """
-	Searches the list for entries that match the package.
-
- @param mypkg: The package atom to match
- @type mypkg: String
- @param mylist: The list of package atoms to compare against
-	@type mylist: List
- @rtype: List
- @return: A unique list of package atoms that match the given package atom
- """
- matches = []
- for x in mylist:
- if match_from_list(x, [mypkg]):
- if x not in matches:
- matches.append(x)
- return matches
-
-def best_match_to_list(mypkg, mylist):
- """
- Returns the most specific entry that matches the package given.
-
- @param mypkg: The package atom to check
- @type mypkg: String
- @param mylist: The list of package atoms to check against
- @type mylist: List
- @rtype: String
- @return: The package atom which best matches given the following ordering:
- - =cpv 6
- - ~cpv 5
- - =cpv* 4
- - cp:slot 3
- - >cpv 2
- - <cpv 2
- - >=cpv 2
- - <=cpv 2
- - cp 1
- """
- operator_values = {'=':6, '~':5, '=*':4,
- '>':2, '<':2, '>=':2, '<=':2, None:1}
- maxvalue = 0
- bestm = None
- for x in match_to_list(mypkg, mylist):
- if dep_getslot(x) is not None:
- if maxvalue < 3:
- maxvalue = 3
- bestm = x
- continue
- op_val = operator_values[get_operator(x)]
- if op_val > maxvalue:
- maxvalue = op_val
- bestm = x
- return bestm
-
-_match_from_list_cache = {}
-
-def match_from_list(mydep, candidate_list):
- """
-	Searches the list for entries that match the package.
-
- @param mydep: The package atom to match
- @type mydep: String
- @param candidate_list: The list of package atoms to compare against
-	@type candidate_list: List
- @rtype: List
- @return: A list of package atoms that match the given package atom
- """
-
- global _match_from_list_cache
- cache_key = (mydep, tuple(candidate_list))
- mylist = _match_from_list_cache.get(cache_key, None)
- if mylist is not None:
- return mylist[:]
-
- from portage_util import writemsg
- if mydep[0] == "!":
- mydep = mydep[1:]
-
- mycpv = dep_getcpv(mydep)
- mycpv_cps = catpkgsplit(mycpv) # Can be None if not specific
- slot = None
-
- if not mycpv_cps:
- cat, pkg = catsplit(mycpv)
- ver = None
- rev = None
- slot = dep_getslot(mydep)
- else:
- cat, pkg, ver, rev = mycpv_cps
- if mydep == mycpv:
- raise KeyError("Specific key requires an operator" + \
- " (%s) (try adding an '=')" % (mydep))
-
- if ver and rev:
- operator = get_operator(mydep)
- if not operator:
- writemsg("!!! Invalid atom: %s\n" % mydep, noiselevel=-1)
- return []
- else:
- operator = None
-
- mylist = []
-
- if operator is None:
- for x in candidate_list:
- xs = pkgsplit(x)
- if xs is None:
- xcpv = dep_getcpv(x)
- if slot is not None:
- xslot = dep_getslot(x)
- if xslot is not None and xslot != slot:
- """ This function isn't given enough information to
- reject atoms based on slot unless *both* compared atoms
- specify slots."""
- continue
- if xcpv != mycpv:
- continue
- elif xs[0] != mycpv:
- continue
- mylist.append(x)
-
- elif operator == "=": # Exact match
- mylist = [cpv for cpv in candidate_list if cpvequal(cpv, mycpv)]
-
- elif operator == "=*": # glob match
- # XXX: Nasty special casing for leading zeros
- # Required as =* is a literal prefix match, so can't
- # use vercmp
- mysplit = catpkgsplit(mycpv)
- myver = mysplit[2].lstrip("0")
- if not myver or not myver[0].isdigit():
- myver = "0"+myver
- mycpv = mysplit[0]+"/"+mysplit[1]+"-"+myver
- for x in candidate_list:
- xs = catpkgsplit(x)
- myver = xs[2].lstrip("0")
- if not myver or not myver[0].isdigit():
- myver = "0"+myver
- xcpv = xs[0]+"/"+xs[1]+"-"+myver
- if xcpv.startswith(mycpv):
- mylist.append(x)
-
- elif operator == "~": # version, any revision, match
- for x in candidate_list:
- xs = catpkgsplit(x)
- if xs is None:
- raise InvalidData(x)
- if not cpvequal(xs[0]+"/"+xs[1]+"-"+xs[2], mycpv_cps[0]+"/"+mycpv_cps[1]+"-"+mycpv_cps[2]):
- continue
- if xs[2] != ver:
- continue
- mylist.append(x)
-
- elif operator in [">", ">=", "<", "<="]:
- mysplit = ["%s/%s" % (cat, pkg), ver, rev]
- for x in candidate_list:
- try:
- result = pkgcmp(pkgsplit(x), mysplit)
- except ValueError: # pkgcmp may return ValueError during int() conversion
- writemsg("\nInvalid package name: %s\n" % x, noiselevel=-1)
- raise
- if result is None:
- continue
- elif operator == ">":
- if result > 0:
- mylist.append(x)
- elif operator == ">=":
- if result >= 0:
- mylist.append(x)
- elif operator == "<":
- if result < 0:
- mylist.append(x)
- elif operator == "<=":
- if result <= 0:
- mylist.append(x)
- else:
- raise KeyError("Unknown operator: %s" % mydep)
- else:
- raise KeyError("Unknown operator: %s" % mydep)
-
- _match_from_list_cache[cache_key] = mylist
- return mylist
+portage/dep.py
\ No newline at end of file
diff --git a/pym/portage_exception.py b/pym/portage_exception.py
index 4be72cf9..c3c090d0 100644..120000
--- a/pym/portage_exception.py
+++ b/pym/portage_exception.py
@@ -1,100 +1 @@
-# Copyright 1998-2004 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-# $Id$
-
-
-class PortageException(Exception):
- """General superclass for portage exceptions"""
- def __init__(self,value):
- self.value = value[:]
- def __str__(self):
- if isinstance(self.value, basestring):
- return self.value
- else:
- return repr(self.value)
-
-class CorruptionError(PortageException):
- """Corruption indication"""
-
-class InvalidDependString(PortageException):
- """An invalid depend string has been encountered"""
-
-class InvalidVersionString(PortageException):
- """An invalid version string has been encountered"""
-
-class SecurityViolation(PortageException):
- """An incorrect formatting was passed instead of the expected one"""
-
-class IncorrectParameter(PortageException):
- """A parameter of the wrong type was passed"""
-
-class MissingParameter(PortageException):
- """A parameter is required for the action requested but was not passed"""
-
-class ParseError(PortageException):
- """An error was generated while attempting to parse the request"""
-
-class InvalidData(PortageException):
- """An incorrect formatting was passed instead of the expected one"""
-
-class InvalidDataType(PortageException):
- """An incorrect type was passed instead of the expected one"""
-
-class InvalidLocation(PortageException):
- """Data was not found when it was expected to exist or was specified incorrectly"""
-
-class FileNotFound(InvalidLocation):
- """A file was not found when it was expected to exist"""
-
-class DirectoryNotFound(InvalidLocation):
- """A directory was not found when it was expected to exist"""
-
-class OperationNotPermitted(PortageException):
- """An operation was not permitted operating system"""
-
-class PermissionDenied(PortageException):
- """Permission denied"""
-
-class ReadOnlyFileSystem(PortageException):
- """Read-only file system"""
-
-class CommandNotFound(PortageException):
- """A required binary was not available or executable"""
-
-
-class PortagePackageException(PortageException):
- """Malformed or missing package data"""
-
-class PackageNotFound(PortagePackageException):
- """Missing Ebuild or Binary"""
-
-class InvalidPackageName(PortagePackageException):
- """Malformed package name"""
-
-class InvalidAtom(PortagePackageException):
- """Malformed atom spec"""
-
-class UnsupportedAPIException(PortagePackageException):
- """Unsupported API"""
- def __init__(self, cpv, eapi):
- self.cpv, self.eapi = cpv, eapi
- def __str__(self):
- return "Unable to do any operations on '%s', due to the fact it's EAPI is higher then this portage versions. Please upgrade to a portage version that supports EAPI %s" % (self.cpv, self.eapi)
-
-
-
-class SignatureException(PortageException):
- """Signature was not present in the checked file"""
-
-class DigestException(SignatureException):
- """A problem exists in the digest"""
-
-class MissingSignature(SignatureException):
- """Signature was not present in the checked file"""
-
-class InvalidSignature(SignatureException):
- """Signature was checked and was not a valid, current, nor trusted signature"""
-
-class UntrustedSignature(SignatureException):
- """Signature was not certified to the desired security level"""
-
+portage/exception.py
\ No newline at end of file
diff --git a/pym/portage_exec.py b/pym/portage_exec.py
index 252fed2a..3a56f3db 100644..120000
--- a/pym/portage_exec.py
+++ b/pym/portage_exec.py
@@ -1,336 +1 @@
-# portage.py -- core Portage functionality
-# Copyright 1998-2004 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-# $Id$
-
-
-import os, atexit, signal, sys
-import portage_data
-
-from portage_util import dump_traceback
-from portage_const import BASH_BINARY, SANDBOX_BINARY
-
-
-try:
- import resource
- max_fd_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
-except ImportError:
- max_fd_limit = 256
-
-if os.path.isdir("/proc/%i/fd" % os.getpid()):
- def get_open_fds():
- return map(int, [fd for fd in os.listdir("/proc/%i/fd" % os.getpid()) if fd.isdigit()])
-else:
- def get_open_fds():
- return xrange(max_fd_limit)
-
-sandbox_capable = (os.path.isfile(SANDBOX_BINARY) and
- os.access(SANDBOX_BINARY, os.X_OK))
-
-def spawn_bash(mycommand, debug=False, opt_name=None, **keywords):
- """
-	Spawns a bash shell running a specific command
-
- @param mycommand: The command for bash to run
- @type mycommand: String
- @param debug: Turn bash debugging on (set -x)
- @type debug: Boolean
-	@param opt_name: Name of the spawned process (defaults to the binary name)
- @type opt_name: String
- @param keywords: Extra Dictionary arguments to pass to spawn
- @type keywords: Dictionary
- """
-
- args = [BASH_BINARY]
- if not opt_name:
- opt_name = os.path.basename(mycommand.split()[0])
- if debug:
- # Print commands and their arguments as they are executed.
- args.append("-x")
- args.append("-c")
- args.append(mycommand)
- return spawn(args, opt_name=opt_name, **keywords)
-
-def spawn_sandbox(mycommand, opt_name=None, **keywords):
- if not sandbox_capable:
- return spawn_bash(mycommand, opt_name=opt_name, **keywords)
- args=[SANDBOX_BINARY]
- if not opt_name:
- opt_name = os.path.basename(mycommand.split()[0])
- args.append(mycommand)
- return spawn(args, opt_name=opt_name, **keywords)
-
-_exithandlers = []
-def atexit_register(func, *args, **kargs):
- """Wrapper around atexit.register that is needed in order to track
- what is registered. For example, when portage restarts itself via
- os.execv, the atexit module does not work so we have to do it
- manually by calling the run_exitfuncs() function in this module."""
- _exithandlers.append((func, args, kargs))
-
-def run_exitfuncs():
- """This should behave identically to the routine performed by
- the atexit module at exit time. It's only necessary to call this
- function when atexit will not work (because of os.execv, for
- example)."""
-
- # This function is a copy of the private atexit._run_exitfuncs()
- # from the python 2.4.2 sources. The only difference from the
- # original function is in the output to stderr.
- exc_info = None
- while _exithandlers:
- func, targs, kargs = _exithandlers.pop()
- try:
- func(*targs, **kargs)
- except SystemExit:
- exc_info = sys.exc_info()
- except: # No idea what they called, so we need this broad except here.
- dump_traceback("Error in portage_exec.run_exitfuncs", noiselevel=0)
- exc_info = sys.exc_info()
-
- if exc_info is not None:
- raise exc_info[0], exc_info[1], exc_info[2]
-
-atexit.register(run_exitfuncs)
-
-# We need to make sure that any processes spawned are killed off when
-# we exit. spawn() takes care of adding and removing pids to this list
-# as it creates and cleans up processes.
-spawned_pids = []
-def cleanup():
- while spawned_pids:
- pid = spawned_pids.pop()
- try:
- if os.waitpid(pid, os.WNOHANG) == (0, 0):
- os.kill(pid, signal.SIGTERM)
- os.waitpid(pid, 0)
- except OSError:
- # This pid has been cleaned up outside
- # of spawn().
- pass
-
-atexit_register(cleanup)
-
-def spawn(mycommand, env={}, opt_name=None, fd_pipes=None, returnpid=False,
- uid=None, gid=None, groups=None, umask=None, logfile=None,
- path_lookup=True):
- """
- Spawns a given command.
-
- @param mycommand: the command to execute
- @type mycommand: String or List (Popen style list)
- @param env: A dict of Key=Value pairs for env variables
- @type env: Dictionary
-	@param opt_name: an optional name for the spawned process (defaults to the binary name)
-	@type opt_name: String
-	@param fd_pipes: A dict mapping file descriptors, e.g. { 0: stdin, 1: stdout }
-	@type fd_pipes: Dictionary
-	@param returnpid: Return the Process IDs for a successful spawn.
-	NOTE: This requires the caller to clean up all the PIDs, otherwise spawn will clean them.
-	@type returnpid: Boolean
-	@param uid: User ID to spawn as; useful for dropping privileges
-	@type uid: Integer
-	@param gid: Group ID to spawn as; useful for dropping privileges
-	@type gid: Integer
-	@param groups: Group IDs to spawn in; useful for having the process run in multiple group contexts.
-	@type groups: List
-	@param umask: An integer representing the umask for the process (see man chmod for umask details)
-	@type umask: Integer
-	@param logfile: name of a file to use for logging purposes
-	@type logfile: String
-	@param path_lookup: If the binary is not fully specified then look for it in PATH
-	@type path_lookup: Boolean
-
-	logfile requires stdout and stderr to be assigned to this process (i.e. not pointed
-	somewhere else).
-
- """
-
- # mycommand is either a str or a list
- if isinstance(mycommand, str):
- mycommand = mycommand.split()
-
- # If an absolute path to an executable file isn't given
- # search for it unless we've been told not to.
- binary = mycommand[0]
- if (not os.path.isabs(binary) or not os.path.isfile(binary)
- or not os.access(binary, os.X_OK)):
- binary = path_lookup and find_binary(binary) or None
- if not binary:
- return -1
-
- # If we haven't been told what file descriptors to use
-	# default to propagating our stdin, stdout and stderr.
- if fd_pipes is None:
- fd_pipes = {0:0, 1:1, 2:2}
-
- # mypids will hold the pids of all processes created.
- mypids = []
-
- if logfile:
- # Using a log file requires that stdout and stderr
- # are assigned to the process we're running.
- if 1 not in fd_pipes or 2 not in fd_pipes:
- raise ValueError(fd_pipes)
-
- # Create a pipe
- (pr, pw) = os.pipe()
-
- # Create a tee process, giving it our stdout and stderr
- # as well as the read end of the pipe.
- mypids.extend(spawn(('tee', '-i', '-a', logfile),
- returnpid=True, fd_pipes={0:pr,
- 1:fd_pipes[1], 2:fd_pipes[2]}))
-
- # We don't need the read end of the pipe, so close it.
- os.close(pr)
-
- # Assign the write end of the pipe to our stdout and stderr.
- fd_pipes[1] = pw
- fd_pipes[2] = pw
-
- pid = os.fork()
-
- if not pid:
- try:
- _exec(binary, mycommand, opt_name, fd_pipes,
- env, gid, groups, uid, umask)
- except Exception, e:
- # We need to catch _any_ exception so that it doesn't
-			# propagate out of this function and cause exiting
- # with anything other than os._exit()
- sys.stderr.write("%s:\n %s\n" % (e, " ".join(mycommand)))
- sys.stderr.flush()
- os._exit(1)
-
- # Add the pid to our local and the global pid lists.
- mypids.append(pid)
- spawned_pids.append(pid)
-
- # If we started a tee process the write side of the pipe is no
- # longer needed, so close it.
- if logfile:
- os.close(pw)
-
- # If the caller wants to handle cleaning up the processes, we tell
- # it about all processes that were created.
- if returnpid:
- return mypids
-
- # Otherwise we clean them up.
- while mypids:
-
- # Pull the last reader in the pipe chain. If all processes
- # in the pipe are well behaved, it will die when the process
- # it is reading from dies.
- pid = mypids.pop(0)
-
- # and wait for it.
- retval = os.waitpid(pid, 0)[1]
-
- # When it's done, we can remove it from the
- # global pid list as well.
- spawned_pids.remove(pid)
-
- if retval:
- # If it failed, kill off anything else that
- # isn't dead yet.
- for pid in mypids:
- if os.waitpid(pid, os.WNOHANG) == (0,0):
- os.kill(pid, signal.SIGTERM)
- os.waitpid(pid, 0)
- spawned_pids.remove(pid)
-
- # If it got a signal, return the signal that was sent.
- if (retval & 0xff):
- return ((retval & 0xff) << 8)
-
- # Otherwise, return its exit code.
- return (retval >> 8)
-
- # Everything succeeded
- return 0
-
-def _exec(binary, mycommand, opt_name, fd_pipes, env, gid, groups, uid, umask):
-
- """
- Execute a given binary with options
-
- @param binary: Name of program to execute
- @type binary: String
-	@param mycommand: The command with its arguments (argv-style list)
-	@type mycommand: List
- @param opt_name: Name of process (defaults to binary)
- @type opt_name: String
- @param fd_pipes: Mapping pipes to destination; { 0:0, 1:1, 2:2 }
- @type fd_pipes: Dictionary
- @param env: Key,Value mapping for Environmental Variables
- @type env: Dictionary
- @param gid: Group ID to run the process under
- @type gid: Integer
-	@param groups: Groups the process should be in.
-	@type groups: List
- @param uid: User ID to run the process under
- @type uid: Integer
- @param umask: an int representing a unix umask (see man chmod for umask details)
- @type umask: Integer
- @rtype: None
- @returns: Never returns (calls os.execve)
- """
-
- # If the process we're creating hasn't been given a name
- # assign it the name of the executable.
- if not opt_name:
- opt_name = os.path.basename(binary)
-
- # Set up the command's argument list.
- myargs = [opt_name]
- myargs.extend(mycommand[1:])
-
- # Set up the command's pipes.
- my_fds = {}
- # To protect from cases where direct assignment could
- # clobber needed fds ({1:2, 2:1}) we first dupe the fds
- # into unused fds.
- for fd in fd_pipes:
- my_fds[fd] = os.dup(fd_pipes[fd])
- # Then assign them to what they should be.
- for fd in my_fds:
- os.dup2(my_fds[fd], fd)
-	# Then close _all_ fds that haven't been explicitly
- # requested to be kept open.
- for fd in get_open_fds():
- if fd not in my_fds:
- try:
- os.close(fd)
- except OSError:
- pass
-
- # Set requested process permissions.
- if gid:
- os.setgid(gid)
- if groups:
- os.setgroups(groups)
- if uid:
- os.setuid(uid)
- if umask:
- os.umask(umask)
-
- # And switch to the new process.
- os.execve(binary, myargs, env)
-
-def find_binary(binary):
- """
- Given a binary name, find the binary in PATH
-
- @param binary: Name of the binary to find
-	@type binary: String
- @rtype: None or string
- @returns: full path to binary or None if the binary could not be located.
- """
-
- for path in os.getenv("PATH", "").split(":"):
- filename = "%s/%s" % (path, binary)
- if os.access(filename, os.X_OK) and os.path.isfile(filename):
- return filename
- return None
+portage/exec.py
\ No newline at end of file
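pym/portage_exec.py is now a symlink to portage/exec.py. The symlink is not just cosmetic here: exec is a reserved word, so "import portage.exec" is a SyntaxError and the old name remains the practical import path. A minimal usage sketch (commands and paths are illustrative, not part of this commit):

    import portage_exec   # resolves through the new symlink to portage/exec.py

    # argv-style spawn; blocks and returns the child's exit code,
    # or -1 if the binary cannot be located.
    rc = portage_exec.spawn(["/bin/date", "-u"], env={"TZ": "UTC"})

    # Shell one-liner; stdout and stderr are duplicated into the logfile via tee.
    rc = portage_exec.spawn_bash("umask > /dev/null", logfile="/tmp/spawn.log")

    # PATH lookup helper; returns the full path or None.
    print portage_exec.find_binary("bash")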
diff --git a/pym/portage_gpg.py b/pym/portage_gpg.py
index 04ed6004..dd5c1065 100644..120000
--- a/pym/portage_gpg.py
+++ b/pym/portage_gpg.py
@@ -1,149 +1 @@
-# portage_gpg.py -- core Portage functionality
-# Copyright 2004 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-# $Id$
-
-
-import os
-import copy
-import types
-import commands
-import portage_exception
-import portage_checksum
-
-GPG_BINARY = "/usr/bin/gpg"
-GPG_OPTIONS = " --lock-never --no-random-seed-file --no-greeting --no-sig-cache "
-GPG_VERIFY_FLAGS = " --verify "
-GPG_KEYDIR = " --homedir '%s' "
-GPG_KEYRING = " --keyring '%s' "
-
-UNTRUSTED = 0
-EXISTS = UNTRUSTED + 1
-MARGINAL = EXISTS + 1
-TRUSTED = MARGINAL + 1
-
-def fileStats(filepath):
- mya = []
- for x in os.stat(filepath):
- mya.append(x)
- mya.append(portage_checksum.perform_checksum(filepath))
- return mya
-
-
-class FileChecker:
- def __init__(self,keydir=None,keyring=None,requireSignedRing=False,minimumTrust=EXISTS):
-		self.minimumTrust = TRUSTED # By default we require full trust (for keyrings).
- self.keydir = None
- self.keyring = None
- self.keyringPath = None
- self.keyringStats = None
- self.keyringIsTrusted = False
-
- if (keydir != None):
- # Verify that the keydir is valid.
- if type(keydir) != types.StringType:
- raise portage_exception.InvalidDataType, "keydir argument: %s" % keydir
- if not os.path.isdir(keydir):
- raise portage_exception.DirectoryNotFound, "keydir: %s" % keydir
- self.keydir = copy.deepcopy(keydir)
-
- if (keyring != None):
- # Verify that the keyring is a valid filename and exists.
- if type(keyring) != types.StringType:
- raise portage_exception.InvalidDataType, "keyring argument: %s" % keyring
- if keyring.find("/") != -1:
- raise portage_exception.InvalidData, "keyring: %s" % keyring
- pathname = ""
- if keydir:
- pathname = keydir + "/" + keyring
- if not os.path.isfile(pathname):
- raise portage_exception.FileNotFound, "keyring missing: %s (dev.gentoo.org/~carpaski/gpg/)" % pathname
-
- keyringPath = keydir+"/"+keyring
-
- if not keyring or not keyringPath and requireSignedRing:
- raise portage_exception.MissingParameter
-
- self.keyringStats = fileStats(keyringPath)
- self.minimumTrust = TRUSTED
- if not self.verify(keyringPath, keyringPath+".asc"):
- self.keyringIsTrusted = False
- if requireSignedRing:
- raise portage_exception.InvalidSignature, "Required keyring verification: "+keyringPath
- else:
- self.keyringIsTrusted = True
-
- self.keyring = copy.deepcopy(keyring)
- self.keyringPath = self.keydir+"/"+self.keyring
- self.minimumTrust = minimumTrust
-
- def _verifyKeyring(self):
- if self.keyringStats and self.keyringPath:
- new_stats = fileStats(self.keyringPath)
- if new_stats != self.keyringStats:
- raise portage_exception.SecurityViolation, "GPG keyring changed!"
-
- def verify(self, filename, sigfile=None):
- """Uses minimumTrust to determine if it is Valid/True or Invalid/False"""
- self._verifyKeyring()
-
- if not os.path.isfile(filename):
- raise portage_exception.FileNotFound, filename
-
- if sigfile and not os.path.isfile(sigfile):
- raise portage_exception.FileNotFound, sigfile
-
- if self.keydir and not os.path.isdir(self.keydir):
- raise portage_exception.DirectoryNotFound, filename
-
- if self.keyringPath:
- if not os.path.isfile(self.keyringPath):
- raise portage_exception.FileNotFound, self.keyringPath
-
- if not os.path.isfile(filename):
- raise portage_exception.CommandNotFound, filename
-
- command = GPG_BINARY + GPG_VERIFY_FLAGS + GPG_OPTIONS
- if self.keydir:
- command += GPG_KEYDIR % (self.keydir)
- if self.keyring:
- command += GPG_KEYRING % (self.keyring)
-
- if sigfile:
- command += " '"+sigfile+"'"
- command += " '"+filename+"'"
-
- result,output = commands.getstatusoutput(command)
-
- signal = result & 0xff
- result = (result >> 8)
-
- if signal:
- raise SignalCaught, "Signal: %d" % (signal)
-
- trustLevel = UNTRUSTED
- if result == 0:
- trustLevel = TRUSTED
- #if output.find("WARNING") != -1:
- # trustLevel = MARGINAL
- if output.find("BAD") != -1:
- raise portage_exception.InvalidSignature, filename
- elif result == 1:
- trustLevel = EXISTS
- if output.find("BAD") != -1:
- raise portage_exception.InvalidSignature, filename
- elif result == 2:
- trustLevel = UNTRUSTED
- if output.find("could not be verified") != -1:
- raise portage_exception.MissingSignature, filename
- if output.find("public key not found") != -1:
- if self.keyringIsTrusted: # We trust the ring, but not the key specifically.
- trustLevel = MARGINAL
- else:
- raise portage_exception.InvalidSignature, filename+" (Unknown Signature)"
- else:
- raise portage_exception.UnknownCondition, "GPG returned unknown result: %d" % (result)
-
- if trustLevel >= self.minimumTrust:
- return True
- return False
+portage/gpg.py
\ No newline at end of file
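pym/portage_gpg.py likewise becomes a symlink to portage/gpg.py. A minimal, hypothetical sketch of the FileChecker API (keydir, keyring and file paths are illustrative):

    import portage_gpg   # resolves through the new symlink to portage/gpg.py

    checker = portage_gpg.FileChecker(
        keydir="/etc/portage/gpg",         # must be an existing directory
        keyring="trustedkeys.gpg",         # bare filename inside keydir
        minimumTrust=portage_gpg.MARGINAL)

    # verify() maps gpg's exit status onto the trust ladder
    # UNTRUSTED < EXISTS < MARGINAL < TRUSTED and returns True
    # only when the result reaches minimumTrust.
    ok = checker.verify("/usr/portage/metadata/timestamp.chk",
        "/usr/portage/metadata/timestamp.chk.asc")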
diff --git a/pym/portage_localization.py b/pym/portage_localization.py
index 59ccea71..3b57bfee 100644..120000
--- a/pym/portage_localization.py
+++ b/pym/portage_localization.py
@@ -1,21 +1 @@
-# portage_localization.py -- Code to manage/help portage localization.
-# Copyright 2004 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-# $Id$
-
-
-# We define this to make the transition easier for us.
-def _(mystr):
- return mystr
-
-
-def localization_example():
- # Dict references allow translators to rearrange word order.
- print _("You can use this string for translating.")
- print _("Strings can be formatted with %(mystr)s like this.") % {"mystr": "VALUES"}
-
- a_value = "value.of.a"
- b_value = 123
- c_value = [1,2,3,4]
- print _("A: %(a)s -- B: %(b)s -- C: %(c)s") % {"a":a_value,"b":b_value,"c":c_value}
-
+portage/localization.py
\ No newline at end of file
diff --git a/pym/portage_locks.py b/pym/portage_locks.py
index 28042e2f..58fef066 100644..120000
--- a/pym/portage_locks.py
+++ b/pym/portage_locks.py
@@ -1,312 +1 @@
-# portage: Lock management code
-# Copyright 2004 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-# $Id$
-
-
-import errno, os, stat, time, types
-from portage_exception import InvalidData, DirectoryNotFound, FileNotFound
-from portage_data import portage_gid
-from portage_util import writemsg
-from portage_localization import _
-
-HARDLINK_FD = -2
-
-def lockdir(mydir):
- return lockfile(mydir,wantnewlockfile=1)
-def unlockdir(mylock):
- return unlockfile(mylock)
-
-def lockfile(mypath,wantnewlockfile=0,unlinkfile=0):
-	"""Creates all dirs up to the given dir. Creates a lockfile
- for the given directory as the file: directoryname+'.portage_lockfile'."""
- import fcntl
-
- if not mypath:
- raise InvalidData, "Empty path given"
-
- if type(mypath) == types.StringType and mypath[-1] == '/':
- mypath = mypath[:-1]
-
- if type(mypath) == types.FileType:
- mypath = mypath.fileno()
- if type(mypath) == types.IntType:
- lockfilename = mypath
- wantnewlockfile = 0
- unlinkfile = 0
- elif wantnewlockfile:
- base, tail = os.path.split(mypath)
- lockfilename = os.path.join(base, "." + tail + ".portage_lockfile")
- del base, tail
- unlinkfile = 1
- else:
- lockfilename = mypath
-
- if type(mypath) == types.StringType:
- if not os.path.exists(os.path.dirname(mypath)):
- raise DirectoryNotFound, os.path.dirname(mypath)
- if not os.path.exists(lockfilename):
- old_mask=os.umask(000)
- myfd = os.open(lockfilename, os.O_CREAT|os.O_RDWR,0660)
- try:
- if os.stat(lockfilename).st_gid != portage_gid:
- os.chown(lockfilename,os.getuid(),portage_gid)
- except OSError, e:
- if e[0] == 2: # No such file or directory
- return lockfile(mypath,wantnewlockfile,unlinkfile)
- else:
-				writemsg("Cannot chown a lockfile. This could cause inconvenience later.\n")
- os.umask(old_mask)
- else:
- myfd = os.open(lockfilename, os.O_CREAT|os.O_RDWR,0660)
-
- elif type(mypath) == types.IntType:
- myfd = mypath
-
- else:
- raise ValueError, "Unknown type passed in '%s': '%s'" % (type(mypath),mypath)
-
- # try for a non-blocking lock, if it's held, throw a message
- # we're waiting on lockfile and use a blocking attempt.
- locking_method = fcntl.lockf
- try:
- fcntl.lockf(myfd,fcntl.LOCK_EX|fcntl.LOCK_NB)
- except IOError, e:
- if "errno" not in dir(e):
- raise
- if e.errno == errno.EAGAIN:
-			# resource temp unavailable; e.g., someone beat us to the lock.
- if type(mypath) == types.IntType:
- print "waiting for lock on fd %i" % myfd
- else:
- print "waiting for lock on %s" % lockfilename
- # try for the exclusive lock now.
- fcntl.lockf(myfd,fcntl.LOCK_EX)
- elif e.errno == errno.ENOLCK:
- # We're not allowed to lock on this FS.
- os.close(myfd)
- link_success = False
- if lockfilename == str(lockfilename):
- if wantnewlockfile:
- try:
- if os.stat(lockfilename)[stat.ST_NLINK] == 1:
- os.unlink(lockfilename)
- except OSError:
- pass
- link_success = hardlink_lockfile(lockfilename)
- if not link_success:
- raise
- locking_method = None
- myfd = HARDLINK_FD
- else:
- raise
-
-
- if type(lockfilename) == types.StringType and \
- myfd != HARDLINK_FD and os.fstat(myfd).st_nlink == 0:
- # The file was deleted on us... Keep trying to make one...
- os.close(myfd)
- writemsg("lockfile recurse\n",1)
- lockfilename,myfd,unlinkfile,locking_method = lockfile(mypath,wantnewlockfile,unlinkfile)
-
- writemsg(str((lockfilename,myfd,unlinkfile))+"\n",1)
- return (lockfilename,myfd,unlinkfile,locking_method)
-
-def unlockfile(mytuple):
- import fcntl
-
-	#XXX: Compatibility hack.
- if len(mytuple) == 3:
- lockfilename,myfd,unlinkfile = mytuple
- locking_method = fcntl.flock
- elif len(mytuple) == 4:
- lockfilename,myfd,unlinkfile,locking_method = mytuple
- else:
- raise InvalidData
-
- if(myfd == HARDLINK_FD):
- unhardlink_lockfile(lockfilename)
- return True
-
- # myfd may be None here due to myfd = mypath in lockfile()
- if type(lockfilename) == types.StringType and not os.path.exists(lockfilename):
- writemsg("lockfile does not exist '%s'\n" % lockfilename,1)
- if myfd is not None:
- os.close(myfd)
- return False
-
- try:
- if myfd is None:
- myfd = os.open(lockfilename, os.O_WRONLY,0660)
- unlinkfile = 1
- locking_method(myfd,fcntl.LOCK_UN)
- except OSError:
- if type(lockfilename) == types.StringType:
- os.close(myfd)
- raise IOError, "Failed to unlock file '%s'\n" % lockfilename
-
- try:
- # This sleep call was added to allow other processes that are
- # waiting for a lock to be able to grab it before it is deleted.
- # lockfile() already accounts for this situation, however, and
- # the sleep here adds more time than is saved overall, so am
- # commenting until it is proved necessary.
- #time.sleep(0.0001)
- if unlinkfile:
- locking_method(myfd,fcntl.LOCK_EX|fcntl.LOCK_NB)
- # We won the lock, so there isn't competition for it.
- # We can safely delete the file.
- writemsg("Got the lockfile...\n",1)
- if os.fstat(myfd).st_nlink == 1:
- os.unlink(lockfilename)
- writemsg("Unlinked lockfile...\n",1)
- locking_method(myfd,fcntl.LOCK_UN)
- else:
- writemsg("lockfile does not exist '%s'\n" % lockfilename,1)
- os.close(myfd)
- return False
- except Exception, e:
- writemsg("Failed to get lock... someone took it.\n",1)
- writemsg(str(e)+"\n",1)
-
- # why test lockfilename? because we may have been handed an
- # fd originally, and the caller might not like having their
- # open fd closed automatically on them.
- if type(lockfilename) == types.StringType:
- os.close(myfd)
-
- return True
-
-
-
-
-def hardlock_name(path):
- return path+".hardlock-"+os.uname()[1]+"-"+str(os.getpid())
-
-def hardlink_is_mine(link,lock):
- try:
- return os.stat(link).st_nlink == 2
- except OSError:
- return False
-
-def hardlink_lockfile(lockfilename, max_wait=14400):
- """Does the NFS, hardlink shuffle to ensure locking on the disk.
- We create a PRIVATE lockfile, that is just a placeholder on the disk.
- Then we HARDLINK the real lockfile to that private file.
-	If our file has 2 references, then we have the lock. :)
-	Otherwise we lather, rinse, and repeat.
- We default to a 4 hour timeout.
- """
-
- start_time = time.time()
- myhardlock = hardlock_name(lockfilename)
- reported_waiting = False
-
- while(time.time() < (start_time + max_wait)):
- # We only need it to exist.
- myfd = os.open(myhardlock, os.O_CREAT|os.O_RDWR,0660)
- os.close(myfd)
-
- if not os.path.exists(myhardlock):
- raise FileNotFound, _("Created lockfile is missing: %(filename)s") % {"filename":myhardlock}
-
- try:
- res = os.link(myhardlock, lockfilename)
- except OSError:
- pass
-
- if hardlink_is_mine(myhardlock, lockfilename):
- # We have the lock.
- if reported_waiting:
- print
- return True
-
- if reported_waiting:
- writemsg(".")
- else:
- reported_waiting = True
- print
- print "Waiting on (hardlink) lockfile: (one '.' per 3 seconds)"
- print "This is a feature to prevent distfiles corruption."
- print "/usr/lib/portage/bin/clean_locks can fix stuck locks."
- print "Lockfile: " + lockfilename
- time.sleep(3)
-
- os.unlink(myhardlock)
- return False
-
-def unhardlink_lockfile(lockfilename):
- myhardlock = hardlock_name(lockfilename)
- if hardlink_is_mine(myhardlock, lockfilename):
- # Make sure not to touch lockfilename unless we really have a lock.
- try:
- os.unlink(lockfilename)
- except OSError:
- pass
- try:
- os.unlink(myhardlock)
- except OSError:
- pass
-
-def hardlock_cleanup(path, remove_all_locks=False):
- mypid = str(os.getpid())
- myhost = os.uname()[1]
- mydl = os.listdir(path)
-
- results = []
- mycount = 0
-
- mylist = {}
- for x in mydl:
- if os.path.isfile(path+"/"+x):
- parts = x.split(".hardlock-")
- if len(parts) == 2:
- filename = parts[0]
- hostpid = parts[1].split("-")
- host = "-".join(hostpid[:-1])
- pid = hostpid[-1]
-
- if not mylist.has_key(filename):
- mylist[filename] = {}
- if not mylist[filename].has_key(host):
- mylist[filename][host] = []
- mylist[filename][host].append(pid)
-
- mycount += 1
-
-
- results.append("Found %(count)s locks" % {"count":mycount})
-
- for x in mylist.keys():
- if mylist[x].has_key(myhost) or remove_all_locks:
- mylockname = hardlock_name(path+"/"+x)
- if hardlink_is_mine(mylockname, path+"/"+x) or \
- not os.path.exists(path+"/"+x) or \
- remove_all_locks:
- for y in mylist[x].keys():
- for z in mylist[x][y]:
- filename = path+"/"+x+".hardlock-"+y+"-"+z
- if filename == mylockname:
- continue
- try:
- # We're sweeping through, unlinking everyone's locks.
- os.unlink(filename)
- results.append(_("Unlinked: ") + filename)
- except OSError:
- pass
- try:
- os.unlink(path+"/"+x)
- results.append(_("Unlinked: ") + path+"/"+x)
- os.unlink(mylockname)
- results.append(_("Unlinked: ") + mylockname)
- except OSError:
- pass
- else:
- try:
- os.unlink(mylockname)
- results.append(_("Unlinked: ") + mylockname)
- except OSError:
- pass
-
- return results
-
+portage/locks.py
\ No newline at end of file
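pym/portage_locks.py becomes a symlink to portage/locks.py. The lockfile()/unlockfile() round trip, as a sketch with an illustrative path:

    import portage_locks   # resolves through the new symlink to portage/locks.py

    # wantnewlockfile=1 creates .<name>.portage_lockfile next to the
    # protected file instead of locking the path itself.
    mylock = portage_locks.lockfile("/var/lib/portage/world", wantnewlockfile=1)
    try:
        pass   # ... critical section: read or rewrite the protected file ...
    finally:
        # unlockfile() expects the exact 4-tuple returned by lockfile().
        portage_locks.unlockfile(mylock)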
diff --git a/pym/portage_mail.py b/pym/portage_mail.py
index 99ed77fd..c88fcdce 100644..120000
--- a/pym/portage_mail.py
+++ b/pym/portage_mail.py
@@ -1,89 +1 @@
-# portage.py -- core Portage functionality
-# Copyright 1998-2004 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-# $Id: portage.py 3483 2006-06-10 21:40:40Z genone $
-
-import portage_exception, socket, smtplib, os, sys, time
-from email.MIMEText import MIMEText as TextMessage
-from email.MIMEMultipart import MIMEMultipart as MultipartMessage
-from email.MIMEBase import MIMEBase as BaseMessage
-
-def create_message(sender, recipient, subject, body, attachments=None):
- if attachments == None:
- mymessage = TextMessage(body)
- else:
- mymessage = MultipartMessage()
- mymessage.attach(TextMessage(body))
- for x in attachments:
- if isinstance(x, BaseMessage):
- mymessage.attach(x)
- elif isinstance(x, str):
- mymessage.attach(TextMessage(x))
- else:
- raise portage_exception.PortageException("Can't handle type of attachment: %s" % type(x))
-
- mymessage.set_unixfrom(sender)
- mymessage["To"] = recipient
- mymessage["From"] = sender
- mymessage["Subject"] = subject
- mymessage["Date"] = time.strftime("%a, %d %b %Y %H:%M:%S %z")
-
- return mymessage
-
-def send_mail(mysettings, message):
- mymailhost = "localhost"
- mymailport = 25
- mymailuser = ""
- mymailpasswd = ""
- myrecipient = "root@localhost"
-
- # Syntax for PORTAGE_ELOG_MAILURI (if defined):
-	# address [[user:passwd@]mailserver[:port]]
-	# where address: recipient address
- # user: username for smtp auth (defaults to none)
- # passwd: password for smtp auth (defaults to none)
- # mailserver: smtp server that should be used to deliver the mail (defaults to localhost)
- # alternatively this can also be the absolute path to a sendmail binary if you don't want to use smtp
- # port: port to use on the given smtp server (defaults to 25, values > 100000 indicate that starttls should be used on (port-100000))
- if " " in mysettings["PORTAGE_ELOG_MAILURI"]:
- myrecipient, mymailuri = mysettings["PORTAGE_ELOG_MAILURI"].split()
- if "@" in mymailuri:
- myauthdata, myconndata = mymailuri.rsplit("@", 1)
- try:
- mymailuser,mymailpasswd = myauthdata.split(":")
- except ValueError:
- print "!!! invalid SMTP AUTH configuration, trying unauthenticated ..."
- else:
- myconndata = mymailuri
- if ":" in myconndata:
- mymailhost,mymailport = myconndata.split(":")
- else:
- mymailhost = myconndata
- else:
- myrecipient = mysettings["PORTAGE_ELOG_MAILURI"]
-
- myfrom = message.get("From")
-
- # user wants to use a sendmail binary instead of smtp
- if mymailhost[0] == os.sep and os.path.exists(mymailhost):
- fd = os.popen(mymailhost+" -f "+myfrom+" "+myrecipient, "w")
- fd.write(message.as_string())
- if fd.close() != None:
- sys.stderr.write("!!! %s returned with a non-zero exit code. This generally indicates an error.\n" % mymailhost)
- else:
- try:
- if int(mymailport) > 100000:
- myconn = smtplib.SMTP(mymailhost, int(mymailport) - 100000)
- myconn.starttls()
- else:
- myconn = smtplib.SMTP(mymailhost, mymailport)
- if mymailuser != "" and mymailpasswd != "":
- myconn.login(mymailuser, mymailpasswd)
- myconn.sendmail(myfrom, myrecipient, message.as_string())
- myconn.quit()
- except smtplib.SMTPException, e:
-			raise portage_exception.PortageException("!!! An error occurred while trying to send logmail:\n"+str(e))
-		except socket.error, e:
-			raise portage_exception.PortageException("!!! A network error occurred while trying to send logmail:\n"+str(e)+"\nAre you sure you configured PORTAGE_ELOG_MAILURI correctly?")
- return
-
+portage/mail.py
\ No newline at end of file
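pym/portage_mail.py becomes a symlink to portage/mail.py. A sketch of the two-call mail flow; send_mail() only reads PORTAGE_ELOG_MAILURI from its settings argument, so a plain dict stands in here (recipient and server are illustrative):

    import portage_mail   # resolves through the new symlink to portage/mail.py

    msg = portage_mail.create_message("portage@localhost", "root@localhost",
        "elog summary", "merge finished without problems")
    settings = {"PORTAGE_ELOG_MAILURI": "root@localhost smtp.example.com:25"}
    portage_mail.send_mail(settings, msg)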
diff --git a/pym/portage_manifest.py b/pym/portage_manifest.py
index e621606c..6f6c0e9c 100644..120000
--- a/pym/portage_manifest.py
+++ b/pym/portage_manifest.py
@@ -1,618 +1 @@
-# Copyright 1999-2006 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-# $Header: $
-
-import errno, os, sets
-if not hasattr(__builtins__, "set"):
- from sets import Set as set
-
-import portage_exception, portage_versions, portage_const
-from portage_checksum import *
-from portage_exception import *
-from portage_util import write_atomic
-
-class FileNotInManifestException(PortageException):
- pass
-
-def manifest2AuxfileFilter(filename):
- filename = filename.strip(os.sep)
- mysplit = filename.split(os.path.sep)
- if "CVS" in mysplit:
- return False
- for x in mysplit:
- if x.startswith("."):
- return False
- return not filename.startswith("digest-")
-
-def manifest2MiscfileFilter(filename):
- filename = filename.strip(os.sep)
- return not (filename in ["CVS", ".svn", "files", "Manifest"] or filename.endswith(".ebuild"))
-
-def guessManifestFileType(filename):
- """ Perform a best effort guess of which type the given filename is, avoid using this if possible """
- if filename.startswith("files" + os.sep + "digest-"):
- return None
- if filename.startswith("files" + os.sep):
- return "AUX"
- elif filename.endswith(".ebuild"):
- return "EBUILD"
- elif filename in ["ChangeLog", "metadata.xml"]:
- return "MISC"
- else:
- return "DIST"
-
-def parseManifest2(mysplit):
- myentry = None
- if len(mysplit) > 4 and mysplit[0] in portage_const.MANIFEST2_IDENTIFIERS:
- mytype = mysplit[0]
- myname = mysplit[1]
- mysize = int(mysplit[2])
- myhashes = dict(zip(mysplit[3::2], mysplit[4::2]))
- myhashes["size"] = mysize
- myentry = Manifest2Entry(type=mytype, name=myname, hashes=myhashes)
- return myentry
-
-def parseManifest1(mysplit):
- myentry = None
- if len(mysplit) == 4 and mysplit[0] in ["size"] + portage_const.MANIFEST1_HASH_FUNCTIONS:
- myname = mysplit[2]
- mytype = None
- mytype = guessManifestFileType(myname)
- if mytype == "AUX":
- if myname.startswith("files" + os.path.sep):
- myname = myname[6:]
- mysize = int(mysplit[3])
- myhashes = {mysplit[0]: mysplit[1]}
- myhashes["size"] = mysize
- myentry = Manifest1Entry(type=mytype, name=myname, hashes=myhashes)
- return myentry
-
-class ManifestEntry(object):
- __slots__ = ("type", "name", "hashes")
- def __init__(self, **kwargs):
- for k, v in kwargs.iteritems():
- setattr(self, k, v)
- def __cmp__(self, other):
- if str(self) == str(other):
- return 0
- return 1
-
-class Manifest1Entry(ManifestEntry):
- def __str__(self):
- myhashkeys = self.hashes.keys()
- for hashkey in myhashkeys:
- if hashkey != "size":
- break
- hashvalue = self.hashes[hashkey]
- myname = self.name
- if self.type == "AUX" and not myname.startswith("files" + os.sep):
- myname = os.path.join("files", myname)
- return " ".join([hashkey, str(hashvalue), myname, str(self.hashes["size"])])
-
-class Manifest2Entry(ManifestEntry):
- def __str__(self):
- myline = " ".join([self.type, self.name, str(self.hashes["size"])])
- myhashkeys = self.hashes.keys()
- myhashkeys.remove("size")
- myhashkeys.sort()
- for h in myhashkeys:
- myline += " " + h + " " + str(self.hashes[h])
- return myline
-
-class Manifest(object):
- parsers = (parseManifest2, parseManifest1)
- def __init__(self, pkgdir, distdir, fetchlist_dict=None,
- manifest1_compat=True, from_scratch=False):
- """ create new Manifest instance for package in pkgdir
-		and add compatibility entries for old portage versions if manifest1_compat == True.
- Do not parse Manifest file if from_scratch == True (only for internal use)
- The fetchlist_dict parameter is required only for generation of
- a Manifest (not needed for parsing and checking sums)."""
- self.pkgdir = pkgdir.rstrip(os.sep) + os.sep
- self.fhashdict = {}
- self.hashes = portage_const.MANIFEST2_HASH_FUNCTIONS[:]
- self.hashes.append("size")
- if manifest1_compat:
- self.hashes.extend(portage_const.MANIFEST1_HASH_FUNCTIONS)
- self.hashes = sets.Set(self.hashes)
- for t in portage_const.MANIFEST2_IDENTIFIERS:
- self.fhashdict[t] = {}
- if not from_scratch:
- self._read()
- self.compat = manifest1_compat
- if fetchlist_dict != None:
- self.fetchlist_dict = fetchlist_dict
- else:
- self.fetchlist_dict = {}
- self.distdir = distdir
- self.guessType = guessManifestFileType
-
- def getFullname(self):
- """ Returns the absolute path to the Manifest file for this instance """
- return os.path.join(self.pkgdir, "Manifest")
-
- def getDigests(self):
-		""" Compatibility function for old digest/manifest code, returns dict of filename:{hashfunction:hashvalue} """
- rval = {}
- for t in portage_const.MANIFEST2_IDENTIFIERS:
- rval.update(self.fhashdict[t])
- return rval
-
- def getTypeDigests(self, ftype):
- """ Similar to getDigests(), but restricted to files of the given type. """
- return self.fhashdict[ftype]
-
- def _readDigests(self, myhashdict=None):
- """ Parse old style digest files for this Manifest instance """
- if myhashdict is None:
- myhashdict = {}
- try:
- for d in os.listdir(os.path.join(self.pkgdir, "files")):
- if d.startswith("digest-"):
- self._readManifest(os.path.join(self.pkgdir, "files", d), mytype="DIST",
- myhashdict=myhashdict)
- except (IOError, OSError), e:
- if e.errno == errno.ENOENT:
- pass
- else:
- raise
- return myhashdict
-
- def _readManifest(self, file_path, myhashdict=None, **kwargs):
- """Parse a manifest or an old style digest. If myhashdict is given
-		then data will be added to it. Otherwise, a new dict will be created
- and returned."""
- try:
- fd = open(file_path, "r")
- if myhashdict is None:
- myhashdict = {}
- self._parseDigests(fd, myhashdict=myhashdict, **kwargs)
- fd.close()
- return myhashdict
- except (OSError, IOError), e:
- if e.errno == errno.ENOENT:
- raise FileNotFound(file_path)
- else:
- raise
-
- def _read(self):
- """ Parse Manifest file for this instance """
- try:
- self._readManifest(self.getFullname(), myhashdict=self.fhashdict)
- except FileNotFound:
- pass
- self._readDigests(myhashdict=self.fhashdict)
-
-
- def _parseManifestLines(self, mylines):
- """Parse manifest lines and return a list of manifest entries."""
- for myline in mylines:
- myentry = None
- mysplit = myline.split()
- for parser in self.parsers:
- myentry = parser(mysplit)
- if myentry is not None:
- yield myentry
- break # go to the next line
-
- def _parseDigests(self, mylines, myhashdict=None, mytype=None):
- """Parse manifest entries and store the data in myhashdict. If mytype
- is specified, it will override the type for all parsed entries."""
- if myhashdict is None:
- myhashdict = {}
- for myentry in self._parseManifestLines(mylines):
- if mytype is None:
- myentry_type = myentry.type
- else:
- myentry_type = mytype
- myhashdict.setdefault(myentry_type, {})
- myhashdict[myentry_type].setdefault(myentry.name, {})
- myhashdict[myentry_type][myentry.name].update(myentry.hashes)
- return myhashdict
-
- def _writeDigests(self, force=False):
- """ Create old style digest files for this Manifest instance """
- cpvlist = [os.path.join(self._pkgdir_category(), x[:-7]) for x in os.listdir(self.pkgdir) if x.endswith(".ebuild")]
- rval = []
- try:
- os.makedirs(os.path.join(self.pkgdir, "files"))
- except OSError, oe:
- if oe.errno == errno.EEXIST:
- pass
- else:
- raise
- for cpv in cpvlist:
- dname = os.path.join(self.pkgdir, "files", "digest-%s" % self._catsplit(cpv)[1])
- distlist = self._getCpvDistfiles(cpv)
- missing_digests = set()
- for f in distlist:
- if f not in self.fhashdict["DIST"] or len(self.fhashdict["DIST"][f]) == 0:
- missing_digests.add(f)
- if missing_digests:
-				# This allows us to force removal of stale digests for the
- # ebuild --force digest option.
- distlist = [f for f in distlist if f not in missing_digests]
- update_digest = True
- if not force:
- try:
- f = open(dname, "r")
- old_data = self._parseDigests(f)
- f.close()
- if len(old_data) == 1 and "DIST" in old_data:
- new_data = self._getDigestData(distlist)
- if "DIST" in new_data:
- for myfile in new_data["DIST"]:
- for hashname in \
- new_data["DIST"][myfile].keys():
- if hashname != "size" and hashname not in \
- portage_const.MANIFEST1_HASH_FUNCTIONS:
- del new_data["DIST"][myfile][hashname]
- if new_data["DIST"] == old_data["DIST"]:
- update_digest = False
- except (IOError, OSError), e:
- if errno.ENOENT == e.errno:
- pass
- else:
- raise
- if update_digest:
- mylines = self._createDigestLines1(distlist, self.fhashdict)
- if mylines:
- mylines = "\n".join(mylines) + "\n"
- else:
- mylines = ""
- write_atomic(dname, mylines)
- rval.append(dname)
- return rval
-
- def _getDigestData(self, distlist):
- """create a hash dict for a specific list of files"""
- myhashdict = {}
- for myname in distlist:
- for mytype in self.fhashdict:
- if myname in self.fhashdict[mytype]:
- myhashdict.setdefault(mytype, {})
- myhashdict[mytype].setdefault(myname, {})
- myhashdict[mytype][myname].update(self.fhashdict[mytype][myname])
- return myhashdict
-
- def _createDigestLines1(self, distlist, myhashdict):
- """ Create an old style digest file."""
- mylines = []
- myfiles = myhashdict["DIST"].keys()
- myfiles.sort()
- for f in myfiles:
- if f in distlist:
- myhashkeys = myhashdict["DIST"][f].keys()
- myhashkeys.sort()
- for h in myhashkeys:
- if h not in portage_const.MANIFEST1_HASH_FUNCTIONS:
- continue
- myline = " ".join([h, str(myhashdict["DIST"][f][h]), f, str(myhashdict["DIST"][f]["size"])])
- mylines.append(myline)
- return mylines
-
- def _addDigestsToManifest(self, digests, fd):
- """ Add entries for old style digest files to Manifest file """
- mylines = []
- for dname in digests:
- myhashes = perform_multiple_checksums(dname, portage_const.MANIFEST1_HASH_FUNCTIONS+["size"])
- for h in myhashes:
- mylines.append((" ".join([h, str(myhashes[h]), os.path.join("files", os.path.basename(dname)), str(myhashes["size"])])))
- fd.write("\n".join(mylines))
- fd.write("\n")
-
- def _createManifestEntries(self):
- mytypes = self.fhashdict.keys()
- mytypes.sort()
- for t in mytypes:
- myfiles = self.fhashdict[t].keys()
- myfiles.sort()
- for f in myfiles:
- myentry = Manifest2Entry(
- type=t, name=f, hashes=self.fhashdict[t][f].copy())
- myhashkeys = myentry.hashes.keys()
- for h in myhashkeys:
- if h not in ["size"] + portage_const.MANIFEST2_HASH_FUNCTIONS:
- del myentry.hashes[h]
- yield myentry
- if self.compat and t != "DIST":
- mysize = self.fhashdict[t][f]["size"]
- myhashes = self.fhashdict[t][f]
- for h in myhashkeys:
- if h not in portage_const.MANIFEST1_HASH_FUNCTIONS:
- continue
- yield Manifest1Entry(
- type=t, name=f, hashes={"size":mysize, h:myhashes[h]})
-
- if self.compat:
- cvp_list = self.fetchlist_dict.keys()
- cvp_list.sort()
- for cpv in cvp_list:
- digest_path = os.path.join("files", "digest-%s" % self._catsplit(cpv)[1])
- dname = os.path.join(self.pkgdir, digest_path)
- try:
- myhashes = perform_multiple_checksums(dname, portage_const.MANIFEST1_HASH_FUNCTIONS+["size"])
- myhashkeys = myhashes.keys()
- myhashkeys.sort()
- for h in myhashkeys:
- if h in portage_const.MANIFEST1_HASH_FUNCTIONS:
- yield Manifest1Entry(type="AUX", name=digest_path,
- hashes={"size":myhashes["size"], h:myhashes[h]})
- except FileNotFound:
- pass
-
- def write(self, sign=False, force=False):
- """ Write Manifest instance to disk, optionally signing it """
- try:
- if self.compat:
- self._writeDigests()
- myentries = list(self._createManifestEntries())
- update_manifest = True
- if not force:
- try:
- f = open(self.getFullname(), "r")
- oldentries = list(self._parseManifestLines(f))
- f.close()
- if len(oldentries) == len(myentries):
- update_manifest = False
- for i in xrange(len(oldentries)):
- if oldentries[i] != myentries[i]:
- update_manifest = True
- break
- except (IOError, OSError), e:
- if e.errno == errno.ENOENT:
- pass
- else:
- raise
- if update_manifest:
- fd = open(self.getFullname(), "w")
- for myentry in myentries:
- fd.write("%s\n" % str(myentry))
- fd.close()
- if sign:
- self.sign()
- except (IOError, OSError), e:
- if e.errno == errno.EACCES:
- raise PermissionDenied(str(e))
- raise
-
- def sign(self):
- """ Sign the Manifest """
- raise NotImplementedError()
-
- def validateSignature(self):
- """ Validate signature on Manifest """
- raise NotImplementedError()
-
- def addFile(self, ftype, fname, hashdict=None, ignoreMissing=False):
- """ Add entry to Manifest optionally using hashdict to avoid recalculation of hashes """
- if ftype == "AUX" and not fname.startswith("files/"):
- fname = os.path.join("files", fname)
- if not os.path.exists(self.pkgdir+fname) and not ignoreMissing:
- raise FileNotFound(fname)
- if not ftype in portage_const.MANIFEST2_IDENTIFIERS:
- raise InvalidDataType(ftype)
- if ftype == "AUX" and fname.startswith("files"):
- fname = fname[6:]
- self.fhashdict[ftype][fname] = {}
- if hashdict != None:
- self.fhashdict[ftype][fname].update(hashdict)
- if not portage_const.MANIFEST2_REQUIRED_HASH in self.fhashdict[ftype][fname]:
- self.updateFileHashes(ftype, fname, checkExisting=False, ignoreMissing=ignoreMissing)
-
- def removeFile(self, ftype, fname):
- """ Remove given entry from Manifest """
- del self.fhashdict[ftype][fname]
-
- def hasFile(self, ftype, fname):
-		""" Return whether the Manifest contains an entry for the given type,filename pair """
- return (fname in self.fhashdict[ftype])
-
- def findFile(self, fname):
- """ Return entrytype of the given file if present in Manifest or None if not present """
- for t in portage_const.MANIFEST2_IDENTIFIERS:
- if fname in self.fhashdict[t]:
- return t
- return None
-
- def create(self, checkExisting=False, assumeDistHashesSometimes=False,
- assumeDistHashesAlways=False, requiredDistfiles=[]):
- """ Recreate this Manifest from scratch. This will not use any
- existing checksums unless assumeDistHashesSometimes or
- assumeDistHashesAlways is true (assumeDistHashesSometimes will only
- cause DIST checksums to be reused if the file doesn't exist in
- DISTDIR). The requiredDistfiles parameter specifies a list of
- distfiles to raise a FileNotFound exception for (if no file or existing
- checksums are available), and defaults to all distfiles when not
- specified."""
- if checkExisting:
- self.checkAllHashes()
- if assumeDistHashesSometimes or assumeDistHashesAlways:
- distfilehashes = self.fhashdict["DIST"]
- else:
- distfilehashes = {}
- self.__init__(self.pkgdir, self.distdir,
- fetchlist_dict=self.fetchlist_dict, from_scratch=True)
- for pkgdir, pkgdir_dirs, pkgdir_files in os.walk(self.pkgdir):
- break
- for f in pkgdir_files:
- if f.endswith(".ebuild"):
- mytype = "EBUILD"
- elif manifest2MiscfileFilter(f):
- mytype = "MISC"
- else:
- continue
- self.fhashdict[mytype][f] = perform_multiple_checksums(self.pkgdir+f, self.hashes)
- recursive_files = []
- cut_len = len(os.path.join(self.pkgdir, "files") + os.sep)
- for parentdir, dirs, files in os.walk(os.path.join(self.pkgdir, "files")):
- for f in files:
- full_path = os.path.join(parentdir, f)
- recursive_files.append(full_path[cut_len:])
- for f in recursive_files:
- if not manifest2AuxfileFilter(f):
- continue
- self.fhashdict["AUX"][f] = perform_multiple_checksums(
- os.path.join(self.pkgdir, "files", f.lstrip(os.sep)), self.hashes)
- cpvlist = [os.path.join(self._pkgdir_category(), x[:-7]) for x in os.listdir(self.pkgdir) if x.endswith(".ebuild")]
- distlist = set()
- for cpv in cpvlist:
- distlist.update(self._getCpvDistfiles(cpv))
- if requiredDistfiles is None:
- # This allows us to force removal of stale digests for the
- # ebuild --force digest option (no distfiles are required).
- requiredDistfiles = set()
- elif len(requiredDistfiles) == 0:
- # repoman passes in an empty list, which implies that all distfiles
- # are required.
- requiredDistfiles = distlist.copy()
- for f in distlist:
- fname = os.path.join(self.distdir, f)
- mystat = None
- try:
- mystat = os.stat(fname)
- except OSError:
- pass
- if f in distfilehashes and \
- ((assumeDistHashesSometimes and mystat is None) or \
- (assumeDistHashesAlways and mystat is None) or \
- (assumeDistHashesAlways and mystat is not None and \
- len(distfilehashes[f]) == len(self.hashes) and \
- distfilehashes[f]["size"] == mystat.st_size)):
- self.fhashdict["DIST"][f] = distfilehashes[f]
- else:
- try:
- self.fhashdict["DIST"][f] = perform_multiple_checksums(fname, self.hashes)
- except FileNotFound:
- if f in requiredDistfiles:
- raise
-
- def _pkgdir_category(self):
- return self.pkgdir.rstrip(os.sep).split(os.sep)[-2]
-
- def _getAbsname(self, ftype, fname):
- if ftype == "DIST":
- absname = os.path.join(self.distdir, fname)
- elif ftype == "AUX":
- absname = os.path.join(self.pkgdir, "files", fname)
- else:
- absname = os.path.join(self.pkgdir, fname)
- return absname
-
- def checkAllHashes(self, ignoreMissingFiles=False):
- for t in portage_const.MANIFEST2_IDENTIFIERS:
- self.checkTypeHashes(t, ignoreMissingFiles=ignoreMissingFiles)
-
- def checkTypeHashes(self, idtype, ignoreMissingFiles=False):
- for f in self.fhashdict[idtype]:
- self.checkFileHashes(idtype, f, ignoreMissing=ignoreMissingFiles)
-
- def checkFileHashes(self, ftype, fname, ignoreMissing=False):
- myhashes = self.fhashdict[ftype][fname]
- try:
- ok,reason = verify_all(self._getAbsname(ftype, fname), self.fhashdict[ftype][fname])
- if not ok:
- raise DigestException(tuple([self._getAbsname(ftype, fname)]+list(reason)))
- return ok, reason
- except FileNotFound, e:
- if not ignoreMissing:
- raise
- return False, "File Not Found: '%s'" % str(e)
-
- def checkCpvHashes(self, cpv, checkDistfiles=True, onlyDistfiles=False, checkMiscfiles=False):
- """ check the hashes for all files associated to the given cpv, include all
- AUX files and optionally all MISC files. """
- if not onlyDistfiles:
- self.checkTypeHashes("AUX", ignoreMissingFiles=False)
- if checkMiscfiles:
- self.checkTypeHashes("MISC", ignoreMissingFiles=False)
- ebuildname = "%s.ebuild" % self._catsplit(cpv)[1]
- self.checkFileHashes("EBUILD", ebuildname, ignoreMissing=False)
- if checkDistfiles or onlyDistfiles:
- for f in self._getCpvDistfiles(cpv):
- self.checkFileHashes("DIST", f, ignoreMissing=False)
-
- def _getCpvDistfiles(self, cpv):
- """ Get a list of all DIST files associated to the given cpv """
- return self.fetchlist_dict[cpv]
-
- def getDistfilesSize(self, fetchlist):
- total_bytes = 0
- for f in fetchlist:
- total_bytes += int(self.fhashdict["DIST"][f]["size"])
- return total_bytes
-
- def updateFileHashes(self, ftype, fname, checkExisting=True, ignoreMissing=True, reuseExisting=False):
- """ Regenerate hashes for the given file """
- if checkExisting:
- self.checkFileHashes(ftype, fname, ignoreMissing=ignoreMissing)
- if not ignoreMissing and not self.fhashdict[ftype].has_key(fname):
- raise FileNotInManifestException(fname)
- if not self.fhashdict[ftype].has_key(fname):
- self.fhashdict[ftype][fname] = {}
- myhashkeys = list(self.hashes)
- if reuseExisting:
- for k in [h for h in self.fhashdict[ftype][fname] if h in myhashkeys]:
- myhashkeys.remove(k)
- myhashes = perform_multiple_checksums(self._getAbsname(ftype, fname), myhashkeys)
- self.fhashdict[ftype][fname].update(myhashes)
-
- def updateTypeHashes(self, idtype, checkExisting=False, ignoreMissingFiles=True):
- """ Regenerate all hashes for all files of the given type """
- for fname in self.fhashdict[idtype]:
-			self.updateFileHashes(idtype, fname, checkExisting, ignoreMissing=ignoreMissingFiles)
-
- def updateAllHashes(self, checkExisting=False, ignoreMissingFiles=True):
- """ Regenerate all hashes for all files in this Manifest. """
- for ftype in portage_const.MANIFEST2_IDENTIFIERS:
-			self.updateTypeHashes(ftype, checkExisting, ignoreMissingFiles)
-
- def updateCpvHashes(self, cpv, ignoreMissingFiles=True):
- """ Regenerate all hashes associated to the given cpv (includes all AUX and MISC
- files)."""
- self.updateTypeHashes("AUX", ignoreMissingFiles=ignoreMissingFiles)
- self.updateTypeHashes("MISC", ignoreMissingFiles=ignoreMissingFiles)
- ebuildname = "%s.ebuild" % self._catsplit(cpv)[1]
-		self.updateFileHashes("EBUILD", ebuildname, ignoreMissing=ignoreMissingFiles)
- for f in self._getCpvDistfiles(cpv):
-			self.updateFileHashes("DIST", f, ignoreMissing=ignoreMissingFiles)
-
- def updateHashesGuessType(self, fname, *args, **kwargs):
- """ Regenerate hashes for the given file (guesses the type and then
- calls updateFileHashes)."""
- mytype = self.guessType(fname)
- if mytype == "AUX":
- fname = fname[len("files" + os.sep):]
- elif mytype is None:
- return
- myrealtype = self.findFile(fname)
- if myrealtype is not None:
- mytype = myrealtype
- return self.updateFileHashes(mytype, fname, *args, **kwargs)
-
- def getFileData(self, ftype, fname, key):
- """ Return the value of a specific (type,filename,key) triple, mainly useful
- to get the size for distfiles."""
- return self.fhashdict[ftype][fname][key]
-
- def getVersions(self):
- """ Returns a list of manifest versions present in the manifest file. """
- rVal = []
- mfname = self.getFullname()
- if not os.path.exists(mfname):
- return rVal
- myfile = open(mfname, "r")
- lines = myfile.readlines()
- myfile.close()
- for l in lines:
- mysplit = l.split()
- if len(mysplit) == 4 and mysplit[0] in portage_const.MANIFEST1_HASH_FUNCTIONS and not 1 in rVal:
- rVal.append(1)
- elif len(mysplit) > 4 and mysplit[0] in portage_const.MANIFEST2_IDENTIFIERS and ((len(mysplit) - 3) % 2) == 0 and not 2 in rVal:
- rVal.append(2)
- return rVal
-
- def _catsplit(self, pkg_key):
- """Split a category and package, returning a list of [cat, pkg].
- This is compatible with portage.catsplit()"""
- return pkg_key.split("/", 1)
+portage/manifest.py
\ No newline at end of file
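pym/portage_manifest.py becomes a symlink to portage/manifest.py. Parsing and checksum verification need no fetchlist_dict, so a read-only sketch looks like this (directory paths are illustrative):

    import portage_manifest   # resolves through the new symlink to portage/manifest.py

    m = portage_manifest.Manifest("/usr/portage/app-misc/screen",
        "/usr/portage/distfiles")
    print m.getVersions()          # e.g. [1, 2] for a Manifest with compat entries
    print m.findFile("ChangeLog")  # -> "MISC" if listed, None otherwise
    m.checkTypeHashes("EBUILD")    # raises DigestException on a checksum mismatch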
diff --git a/pym/portage_news.py b/pym/portage_news.py
index b54261d9..166e3ce0 100644..120000
--- a/pym/portage_news.py
+++ b/pym/portage_news.py
@@ -1,268 +1 @@
-# portage: news management code
-# Copyright 2006 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-# $Id$
-
-from portage_const import INCREMENTALS, PROFILE_PATH, NEWS_LIB_PATH
-from portage import config, vartree, vardbapi, portdbapi
-from portage_util import ensure_dirs, apply_permissions
-from portage_data import portage_gid
-from portage_locks import lockfile, unlockfile, lockdir, unlockdir
-from portage_exception import FileNotFound
-import os, re
-
-class NewsManager(object):
- """
- This object manages GLEP 42 style news items. It will cache news items
- that have previously shown up and notify users when there are relevant news
- items that apply to their packages that the user has not previously read.
-
- Creating a news manager requires:
- root - typically ${ROOT} see man make.conf and man emerge for details
- NEWS_PATH - path to news items; usually $REPODIR/metadata/news
- UNREAD_PATH - path to the news.repoid.unread file; this helps us track news items
-
- """
-
- TIMESTAMP_FILE = "news-timestamp"
-
- def __init__( self, root, NEWS_PATH, UNREAD_PATH, LANGUAGE_ID='en' ):
- self.NEWS_PATH = NEWS_PATH
- self.UNREAD_PATH = UNREAD_PATH
- self.TIMESTAMP_PATH = os.path.join( root, NEWS_LIB_PATH, NewsManager.TIMESTAMP_FILE )
- self.target_root = root
- self.LANGUAGE_ID = LANGUAGE_ID
- self.config = config( config_root = os.environ.get("PORTAGE_CONFIGROOT", "/"),
- target_root = root, config_incrementals = INCREMENTALS)
- self.vdb = vardbapi( settings = self.config, root = root,
- vartree = vartree( root = root, settings = self.config ) )
- self.portdb = portdbapi( porttree_root = self.config["PORTDIR"], mysettings = self.config )
-
- # Ensure that the unread path exists and is writable.
- dirmode = 02070
- modemask = 02
- ensure_dirs(self.UNREAD_PATH, mode=dirmode, mask=modemask, gid=portage_gid)
-
- def updateItems( self, repoid ):
- """
- Figure out which news items from NEWS_PATH are both unread and relevant to
- the user (according to the GLEP 42 standards of relevancy). Then add these
- items into the news.repoid.unread file.
- """
-
- repos = self.portdb.getRepositories()
- if repoid not in repos:
- raise ValueError("Invalid repoID: %s" % repoid)
-
- timestamp_file = self.TIMESTAMP_PATH + repoid
- if os.path.exists(timestamp_file):
- # Make sure the timestamp has correct permissions.
- apply_permissions( filename=timestamp_file,
-				uid=int(self.config["PORTAGE_INST_UID"]), gid=portage_gid, mode=0664 )
- timestamp = os.stat(timestamp_file).st_mtime
- else:
- timestamp = 0
-
- path = os.path.join( self.portdb.getRepositoryPath( repoid ), self.NEWS_PATH )
- newsdir_lock = None
- try:
- newsdir_lock = lockdir( self.portdb.getRepositoryPath(repoid) )
- # Skip reading news for repoid if the news dir does not exist. Requested by
- # NightMorph :)
- if not os.path.exists( path ):
- return None
- news = os.listdir( path )
- updates = []
- for item in news:
- try:
- file = os.path.join( path, item, item + "." + self.LANGUAGE_ID + ".txt")
- tmp = NewsItem( file , timestamp )
- except TypeError:
- continue
-
-				if tmp.isRelevant( profile=os.readlink(PROFILE_PATH), config=self.config, vardb=self.vdb):
- updates.append( tmp )
- finally:
- if newsdir_lock:
- unlockdir(newsdir_lock)
-
- del path
-
- path = os.path.join( self.UNREAD_PATH, "news-" + repoid + ".unread" )
- try:
- unread_lock = lockfile( path )
- if not os.path.exists( path ):
- #create the file if it does not exist
- open( path, "w" )
- # Ensure correct perms on the unread file.
- apply_permissions( filename=path,
-				uid=int(self.config["PORTAGE_INST_UID"]), gid=portage_gid, mode=0664 )
- # Make sure we have the correct permissions when created
- unread_file = open( path, "a" )
-
- for item in updates:
- unread_file.write( item.path + "\n" )
- unread_file.close()
- finally:
- unlockfile(unread_lock)
-
- # Touch the timestamp file
- f = open(timestamp_file, "w")
- f.close()
-
- def getUnreadItems( self, repoid, update=False ):
- """
- Determine if there are unread relevant items in news.repoid.unread.
- If there are unread items return their number.
- If update is specified, updateNewsItems( repoid ) will be called to
- check for new items.
- """
-
- if update:
- self.updateItems( repoid )
-
- unreadfile = os.path.join( self.UNREAD_PATH, "news-"+ repoid +".unread" )
- try:
- try:
- unread_lock = lockfile(unreadfile)
- # Set correct permissions on the news-repoid.unread file
- apply_permissions( filename=unreadfile,
- uid=int(self.config["PORTAGE_INST_UID"]), gid=portage_gid, mode=0664 )
-
- if os.path.exists( unreadfile ):
- unread = open( unreadfile ).readlines()
- if len(unread):
- return len(unread)
- except FileNotFound:
- pass # unread file may not exist
- finally:
- if unread_lock:
- unlockfile(unread_lock)
-
-_installedRE = re.compile("Display-If-Installed:(.*)\n")
-_profileRE = re.compile("Display-If-Profile:(.*)\n")
-_keywordRE = re.compile("Display-If-Keyword:(.*)\n")
-
-class NewsItem(object):
- """
- This class encapsulates a GLEP 42 style news item.
- It's purpose is to wrap parsing of these news items such that portage can determine
-	Its purpose is to wrap parsing of these news items such that portage can determine
- and determining 'relevancy restrictions'; these include "Display if Installed" or
- "display if arch: x86" and so forth.
-
- Creation of a news item involves passing in the path to the particular news item.
-
- """
-
- def __init__( self, path, cache_mtime = 0 ):
- """
-		For a given news item, we only want it if its path is a file and its
-		mtime is newer than the cached timestamp.
- """
- if not os.path.isfile( path ):
- raise TypeError
- if not os.stat( path ).st_mtime > cache_mtime:
- raise TypeError
- self.path = path
- self._parsed = False
-
- def isRelevant( self, vardb, config, profile ):
- """
- This function takes a dict of keyword arguments; one should pass in any
-		objects needed to do lookups (like what keywords we are on, what profile,
- and a vardb so we can look at installed packages).
- Each restriction will pluck out the items that are required for it to match
- or raise a ValueError exception if the required object is not present.
- """
-
- if not len(self.restrictions):
- return True # no restrictions to match means everyone should see it
-
- kwargs = { 'vardb' : vardb,
- 'config' : config,
- 'profile' : profile }
-
- for restriction in self.restrictions:
- if restriction.checkRestriction( **kwargs ):
- return True
-
- return False # No restrictions were met; thus we aren't relevant :(
-
- def parse( self ):
- lines = open(self.path).readlines()
- self.restrictions = []
- for line in lines:
-			#Optimization to ignore regex matching on lines that
- #will never match
- if not line.startswith("D"):
- continue
- restricts = { _installedRE : DisplayInstalledRestriction,
- _profileRE : DisplayProfileRestriction,
- _keywordRE : DisplayKeywordRestriction }
- for regex, restriction in restricts.iteritems():
- match = regex.match(line)
- if match:
- self.restrictions.append( restriction( match.groups()[0].strip() ) )
- continue
- self._parsed = True
-
- def __getattr__( self, attr ):
- if not self._parsed:
- self.parse()
- return self.__dict__[attr]
-
-class DisplayRestriction(object):
- """
- A base restriction object representing a restriction of display.
-	News items may have 'relevancy restrictions' that prevent them from
-	being displayed. In this case we need a manner of figuring out if
-	a particular item is relevant or not. If any of its restrictions
-	are met, then it is displayed.
- """
-
- def checkRestriction( self, **kwargs ):
- raise NotImplementedError("Derived class should over-ride this method")
-
-class DisplayProfileRestriction(DisplayRestriction):
- """
- A profile restriction where a particular item shall only be displayed
- if the user is running a specific profile.
- """
-
- def __init__( self, profile ):
- self.profile = profile
-
- def checkRestriction( self, **kwargs ):
- if self.profile == kwargs['profile']:
- return True
- return False
-
-class DisplayKeywordRestriction(DisplayRestriction):
- """
- A keyword restriction where a particular item shall only be displayed
- if the user is running a specific keyword.
- """
-
- def __init__( self, keyword ):
- self.keyword = keyword
-
- def checkRestriction( self, **kwargs ):
- if kwargs['config']["ARCH"] == self.keyword:
- return True
- return False
-
-class DisplayInstalledRestriction(DisplayRestriction):
- """
- An Installation restriction where a particular item shall only be displayed
- if the user has that item installed.
- """
-
- def __init__( self, cpv ):
- self.cpv = cpv
-
- def checkRestriction( self, **kwargs ):
- vdb = kwargs['vardb']
- if vdb.match( self.cpv ):
- return True
- return False
+portage/news.py
\ No newline at end of file
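pym/portage_news.py becomes a symlink to portage/news.py. A sketch of the unread-count query (root, news path and unread path are illustrative):

    import portage_news   # resolves through the new symlink to portage/news.py

    manager = portage_news.NewsManager("/", "metadata/news",
        "/var/lib/gentoo/news")
    count = manager.getUnreadItems("gentoo", update=True)
    if count:
        print "%d unread news item(s)" % count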
diff --git a/pym/portage_selinux.py b/pym/portage_selinux.py
index e4d80fa1..5c7a71ca 100644..120000
--- a/pym/portage_selinux.py
+++ b/pym/portage_selinux.py
@@ -1,8 +1 @@
-# Copyright 1999-2006 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-# $Header: $
-
-import selinux
-from selinux import is_selinux_enabled
-from selinux_aux import setexec, secure_symlink, secure_rename, \
- secure_copy, secure_mkdir, getcontext, get_sid, get_lsid
+portage/selinux.py
\ No newline at end of file
diff --git a/pym/portage_update.py b/pym/portage_update.py
index 1a2a1d88..cb88f8a5 100644..120000
--- a/pym/portage_update.py
+++ b/pym/portage_update.py
@@ -1,224 +1 @@
-# Copyright 1999-2006 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-# $Header: $
-
-import errno, os, re, sys
-
-from portage_util import ConfigProtect, grabfile, new_protect_filename, \
- normalize_path, write_atomic, writemsg
-from portage_exception import DirectoryNotFound, PortageException
-from portage_versions import ververify
-from portage_dep import dep_getkey, get_operator, isvalidatom, isjustname
-from portage_const import USER_CONFIG_PATH, WORLD_FILE
-
-ignored_dbentries = ("CONTENTS", "environment.bz2")
-
-def update_dbentry(update_cmd, mycontent):
- if update_cmd[0] == "move":
- old_value, new_value = update_cmd[1], update_cmd[2]
- if mycontent.count(old_value):
- old_value = re.escape(old_value);
- mycontent = re.sub(old_value+"(:|$|\\s)", new_value+"\\1", mycontent)
- def myreplace(matchobj):
- if ververify(matchobj.group(2)):
- return "%s-%s" % (new_value, matchobj.group(2))
- else:
- return "".join(matchobj.groups())
- mycontent = re.sub("(%s-)(\\S*)" % old_value, myreplace, mycontent)
- elif update_cmd[0] == "slotmove" and get_operator(update_cmd[1]) is None:
- pkg, origslot, newslot = update_cmd[1:]
- old_value = "%s:%s" % (pkg, origslot)
- if mycontent.count(old_value):
- old_value = re.escape(old_value)
- new_value = "%s:%s" % (pkg, newslot)
- mycontent = re.sub(old_value+"($|\\s)", new_value+"\\1", mycontent)
- return mycontent
-
-def update_dbentries(update_iter, mydata):
- """Performs update commands and returns a
- dict containing only the updated items."""
- updated_items = {}
- for k, mycontent in mydata.iteritems():
- if k not in ignored_dbentries:
- orig_content = mycontent
- for update_cmd in update_iter:
- mycontent = update_dbentry(update_cmd, mycontent)
- if mycontent != orig_content:
- updated_items[k] = mycontent
- return updated_items
-
-def fixdbentries(update_iter, dbdir):
- """Performs update commands which result in search and replace operations
- for each of the files in dbdir (excluding CONTENTS and environment.bz2).
- Returns True when actual modifications are necessary and False otherwise."""
- mydata = {}
- for myfile in [f for f in os.listdir(dbdir) if f not in ignored_dbentries]:
- file_path = os.path.join(dbdir, myfile)
- f = open(file_path, "r")
- mydata[myfile] = f.read()
- f.close()
- updated_items = update_dbentries(update_iter, mydata)
- for myfile, mycontent in updated_items.iteritems():
- file_path = os.path.join(dbdir, myfile)
- write_atomic(file_path, mycontent)
- return len(updated_items) > 0
-
-def grab_updates(updpath, prev_mtimes=None):
- """Returns all the updates from the given directory as a sorted list of
- tuples, each containing (file_path, statobj, content). If prev_mtimes is
- given then only updates with differing mtimes are considered."""
- try:
- mylist = os.listdir(updpath)
- except OSError, oe:
- if oe.errno == errno.ENOENT:
- raise DirectoryNotFound(updpath)
- raise
- if prev_mtimes is None:
- prev_mtimes = {}
- # validate the file name (filter out CVS directory, etc...)
- mylist = [myfile for myfile in mylist if len(myfile) == 7 and myfile[1:3] == "Q-"]
- if len(mylist) == 0:
- return []
-
- # update names are mangled to make them sort properly
- mylist = [myfile[3:]+"-"+myfile[:2] for myfile in mylist]
- mylist.sort()
- mylist = [myfile[5:]+"-"+myfile[:4] for myfile in mylist]
-
- update_data = []
- for myfile in mylist:
- file_path = os.path.join(updpath, myfile)
- mystat = os.stat(file_path)
- if file_path not in prev_mtimes or \
- long(prev_mtimes[file_path]) != long(mystat.st_mtime):
- f = open(file_path)
- content = f.read()
- f.close()
- update_data.append((file_path, mystat, content))
- return update_data
-
-def parse_updates(mycontent):
- """Valid updates are returned as a list of split update commands."""
- myupd = []
- errors = []
- mylines = mycontent.splitlines()
- for myline in mylines:
- mysplit = myline.split()
- if len(mysplit) == 0:
- continue
- if mysplit[0] not in ("move", "slotmove"):
- errors.append("ERROR: Update type not recognized '%s'" % myline)
- continue
- if mysplit[0] == "move":
- if len(mysplit) != 3:
- errors.append("ERROR: Update command invalid '%s'" % myline)
- continue
- orig_value, new_value = mysplit[1], mysplit[2]
- for cp in (orig_value, new_value):
- if not (isvalidatom(cp) and isjustname(cp)):
- errors.append(
- "ERROR: Malformed update entry '%s'" % myline)
- continue
- if mysplit[0] == "slotmove":
- if len(mysplit)!=4:
- errors.append("ERROR: Update command invalid '%s'" % myline)
- continue
- pkg, origslot, newslot = mysplit[1], mysplit[2], mysplit[3]
- if not isvalidatom(pkg):
- errors.append("ERROR: Malformed update entry '%s'" % myline)
- continue
-
- # The list of valid updates is filtered by continue statements above.
- myupd.append(mysplit)
- return myupd, errors
-
-def update_config_files(config_root, protect, protect_mask, update_iter):
- """Perform global updates on /etc/portage/package.* and the world file.
- config_root - location of files to update
- protect - list of paths from CONFIG_PROTECT
- protect_mask - list of paths from CONFIG_PROTECT_MASK
- update_iter - list of update commands as returned from parse_updates()"""
- config_root = normalize_path(config_root)
- update_files = {}
- file_contents = {}
- myxfiles = ["package.mask", "package.unmask", \
- "package.keywords", "package.use"]
- myxfiles += [os.path.join("profile", x) for x in myxfiles]
- abs_user_config = os.path.join(config_root,
- USER_CONFIG_PATH.lstrip(os.path.sep))
- recursivefiles = []
- for x in myxfiles:
- config_file = os.path.join(abs_user_config, x)
- if os.path.isdir(config_file):
- for parent, dirs, files in os.walk(config_file):
- for y in dirs:
- if y.startswith("."):
- dirs.remove(y)
- for y in files:
- if y.startswith("."):
- continue
- recursivefiles.append(
- os.path.join(parent, y)[len(abs_user_config) + 1:])
- else:
- recursivefiles.append(x)
- myxfiles = recursivefiles
- for x in myxfiles:
- try:
- myfile = open(os.path.join(abs_user_config, x),"r")
- file_contents[x] = myfile.readlines()
- myfile.close()
- except IOError:
- if file_contents.has_key(x):
- del file_contents[x]
- continue
- worldlist = grabfile(os.path.join(config_root, WORLD_FILE))
-
- for update_cmd in update_iter:
- if update_cmd[0] == "move":
- old_value, new_value = update_cmd[1], update_cmd[2]
- #update world entries:
- for x in range(0,len(worldlist)):
- #update world entries, if any.
- worldlist[x] = \
- dep_transform(worldlist[x], old_value, new_value)
-
-		#update /etc/portage/package.*
- for x in file_contents:
- for mypos in range(0,len(file_contents[x])):
- line = file_contents[x][mypos]
- if line[0] == "#" or not line.strip():
- continue
- myatom = line.split()[0]
- if myatom.startswith("-"):
- # package.mask supports incrementals
- myatom = myatom[1:]
- if not isvalidatom(myatom):
- continue
- key = dep_getkey(myatom)
- if key == old_value:
- file_contents[x][mypos] = \
- line.replace(old_value, new_value)
- update_files[x] = 1
- sys.stdout.write("p")
- sys.stdout.flush()
-
- write_atomic(os.path.join(config_root, WORLD_FILE), "\n".join(worldlist))
-
- protect_obj = ConfigProtect(
- config_root, protect, protect_mask)
- for x in update_files:
- updating_file = os.path.join(abs_user_config, x)
- if protect_obj.isprotected(updating_file):
- updating_file = new_protect_filename(updating_file)
- try:
- write_atomic(updating_file, "".join(file_contents[x]))
- except PortageException, e:
- writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
-			writemsg("!!! An error occurred while updating a config file:" + \
- " '%s'\n" % updating_file, noiselevel=-1)
- continue
-
-def dep_transform(mydep, oldkey, newkey):
- if dep_getkey(mydep) == oldkey:
- return mydep.replace(oldkey, newkey, 1)
- return mydep
+portage/update.py
\ No newline at end of file
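
To make the removed update logic concrete: parse_updates() splits the text of an updates file into validated commands, and update_dbentry() applies one command to a dependency string. A minimal sketch with invented atoms, assuming the two functions are imported from this module:

    content = "move sys-apps/oldname sys-apps/newname\n" \
              "slotmove x11-libs/qt 3 4\n"
    commands, errors = parse_updates(content)
    for e in errors:
        print e

    dep = ">=sys-apps/oldname-2.1 x11-libs/qt:3"
    for cmd in commands:
        dep = update_dbentry(cmd, dep)
    print dep   # ">=sys-apps/newname-2.1 x11-libs/qt:4"
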
diff --git a/pym/portage_util.py b/pym/portage_util.py
index cc5a566b..570febc0 100644..120000
--- a/pym/portage_util.py
+++ b/pym/portage_util.py
@@ -1,1037 +1 @@
-# Copyright 2004 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-# $Id$
-
-from portage_exception import PortageException, FileNotFound, \
- OperationNotPermitted, PermissionDenied, ReadOnlyFileSystem
-import portage_exception
-from portage_dep import isvalidatom
-
-import os, errno, shlex, stat, string, sys
-try:
- import cPickle
-except ImportError:
- import pickle as cPickle
-
-if not hasattr(__builtins__, "set"):
- from sets import Set as set
-
-noiselimit = 0
-
-def writemsg(mystr,noiselevel=0,fd=None):
- """Prints out warning and debug messages based on the noiselimit setting"""
- global noiselimit
- if fd is None:
- fd = sys.stderr
- if noiselevel <= noiselimit:
- fd.write(mystr)
- fd.flush()
-
-def writemsg_stdout(mystr,noiselevel=0):
-	"""Prints messages to stdout based on the noiselimit setting"""
- writemsg(mystr, noiselevel=noiselevel, fd=sys.stdout)
-
-def normalize_path(mypath):
- """
- os.path.normpath("//foo") returns "//foo" instead of "/foo"
- We dislike this behavior so we create our own normpath func
- to fix it.
- """
- if mypath.startswith(os.path.sep):
- # posixpath.normpath collapses 3 or more leading slashes to just 1.
- return os.path.normpath(2*os.path.sep + mypath)
- else:
- return os.path.normpath(mypath)
-
-def grabfile(myfilename, compat_level=0, recursive=0):
- """This function grabs the lines in a file, normalizes whitespace and returns lines in a list; if a line
- begins with a #, it is ignored, as are empty lines"""
-
- mylines=grablines(myfilename, recursive)
- newlines=[]
- for x in mylines:
- #the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
- #into single spaces.
- myline=" ".join(x.split())
- if not len(myline):
- continue
- if myline[0]=="#":
- # Check if we have a compat-level string. BC-integration data.
- # '##COMPAT==>N<==' 'some string attached to it'
- mylinetest = myline.split("<==",1)
- if len(mylinetest) == 2:
- myline_potential = mylinetest[1]
- mylinetest = mylinetest[0].split("##COMPAT==>")
- if len(mylinetest) == 2:
- if compat_level >= int(mylinetest[1]):
- # It's a compat line, and the key matches.
- newlines.append(myline_potential)
- continue
- else:
- continue
- newlines.append(myline)
- return newlines
-
-def map_dictlist_vals(func,myDict):
- """Performs a function on each value of each key in a dictlist.
- Returns a new dictlist."""
- new_dl = {}
- for key in myDict.keys():
- new_dl[key] = []
- new_dl[key] = map(func,myDict[key])
- return new_dl
-
-def stack_dictlist(original_dicts, incremental=0, incrementals=[], ignore_none=0):
- """
-	Stacks a list of dicts into a single dict, optionally merging or
-	overwriting matching key/value pairs for the dict[key]->list.
-	Entries at a higher index take precedence.
-
- Example usage:
- >>> from portage_util import stack_dictlist
- >>> print stack_dictlist( [{'a':'b'},{'x':'y'}])
- >>> {'a':'b','x':'y'}
- >>> print stack_dictlist( [{'a':'b'},{'a':'c'}], incremental = True )
- >>> {'a':['b','c'] }
- >>> a = {'KEYWORDS':['x86','alpha']}
- >>> b = {'KEYWORDS':['-x86']}
- >>> print stack_dictlist( [a,b] )
- >>> { 'KEYWORDS':['x86','alpha','-x86']}
- >>> print stack_dictlist( [a,b], incremental=True)
- >>> { 'KEYWORDS':['alpha'] }
- >>> print stack_dictlist( [a,b], incrementals=['KEYWORDS'])
- >>> { 'KEYWORDS':['alpha'] }
-
- @param original_dicts a list of (dictionary objects or None)
- @type list
-	@param incremental If True, treat every key as incremental: '-x'
-	   entries remove x from the accumulated list and '-*' clears it.
- @type boolean
- @param incrementals A list of items that should be incremental (-foo removes foo from
- the returned dict).
- @type list
- @param ignore_none Appears to be ignored, but probably was used long long ago.
- @type boolean
-
- """
- final_dict = {}
- for mydict in original_dicts:
- if mydict is None:
- continue
- for y in mydict.keys():
- if not y in final_dict:
- final_dict[y] = []
-
- for thing in mydict[y]:
- if thing:
- if incremental or y in incrementals:
- if thing == "-*":
- final_dict[y] = []
- continue
- elif thing.startswith("-"):
- try:
- final_dict[y].remove(thing[1:])
- except ValueError:
- pass
- continue
- if thing not in final_dict[y]:
- final_dict[y].append(thing)
- if y in final_dict and not final_dict[y]:
- del final_dict[y]
- return final_dict
-
-def stack_dicts(dicts, incremental=0, incrementals=[], ignore_none=0):
- """Stacks an array of dict-types into one array. Optionally merging or
- overwriting matching key/value pairs for the dict[key]->string.
- Returns a single dict."""
- final_dict = None
- for mydict in dicts:
- if mydict is None:
- if ignore_none:
- continue
- else:
- return None
- if final_dict is None:
- final_dict = {}
- for y in mydict.keys():
- if mydict[y]:
- if final_dict.has_key(y) and (incremental or (y in incrementals)):
- final_dict[y] += " "+mydict[y][:]
- else:
- final_dict[y] = mydict[y][:]
- mydict[y] = " ".join(mydict[y].split()) # Remove extra spaces.
- return final_dict
-
-def stack_lists(lists, incremental=1):
-	"""Stacks a list of lists into a single list, optionally removing
-	values using '-value' notation. Lists at a higher index take
-	precedence. All elements must be hashable."""
-
- new_list = {}
- for x in lists:
- for y in filter(None, x):
- if incremental:
- if y == "-*":
- new_list.clear()
- elif y.startswith("-"):
- new_list.pop(y[1:], None)
- else:
- new_list[y] = True
- else:
- new_list[y] = True
- return new_list.keys()
-
-def grabdict(myfilename, juststrings=0, empty=0, recursive=0, incremental=1):
- """
- This function grabs the lines in a file, normalizes whitespace and returns lines in a dictionary
-
- @param myfilename: file to process
- @type myfilename: string (path)
- @param juststrings: only return strings
- @type juststrings: Boolean (integer)
- @param empty: Ignore certain lines
- @type empty: Boolean (integer)
- @param recursive: Recursively grab ( support for /etc/portage/package.keywords/* and friends )
- @type recursive: Boolean (integer)
- @param incremental: Append to the return list, don't overwrite
- @type incremental: Boolean (integer)
- @rtype: Dictionary
- @returns:
- 1. Returns the lines in a file in a dictionary, for example:
- 'sys-apps/portage x86 amd64 ppc'
- would return
-		{ "sys-apps/portage" : [ 'x86', 'amd64', 'ppc' ] }
- the line syntax is key : [list of values]
- """
- newdict={}
- for x in grablines(myfilename, recursive):
- #the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
- #into single spaces.
- if x[0] == "#":
- continue
- myline=x.split()
- if len(myline) < 2 and empty == 0:
- continue
- if len(myline) < 1 and empty == 1:
- continue
- if incremental:
- newdict.setdefault(myline[0], []).extend(myline[1:])
- else:
- newdict[myline[0]] = myline[1:]
- if juststrings:
- for k, v in newdict.iteritems():
- newdict[k] = " ".join(v)
- return newdict
-
-def grabdict_package(myfilename, juststrings=0, recursive=0):
- pkgs=grabdict(myfilename, juststrings, empty=1, recursive=recursive)
- # We need to call keys() here in order to avoid the possibility of
- # "RuntimeError: dictionary changed size during iteration"
- # when an invalid atom is deleted.
- for x in pkgs.keys():
- if not isvalidatom(x):
- del(pkgs[x])
- writemsg("--- Invalid atom in %s: %s\n" % (myfilename, x),
- noiselevel=-1)
- return pkgs
-
-def grabfile_package(myfilename, compatlevel=0, recursive=0):
- pkgs=grabfile(myfilename, compatlevel, recursive=recursive)
- for x in range(len(pkgs)-1, -1, -1):
- pkg = pkgs[x]
- if pkg[0] == "-":
- pkg = pkg[1:]
-		if pkg[0] == "*": # Kill this so we can deal with the "packages" file too
- pkg = pkg[1:]
- if not isvalidatom(pkg):
- writemsg("--- Invalid atom in %s: %s\n" % (myfilename, pkgs[x]),
- noiselevel=-1)
- del(pkgs[x])
- return pkgs
-
-def grablines(myfilename,recursive=0):
- mylines=[]
- if recursive and os.path.isdir(myfilename):
- if myfilename in ["RCS", "CVS", "SCCS"]:
- return mylines
- dirlist = os.listdir(myfilename)
- dirlist.sort()
- for f in dirlist:
- if not f.startswith(".") and not f.endswith("~"):
- mylines.extend(grablines(
- os.path.join(myfilename, f), recursive))
- else:
- try:
- myfile = open(myfilename, "r")
- mylines = myfile.readlines()
- myfile.close()
- except IOError:
- pass
- return mylines
-
-def writedict(mydict,myfilename,writekey=True):
- """Writes out a dict to a file; writekey=0 mode doesn't write out
- the key and assumes all values are strings, not lists."""
- myfile = None
- try:
- myfile = atomic_ofstream(myfilename)
- if not writekey:
- for x in mydict.values():
- myfile.write(x+"\n")
- else:
- for x in mydict.keys():
- myfile.write("%s %s\n" % (x, " ".join(mydict[x])))
- myfile.close()
- except IOError:
- if myfile is not None:
- myfile.abort()
- return 0
- return 1
-
-def getconfig(mycfg, tolerant=0, allow_sourcing=False, expand=True):
- mykeys={}
- try:
- f=open(mycfg,'r')
- except IOError, e:
- if e.errno != errno.ENOENT:
- raise
- return None
- try:
- lex = shlex.shlex(f, posix=True)
- lex.wordchars=string.digits+string.letters+"~!@#$%*_\:;?,./-+{}"
- lex.quotes="\"'"
- if allow_sourcing:
- lex.source="source"
- while 1:
- key=lex.get_token()
- if key == "export":
- key = lex.get_token()
- if key is None:
- #normal end of file
- break;
- equ=lex.get_token()
- if (equ==''):
- #unexpected end of file
- #lex.error_leader(self.filename,lex.lineno)
- if not tolerant:
- writemsg("!!! Unexpected end of config file: variable "+str(key)+"\n",
- noiselevel=-1)
- raise Exception("ParseError: Unexpected EOF: "+str(mycfg)+": on/before line "+str(lex.lineno))
- else:
- return mykeys
- elif (equ!='='):
- #invalid token
- #lex.error_leader(self.filename,lex.lineno)
- if not tolerant:
- writemsg("!!! Invalid token (not \"=\") "+str(equ)+"\n",
- noiselevel=-1)
- raise Exception("ParseError: Invalid token (not '='): "+str(mycfg)+": line "+str(lex.lineno))
- else:
- return mykeys
- val=lex.get_token()
- if val is None:
- #unexpected end of file
- #lex.error_leader(self.filename,lex.lineno)
- if not tolerant:
- writemsg("!!! Unexpected end of config file: variable "+str(key)+"\n",
- noiselevel=-1)
- raise portage_exception.CorruptionError("ParseError: Unexpected EOF: "+str(mycfg)+": line "+str(lex.lineno))
- else:
- return mykeys
- if expand:
- mykeys[key] = varexpand(val, mykeys)
- else:
- mykeys[key] = val
- except SystemExit, e:
- raise
- except Exception, e:
- raise portage_exception.ParseError(str(e)+" in "+mycfg)
- return mykeys
-
-#cache expansions of constant strings
-cexpand={}
-def varexpand(mystring,mydict={}):
-	"""
-	New variable expansion code. Removes quotes, handles \n, etc.
-	This code is used by the configfile code, as well as others (parser).
-	This would be a good bunch of code to port to C.
-	"""
-	newstring = cexpand.get(" "+mystring, None)
-	if newstring is not None:
-		return newstring
- numvars=0
- mystring=" "+mystring
- #in single, double quotes
- insing=0
- indoub=0
- pos=1
- newstring=" "
- while (pos<len(mystring)):
- if (mystring[pos]=="'") and (mystring[pos-1]!="\\"):
- if (indoub):
- newstring=newstring+"'"
- else:
- insing=not insing
- pos=pos+1
- continue
- elif (mystring[pos]=='"') and (mystring[pos-1]!="\\"):
- if (insing):
- newstring=newstring+'"'
- else:
- indoub=not indoub
- pos=pos+1
- continue
- if (not insing):
- #expansion time
- if (mystring[pos]=="\n"):
- #convert newlines to spaces
- newstring=newstring+" "
- pos=pos+1
- elif (mystring[pos]=="\\"):
- #backslash expansion time
- if (pos+1>=len(mystring)):
- newstring=newstring+mystring[pos]
- break
- else:
- a=mystring[pos+1]
- pos=pos+2
- if a=='a':
- newstring=newstring+chr(007)
- elif a=='b':
- newstring=newstring+chr(010)
- elif a=='e':
- newstring=newstring+chr(033)
- elif (a=='f') or (a=='n'):
- newstring=newstring+chr(012)
- elif a=='r':
- newstring=newstring+chr(015)
- elif a=='t':
- newstring=newstring+chr(011)
- elif a=='v':
- newstring=newstring+chr(013)
- elif a!='\n':
- #remove backslash only, as bash does: this takes care of \\ and \' and \" as well
- newstring=newstring+mystring[pos-1:pos]
- continue
- elif (mystring[pos]=="$") and (mystring[pos-1]!="\\"):
- pos=pos+1
- if mystring[pos]=="{":
- pos=pos+1
- braced=True
- else:
- braced=False
- myvstart=pos
- validchars=string.ascii_letters+string.digits+"_"
- while mystring[pos] in validchars:
- if (pos+1)>=len(mystring):
- if braced:
- cexpand[mystring]=""
- return ""
- else:
- pos=pos+1
- break
- pos=pos+1
- myvarname=mystring[myvstart:pos]
- if braced:
- if mystring[pos]!="}":
- cexpand[mystring]=""
- return ""
- else:
- pos=pos+1
- if len(myvarname)==0:
- cexpand[mystring]=""
- return ""
- numvars=numvars+1
- if mydict.has_key(myvarname):
- newstring=newstring+mydict[myvarname]
- else:
- newstring=newstring+mystring[pos]
- pos=pos+1
- else:
- newstring=newstring+mystring[pos]
- pos=pos+1
- if numvars==0:
- cexpand[mystring]=newstring[1:]
- return newstring[1:]
-
-def pickle_write(data,filename,debug=0):
- import os
- try:
- myf=open(filename,"w")
- cPickle.dump(data,myf,-1)
- myf.flush()
- myf.close()
- writemsg("Wrote pickle: "+str(filename)+"\n",1)
-		import portage_data  # for the portage group id
-		os.chown(filename,os.getuid(),portage_data.portage_gid)
-		os.chmod(filename,0664)
- except SystemExit, e:
- raise
- except Exception, e:
- return 0
- return 1
-
-def pickle_read(filename,default=None,debug=0):
- import os
- if not os.access(filename, os.R_OK):
- writemsg("pickle_read(): File not readable. '"+filename+"'\n",1)
- return default
- data = None
- try:
- myf = open(filename)
- mypickle = cPickle.Unpickler(myf)
- mypickle.find_global = None
- data = mypickle.load()
- myf.close()
- del mypickle,myf
- writemsg("pickle_read(): Loaded pickle. '"+filename+"'\n",1)
- except SystemExit, e:
- raise
- except Exception, e:
- writemsg("!!! Failed to load pickle: "+str(e)+"\n",1)
- data = default
- return data
-
-def dump_traceback(msg, noiselevel=1):
- import sys, traceback
- info = sys.exc_info()
- if not info[2]:
- stack = traceback.extract_stack()[:-1]
- error = None
- else:
- stack = traceback.extract_tb(info[2])
- error = str(info[1])
- writemsg("\n====================================\n", noiselevel=noiselevel)
- writemsg("%s\n\n" % msg, noiselevel=noiselevel)
- for line in traceback.format_list(stack):
- writemsg(line, noiselevel=noiselevel)
- if error:
- writemsg(error+"\n", noiselevel=noiselevel)
- writemsg("====================================\n\n", noiselevel=noiselevel)
-
-def unique_array(s):
- """lifted from python cookbook, credit: Tim Peters
- Return a list of the elements in s in arbitrary order, sans duplicates"""
- n = len(s)
- # assume all elements are hashable, if so, it's linear
- try:
- return list(set(s))
- except TypeError:
- pass
-
- # so much for linear. abuse sort.
- try:
- t = list(s)
- t.sort()
- except TypeError:
- pass
- else:
- assert n > 0
- last = t[0]
- lasti = i = 1
- while i < n:
- if t[i] != last:
- t[lasti] = last = t[i]
- lasti += 1
- i += 1
- return t[:lasti]
-
- # blah. back to original portage.unique_array
- u = []
- for x in s:
- if x not in u:
- u.append(x)
- return u
-
-def apply_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1,
- stat_cached=None, follow_links=True):
- """Apply user, group, and mode bits to a file if the existing bits do not
- already match. The default behavior is to force an exact match of mode
- bits. When mask=0 is specified, mode bits on the target file are allowed
- to be a superset of the mode argument (via logical OR). When mask>0, the
- mode bits that the target file is allowed to have are restricted via
- logical XOR.
- Returns True if the permissions were modified and False otherwise."""
-
- modified = False
-
- if stat_cached is None:
- try:
- if follow_links:
- stat_cached = os.stat(filename)
- else:
- stat_cached = os.lstat(filename)
- except OSError, oe:
- func_call = "stat('%s')" % filename
- if oe.errno == errno.EPERM:
- raise OperationNotPermitted(func_call)
- elif oe.errno == errno.EACCES:
- raise PermissionDenied(func_call)
- elif oe.errno == errno.ENOENT:
- raise FileNotFound(filename)
- else:
- raise
-
- if (uid != -1 and uid != stat_cached.st_uid) or \
- (gid != -1 and gid != stat_cached.st_gid):
- try:
- if follow_links:
- os.chown(filename, uid, gid)
- else:
- import portage_data
- portage_data.lchown(filename, uid, gid)
- modified = True
- except OSError, oe:
- func_call = "chown('%s', %i, %i)" % (filename, uid, gid)
- if oe.errno == errno.EPERM:
- raise OperationNotPermitted(func_call)
- elif oe.errno == errno.EACCES:
- raise PermissionDenied(func_call)
- elif oe.errno == errno.EROFS:
- raise ReadOnlyFileSystem(func_call)
- elif oe.errno == errno.ENOENT:
- raise FileNotFound(filename)
- else:
- raise
-
- new_mode = -1
- st_mode = stat_cached.st_mode & 07777 # protect from unwanted bits
- if mask >= 0:
- if mode == -1:
- mode = 0 # Don't add any mode bits when mode is unspecified.
- else:
- mode = mode & 07777
- if (mode & st_mode != mode) or \
- ((mask ^ st_mode) & st_mode != st_mode):
- new_mode = mode | st_mode
- new_mode = (mask ^ new_mode) & new_mode
- elif mode != -1:
- mode = mode & 07777 # protect from unwanted bits
- if mode != st_mode:
- new_mode = mode
-
- # The chown system call may clear S_ISUID and S_ISGID
- # bits, so those bits are restored if necessary.
- if modified and new_mode == -1 and \
- (st_mode & stat.S_ISUID or st_mode & stat.S_ISGID):
- if mode == -1:
- new_mode = st_mode
- else:
- mode = mode & 07777
- if mask >= 0:
- new_mode = mode | st_mode
- new_mode = (mask ^ new_mode) & new_mode
- else:
- new_mode = mode
- if not (new_mode & stat.S_ISUID or new_mode & stat.S_ISGID):
- new_mode = -1
-
- if not follow_links and stat.S_ISLNK(stat_cached.st_mode):
- # Mode doesn't matter for symlinks.
- new_mode = -1
-
- if new_mode != -1:
- try:
- os.chmod(filename, new_mode)
- modified = True
- except OSError, oe:
- func_call = "chmod('%s', %s)" % (filename, oct(new_mode))
- if oe.errno == errno.EPERM:
- raise OperationNotPermitted(func_call)
- elif oe.errno == errno.EACCES:
- raise PermissionDenied(func_call)
- elif oe.errno == errno.EROFS:
- raise ReadOnlyFileSystem(func_call)
- elif oe.errno == errno.ENOENT:
- raise FileNotFound(filename)
- raise
- return modified
-
-def apply_stat_permissions(filename, newstat, **kwargs):
- """A wrapper around apply_secpass_permissions that gets
- uid, gid, and mode from a stat object"""
- return apply_secpass_permissions(filename, uid=newstat.st_uid, gid=newstat.st_gid,
- mode=newstat.st_mode, **kwargs)
-
-def apply_recursive_permissions(top, uid=-1, gid=-1,
- dirmode=-1, dirmask=-1, filemode=-1, filemask=-1, onerror=None):
- """A wrapper around apply_secpass_permissions that applies permissions
- recursively. If optional argument onerror is specified, it should be a
- function; it will be called with one argument, a PortageException instance.
- Returns True if all permissions are applied and False if some are left
- unapplied."""
-
- if onerror is None:
- # Default behavior is to dump errors to stderr so they won't
- # go unnoticed. Callers can pass in a quiet instance.
- def onerror(e):
- if isinstance(e, OperationNotPermitted):
- writemsg("Operation Not Permitted: %s\n" % str(e),
- noiselevel=-1)
- elif isinstance(e, FileNotFound):
- writemsg("File Not Found: '%s'\n" % str(e), noiselevel=-1)
- else:
- raise
-
- all_applied = True
- for dirpath, dirnames, filenames in os.walk(top):
- try:
- applied = apply_secpass_permissions(dirpath,
- uid=uid, gid=gid, mode=dirmode, mask=dirmask)
- if not applied:
- all_applied = False
- except PortageException, e:
- all_applied = False
- onerror(e)
-
- for name in filenames:
- try:
- applied = apply_secpass_permissions(os.path.join(dirpath, name),
- uid=uid, gid=gid, mode=filemode, mask=filemask)
- if not applied:
- all_applied = False
- except PortageException, e:
- all_applied = False
- onerror(e)
- return all_applied
-
-def apply_secpass_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1,
- stat_cached=None, follow_links=True):
- """A wrapper around apply_permissions that uses secpass and simple
- logic to apply as much of the permissions as possible without
- generating an obviously avoidable permission exception. Despite
- attempts to avoid an exception, it's possible that one will be raised
- anyway, so be prepared.
- Returns True if all permissions are applied and False if some are left
- unapplied."""
-
- if stat_cached is None:
- try:
- if follow_links:
- stat_cached = os.stat(filename)
- else:
- stat_cached = os.lstat(filename)
- except OSError, oe:
- func_call = "stat('%s')" % filename
- if oe.errno == errno.EPERM:
- raise OperationNotPermitted(func_call)
- elif oe.errno == errno.EACCES:
- raise PermissionDenied(func_call)
- elif oe.errno == errno.ENOENT:
- raise FileNotFound(filename)
- else:
- raise
-
- all_applied = True
-
- import portage_data # not imported globally because of circular dep
- if portage_data.secpass < 2:
-
- if uid != -1 and \
- uid != stat_cached.st_uid:
- all_applied = False
- uid = -1
-
- if gid != -1 and \
- gid != stat_cached.st_gid and \
- gid not in os.getgroups():
- all_applied = False
- gid = -1
-
- apply_permissions(filename, uid=uid, gid=gid, mode=mode, mask=mask,
- stat_cached=stat_cached, follow_links=follow_links)
- return all_applied
-
-class atomic_ofstream(file):
- """Write a file atomically via os.rename(). Atomic replacement prevents
- interprocess interference and prevents corruption of the target
- file when the write is interrupted (for example, when an 'out of space'
- error occurs)."""
-
- def __init__(self, filename, mode='w', follow_links=True, **kargs):
- """Opens a temporary filename.pid in the same directory as filename."""
- self._aborted = False
-
- if follow_links:
- canonical_path = os.path.realpath(filename)
- self._real_name = canonical_path
- tmp_name = "%s.%i" % (canonical_path, os.getpid())
- try:
- super(atomic_ofstream, self).__init__(tmp_name, mode=mode, **kargs)
- return
- except (OSError, IOError), e:
- if canonical_path == filename:
- raise
- writemsg("!!! Failed to open file: '%s'\n" % tmp_name,
- noiselevel=-1)
- writemsg("!!! %s\n" % str(e), noiselevel=-1)
-
- self._real_name = filename
- tmp_name = "%s.%i" % (filename, os.getpid())
- super(atomic_ofstream, self).__init__(tmp_name, mode=mode, **kargs)
-
- def close(self):
- """Closes the temporary file, copies permissions (if possible),
- and performs the atomic replacement via os.rename(). If the abort()
- method has been called, then the temp file is closed and removed."""
- if not self.closed:
- try:
- super(atomic_ofstream, self).close()
- if not self._aborted:
- try:
- apply_stat_permissions(self.name, os.stat(self._real_name))
- except OperationNotPermitted:
- pass
- except FileNotFound:
- pass
- except OSError, oe: # from the above os.stat call
- if oe.errno in (errno.ENOENT, errno.EPERM):
- pass
- else:
- raise
- os.rename(self.name, self._real_name)
- finally:
- # Make sure we cleanup the temp file
- # even if an exception is raised.
- try:
- os.unlink(self.name)
- except OSError, oe:
- pass
-
- def abort(self):
- """If an error occurs while writing the file, the user should
- call this method in order to leave the target file unchanged.
- This will call close() automatically."""
- if not self._aborted:
- self._aborted = True
- self.close()
-
- def __del__(self):
-		"""If the user does not explicitly call close(), it is
- assumed that an error has occurred, so we abort()."""
- if not self.closed:
- self.abort()
- # ensure destructor from the base class is called
- base_destructor = getattr(super(atomic_ofstream, self), '__del__', None)
- if base_destructor is not None:
- base_destructor()
-
-def write_atomic(file_path, content):
- f = None
- try:
- f = atomic_ofstream(file_path)
- f.write(content)
- f.close()
- except (IOError, OSError), e:
- if f:
- f.abort()
- func_call = "write_atomic('%s')" % file_path
- if e.errno == errno.EPERM:
- raise OperationNotPermitted(func_call)
- elif e.errno == errno.EACCES:
- raise PermissionDenied(func_call)
- elif e.errno == errno.EROFS:
- raise ReadOnlyFileSystem(func_call)
- elif e.errno == errno.ENOENT:
- raise FileNotFound(file_path)
- else:
- raise
-
-def ensure_dirs(dir_path, *args, **kwargs):
- """Create a directory and call apply_permissions.
- Returns True if a directory is created or the permissions needed to be
- modified, and False otherwise."""
-
- created_dir = False
-
- try:
- os.makedirs(dir_path)
- created_dir = True
- except OSError, oe:
- func_call = "makedirs('%s')" % dir_path
- if errno.EEXIST == oe.errno:
- pass
- elif oe.errno == errno.EPERM:
- raise OperationNotPermitted(func_call)
- elif oe.errno == errno.EACCES:
- raise PermissionDenied(func_call)
- elif oe.errno == errno.EROFS:
- raise ReadOnlyFileSystem(func_call)
- else:
- raise
- perms_modified = apply_permissions(dir_path, *args, **kwargs)
- return created_dir or perms_modified
-
-class LazyItemsDict(dict):
- """A mapping object that behaves like a standard dict except that it allows
- for lazy initialization of values via callable objects. Lazy items can be
- overwritten and deleted just as normal items."""
- def __init__(self, initial_items=None):
- dict.__init__(self)
- self.lazy_items = {}
- if initial_items is not None:
- self.update(initial_items)
- def addLazyItem(self, item_key, value_callable, *pargs, **kwargs):
- """Add a lazy item for the given key. When the item is requested,
- value_callable will be called with *pargs and **kwargs arguments."""
- self.lazy_items[item_key] = (value_callable, pargs, kwargs)
- # make it show up in self.keys(), etc...
- dict.__setitem__(self, item_key, None)
- def addLazySingleton(self, item_key, value_callable, *pargs, **kwargs):
- """This is like addLazyItem except value_callable will only be called
- a maximum of 1 time and the result will be cached for future requests."""
- class SingletonItem(object):
- def __init__(self, value_callable, *pargs, **kwargs):
- self._callable = value_callable
- self._pargs = pargs
- self._kwargs = kwargs
- self._called = False
- def __call__(self):
- if not self._called:
- self._called = True
- self._value = self._callable(*self._pargs, **self._kwargs)
- return self._value
- self.addLazyItem(item_key, SingletonItem(value_callable, *pargs, **kwargs))
- def update(self, map_obj):
- if isinstance(map_obj, LazyItemsDict):
- for k in map_obj:
- if k in map_obj.lazy_items:
- dict.__setitem__(self, k, None)
- else:
- dict.__setitem__(self, k, map_obj[k])
- self.lazy_items.update(map_obj.lazy_items)
- else:
- dict.update(self, map_obj)
- def __getitem__(self, item_key):
- if item_key in self.lazy_items:
- value_callable, pargs, kwargs = self.lazy_items[item_key]
- return value_callable(*pargs, **kwargs)
- else:
- return dict.__getitem__(self, item_key)
- def __setitem__(self, item_key, value):
- if item_key in self.lazy_items:
- del self.lazy_items[item_key]
- dict.__setitem__(self, item_key, value)
- def __delitem__(self, item_key):
- if item_key in self.lazy_items:
- del self.lazy_items[item_key]
- dict.__delitem__(self, item_key)
-
-class ConfigProtect(object):
- def __init__(self, myroot, protect_list, mask_list):
- self.myroot = myroot
- self.protect_list = protect_list
- self.mask_list = mask_list
- self.updateprotect()
-
- def updateprotect(self):
- """Update internal state for isprotected() calls. Nonexistent paths
- are ignored."""
- self.protect = []
- self._dirs = set()
- for x in self.protect_list:
- ppath = normalize_path(
- os.path.join(self.myroot, x.lstrip(os.path.sep)))
- mystat = None
- try:
- if stat.S_ISDIR(os.stat(ppath).st_mode):
- self._dirs.add(ppath)
- self.protect.append(ppath)
- except OSError:
- # If it doesn't exist, there's no need to protect it.
- pass
-
- self.protectmask = []
- for x in self.mask_list:
- ppath = normalize_path(
- os.path.join(self.myroot, x.lstrip(os.path.sep)))
- mystat = None
-			try:
-				# Use lstat so that anything, even a broken symlink,
-				# can be protected.
-				if stat.S_ISDIR(os.lstat(ppath).st_mode):
-					self._dirs.add(ppath)
-				self.protectmask.append(ppath)
-				# Now use stat in case this is a symlink to a directory.
-				if stat.S_ISDIR(os.stat(ppath).st_mode):
-					self._dirs.add(ppath)
- except OSError:
- # If it doesn't exist, there's no need to mask it.
- pass
-
- def isprotected(self, obj):
- """Returns True if obj is protected, False otherwise. The caller must
- ensure that obj is normalized with a single leading slash. A trailing
- slash is optional for directories."""
- masked = 0
- protected = 0
- sep = os.path.sep
- for ppath in self.protect:
- if len(ppath) > masked and obj.startswith(ppath):
- if ppath in self._dirs:
- if obj != ppath and not obj.startswith(ppath + sep):
- # /etc/foo does not match /etc/foobaz
- continue
- elif obj != ppath:
- # force exact match when CONFIG_PROTECT lists a
- # non-directory
- continue
- protected = len(ppath)
- #config file management
- for pmpath in self.protectmask:
- if len(pmpath) >= protected and obj.startswith(pmpath):
- if pmpath in self._dirs:
- if obj != pmpath and \
- not obj.startswith(pmpath + sep):
- # /etc/foo does not match /etc/foobaz
- continue
- elif obj != pmpath:
- # force exact match when CONFIG_PROTECT_MASK lists
- # a non-directory
- continue
- #skip, it's in the mask
- masked = len(pmpath)
- return protected > masked
-
-def new_protect_filename(mydest, newmd5=None):
- """Resolves a config-protect filename for merging, optionally
- using the last filename if the md5 matches.
- (dest,md5) ==> 'string' --- path_to_target_filename
- (dest) ==> ('next', 'highest') --- next_target and most-recent_target
- """
-
- # config protection filename format:
- # ._cfg0000_foo
- # 0123456789012
- prot_num = -1
- last_pfile = ""
-
- if not os.path.exists(mydest):
- return mydest
-
- real_filename = os.path.basename(mydest)
- real_dirname = os.path.dirname(mydest)
- for pfile in os.listdir(real_dirname):
- if pfile[0:5] != "._cfg":
- continue
- if pfile[10:] != real_filename:
- continue
- try:
- new_prot_num = int(pfile[5:9])
- if new_prot_num > prot_num:
- prot_num = new_prot_num
- last_pfile = pfile
- except ValueError:
- continue
- prot_num = prot_num + 1
-
- new_pfile = normalize_path(os.path.join(real_dirname,
- "._cfg" + str(prot_num).zfill(4) + "_" + real_filename))
- old_pfile = normalize_path(os.path.join(real_dirname, last_pfile))
- if last_pfile and newmd5:
- import portage_checksum
- if portage_checksum.perform_md5(
- os.path.join(real_dirname, last_pfile)) == newmd5:
- return old_pfile
- return new_pfile
+portage/util.py
\ No newline at end of file
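
Two of the helpers deleted above carry most of the conceptual weight: stack_lists() implements Portage's incremental-variable semantics, and write_atomic()/atomic_ofstream implement safe config-file replacement. A minimal sketch of each, with invented values and an invented target path:

    # Incremental stacking: later lists win, '-x' removes x,
    # and '-*' clears everything accumulated so far.
    base    = ["x86", "alpha", "ppc"]
    overlay = ["-alpha", "sparc"]
    print sorted(stack_lists([base, overlay]))   # ['ppc', 'sparc', 'x86']
    print stack_lists([base, ["-*", "amd64"]])   # ['amd64']

    # Atomic replacement: content is written to /tmp/example.conf.<pid>
    # first and os.rename()d over the target only on a clean close().
    write_atomic("/tmp/example.conf", 'FEATURES="sandbox"\n')
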
diff --git a/pym/portage_versions.py b/pym/portage_versions.py
index 63d69bac..1e05cb3b 100644..120000
--- a/pym/portage_versions.py
+++ b/pym/portage_versions.py
@@ -1,314 +1 @@
-# portage_versions.py -- core Portage functionality
-# Copyright 1998-2006 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-# $Id$
-
-import re
-
-ver_regexp = re.compile("^(cvs\\.)?(\\d+)((\\.\\d+)*)([a-z]?)((_(pre|p|beta|alpha|rc)\\d*)*)(-r(\\d+))?$")
-suffix_regexp = re.compile("^(alpha|beta|rc|pre|p)(\\d*)$")
-suffix_value = {"pre": -2, "p": 0, "alpha": -4, "beta": -3, "rc": -1}
-endversion_keys = ["pre", "p", "alpha", "beta", "rc"]
-
-from portage_exception import InvalidData
-
-def ververify(myver, silent=1):
- if ver_regexp.match(myver):
- return 1
- else:
- if not silent:
- print "!!! syntax error in version: %s" % myver
- return 0
-
-vercmp_cache = {}
-def vercmp(ver1, ver2, silent=1):
- """
- Compare two versions
- Example usage:
- >>> from portage_versions import vercmp
- >>> vercmp('1.0-r1','1.2-r3')
- negative number
- >>> vercmp('1.3','1.2-r3')
- positive number
- >>> vercmp('1.0_p3','1.0_p3')
- 0
-
-	@param ver1: version to compare with (see ver_regexp in portage_versions.py)
-	@type ver1: string (example: "2.1.2-r3")
-	@param ver2: version to compare against (see ver_regexp in portage_versions.py)
-	@type ver2: string (example: "2.1.2_rc5")
-	@rtype: None, int, or float
- @return:
- 1. positive if ver1 is greater than ver2
- 2. negative if ver1 is less than ver2
- 3. 0 if ver1 equals ver2
- 4. None if ver1 or ver2 are invalid (see ver_regexp in portage_versions.py)
- """
-
- if ver1 == ver2:
- return 0
- mykey=ver1+":"+ver2
- try:
- return vercmp_cache[mykey]
- except KeyError:
- pass
- match1 = ver_regexp.match(ver1)
- match2 = ver_regexp.match(ver2)
-
- # checking that the versions are valid
- if not match1 or not match1.groups():
- if not silent:
- print "!!! syntax error in version: %s" % ver1
- return None
- if not match2 or not match2.groups():
- if not silent:
- print "!!! syntax error in version: %s" % ver2
- return None
-
- # shortcut for cvs ebuilds (new style)
- if match1.group(1) and not match2.group(1):
- vercmp_cache[mykey] = 1
- return 1
- elif match2.group(1) and not match1.group(1):
- vercmp_cache[mykey] = -1
- return -1
-
- # building lists of the version parts before the suffix
- # first part is simple
- list1 = [int(match1.group(2))]
- list2 = [int(match2.group(2))]
-
- # this part would greatly benefit from a fixed-length version pattern
- if len(match1.group(3)) or len(match2.group(3)):
- vlist1 = match1.group(3)[1:].split(".")
- vlist2 = match2.group(3)[1:].split(".")
- for i in range(0, max(len(vlist1), len(vlist2))):
-			# Implicit .0 is given a value of -1, so that 1.0.0 > 1.0, since it
- # would be ambiguous if two versions that aren't literally equal
- # are given the same value (in sorting, for example).
- if len(vlist1) <= i or len(vlist1[i]) == 0:
- list1.append(-1)
- list2.append(int(vlist2[i]))
- elif len(vlist2) <= i or len(vlist2[i]) == 0:
- list1.append(int(vlist1[i]))
- list2.append(-1)
- # Let's make life easy and use integers unless we're forced to use floats
- elif (vlist1[i][0] != "0" and vlist2[i][0] != "0"):
- list1.append(int(vlist1[i]))
- list2.append(int(vlist2[i]))
- # now we have to use floats so 1.02 compares correctly against 1.1
- else:
- list1.append(float("0."+vlist1[i]))
- list2.append(float("0."+vlist2[i]))
-
- # and now the final letter
- if len(match1.group(5)):
- list1.append(ord(match1.group(5)))
- if len(match2.group(5)):
- list2.append(ord(match2.group(5)))
-
- for i in range(0, max(len(list1), len(list2))):
- if len(list1) <= i:
- vercmp_cache[mykey] = -1
- return -1
- elif len(list2) <= i:
- vercmp_cache[mykey] = 1
- return 1
- elif list1[i] != list2[i]:
- vercmp_cache[mykey] = list1[i] - list2[i]
- return list1[i] - list2[i]
-
- # main version is equal, so now compare the _suffix part
- list1 = match1.group(6).split("_")[1:]
- list2 = match2.group(6).split("_")[1:]
-
- for i in range(0, max(len(list1), len(list2))):
- if len(list1) <= i:
- s1 = ("p","0")
- else:
- s1 = suffix_regexp.match(list1[i]).groups()
- if len(list2) <= i:
- s2 = ("p","0")
- else:
- s2 = suffix_regexp.match(list2[i]).groups()
- if s1[0] != s2[0]:
- return suffix_value[s1[0]] - suffix_value[s2[0]]
- if s1[1] != s2[1]:
- # it's possible that the s(1|2)[1] == ''
- # in such a case, fudge it.
- try: r1 = int(s1[1])
- except ValueError: r1 = 0
- try: r2 = int(s2[1])
- except ValueError: r2 = 0
- return r1 - r2
-
-	# the suffix part is equal, so finally check the revision
- if match1.group(10):
- r1 = int(match1.group(10))
- else:
- r1 = 0
- if match2.group(10):
- r2 = int(match2.group(10))
- else:
- r2 = 0
- vercmp_cache[mykey] = r1 - r2
- return r1 - r2
-
-def pkgcmp(pkg1, pkg2):
- """
- Compare 2 package versions created in pkgsplit format.
-
- Example usage:
- >>> from portage_versions import *
- >>> pkgcmp(pkgsplit('test-1.0-r1'),pkgsplit('test-1.2-r3'))
- -1
- >>> pkgcmp(pkgsplit('test-1.3'),pkgsplit('test-1.2-r3'))
- 1
-
- @param pkg1: package to compare with
- @type pkg1: list (example: ['test', '1.0', 'r1'])
-	@param pkg2: package to compare against
- @type pkg2: list (example: ['test', '1.0', 'r1'])
- @rtype: None or integer
- @return:
- 1. None if package names are not the same
- 2. 1 if pkg1 is greater than pkg2
- 3. -1 if pkg1 is less than pkg2
- 4. 0 if pkg1 equals pkg2
- """
- if pkg1[0] != pkg2[0]:
- return None
- mycmp=vercmp(pkg1[1],pkg2[1])
- if mycmp>0:
- return 1
- if mycmp<0:
- return -1
- r1=float(pkg1[2][1:])
- r2=float(pkg2[2][1:])
- if r1>r2:
- return 1
- if r2>r1:
- return -1
- return 0
-
-
-pkgcache={}
-
-def pkgsplit(mypkg,silent=1):
- try:
- if not pkgcache[mypkg]:
- return None
- return pkgcache[mypkg][:]
- except KeyError:
- pass
- myparts=mypkg.split("-")
-
- if len(myparts)<2:
- if not silent:
- print "!!! Name error in",mypkg+": missing a version or name part."
- pkgcache[mypkg]=None
- return None
- for x in myparts:
- if len(x)==0:
- if not silent:
- print "!!! Name error in",mypkg+": empty \"-\" part."
- pkgcache[mypkg]=None
- return None
-
- #verify rev
- revok=0
- myrev=myparts[-1]
- if len(myrev) and myrev[0]=="r":
- try:
- int(myrev[1:])
- revok=1
- except ValueError: # from int()
- pass
- if revok:
- verPos = -2
- revision = myparts[-1]
- else:
- verPos = -1
- revision = "r0"
-
- if ververify(myparts[verPos]):
- if len(myparts)== (-1*verPos):
- pkgcache[mypkg]=None
- return None
- else:
- for x in myparts[:verPos]:
- if ververify(x):
- pkgcache[mypkg]=None
- return None
- #names can't have versiony looking parts
- myval=["-".join(myparts[:verPos]),myparts[verPos],revision]
- pkgcache[mypkg]=myval
- return myval
- else:
- pkgcache[mypkg]=None
- return None
-
-_valid_category = re.compile("^\w[\w-]*")
-
-catcache={}
-def catpkgsplit(mydata,silent=1):
- """
- Takes a Category/Package-Version-Rev and returns a list of each.
-
- @param mydata: Data to split
- @type mydata: string
- @param silent: suppress error messages
- @type silent: Boolean (integer)
-	@rtype: list
-	@return:
-	1. If each exists, it returns [cat, pkgname, version, rev]
-	2. If cat is not specified in mydata, cat will be "null"
-	3. If rev does not exist it will be 'r0'
-	4. If cat is invalid (specified but has incorrect syntax)
-	an InvalidData Exception will be thrown
- """
-
- # Categories may contain a-zA-z0-9+_- but cannot start with -
- global _valid_category
- import portage_dep
- try:
- if not catcache[mydata]:
- return None
- return catcache[mydata][:]
- except KeyError:
- pass
- mysplit=mydata.split("/")
- p_split=None
- if len(mysplit)==1:
- retval=["null"]
- p_split=pkgsplit(mydata,silent=silent)
- elif len(mysplit)==2:
- if portage_dep._dep_check_strict and \
- not _valid_category.match(mysplit[0]):
- raise InvalidData("Invalid category in %s" %mydata )
- retval=[mysplit[0]]
- p_split=pkgsplit(mysplit[1],silent=silent)
- if not p_split:
- catcache[mydata]=None
- return None
- retval.extend(p_split)
- catcache[mydata]=retval
- return retval
-
-def catsplit(mydep):
- return mydep.split("/", 1)
-
-def best(mymatches):
- """Accepts None arguments; assumes matches are valid."""
- if mymatches is None:
- return ""
- if not len(mymatches):
- return ""
- bestmatch = mymatches[0]
- p2 = catpkgsplit(bestmatch)[1:]
- for x in mymatches[1:]:
- p1 = catpkgsplit(x)[1:]
- if pkgcmp(p1, p2) > 0:
- bestmatch = x
- p2 = catpkgsplit(bestmatch)[1:]
- return bestmatch
+portage/versions.py
\ No newline at end of file
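
The comparison rules in the removed vercmp() are easiest to see by example: an implicit .0 component sorts below an explicit one, and suffixes order as alpha < beta < pre < rc < plain release < p. A short sketch (Python 2, like the module itself), assuming the module and its portage_dep dependency are importable; the package names are invented:

    print vercmp("1.0", "1.0.0") < 0             # True: implicit .0 sorts lower
    print vercmp("1.2_alpha1", "1.2_beta1") < 0  # True: alpha before beta
    print vercmp("1.2_rc3", "1.2") < 0           # True: rc precedes the release
    print vercmp("1.2-r1", "1.2") > 0            # True: higher revision wins

    print pkgsplit("foo-1.2-r1")                 # ['foo', '1.2', 'r1']
    print catpkgsplit("sys-apps/foo-1.2")        # ['sys-apps', 'foo', '1.2', 'r0']
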
diff --git a/pym/xpak.py b/pym/xpak.py
index b7ef582e..223cb31c 100644..120000
--- a/pym/xpak.py
+++ b/pym/xpak.py
@@ -1,421 +1 @@
-# Copyright 2001-2004 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-# $Id$
-
-
-# The format for a tbz2/xpak:
-#
-# tbz2: tar.bz2 + xpak + (xpak_offset) + "STOP"
-# xpak: "XPAKPACK" + (index_len) + (data_len) + index + data + "XPAKSTOP"
-# index: (pathname_len) + pathname + (data_offset) + (data_len)
-# index entries are concatenated end-to-end.
-# data: concatenated data chunks, end-to-end.
-#
-# [tarball]XPAKPACKIIIIDDDD[index][data]XPAKSTOPOOOOSTOP
-#
-# (integer) == encodeint(integer) ===> 4 characters (big-endian copy)
-# '+' means concatenate the fields ===> All chunks are strings
-
-import sys,os,shutil,errno
-from stat import *
-
-def addtolist(mylist,curdir):
-	"""(list, dir) --- Takes an array (list) and appends all files found
-	under the current directory, prefixed with curdir. Returns nothing;
-	the list is modified in place."""
- for x in os.listdir("."):
- if os.path.isdir(x):
- os.chdir(x)
- addtolist(mylist,curdir+x+"/")
- os.chdir("..")
- else:
- if curdir+x not in mylist:
- mylist.append(curdir+x)
-
-def encodeint(myint):
- """Takes a 4 byte integer and converts it into a string of 4 characters.
- Returns the characters in a string."""
- part1=chr((myint >> 24 ) & 0x000000ff)
- part2=chr((myint >> 16 ) & 0x000000ff)
- part3=chr((myint >> 8 ) & 0x000000ff)
- part4=chr(myint & 0x000000ff)
- return part1+part2+part3+part4
-
-def decodeint(mystring):
- """Takes a 4 byte string and converts it into a 4 byte integer.
- Returns an integer."""
- myint=0
- myint=myint+ord(mystring[3])
- myint=myint+(ord(mystring[2]) << 8)
- myint=myint+(ord(mystring[1]) << 16)
- myint=myint+(ord(mystring[0]) << 24)
- return myint
-
-def xpak(rootdir,outfile=None):
- """(rootdir,outfile) -- creates an xpak segment of the directory 'rootdir'
- and under the name 'outfile' if it is specified. Otherwise it returns the
- xpak segment."""
- try:
- origdir=os.getcwd()
- except SystemExit, e:
- raise
- except:
- os.chdir("/")
- origdir="/"
- os.chdir(rootdir)
- mylist=[]
-
- addtolist(mylist,"")
- mylist.sort()
- mydata = {}
- for x in mylist:
- a = open(x, "r")
- mydata[x] = a.read()
- a.close()
- os.chdir(origdir)
-
- xpak_segment = xpak_mem(mydata)
- if outfile:
- outf = open(outfile, "w")
- outf.write(xpak_segment)
- outf.close()
- else:
- return xpak_segment
-
-def xpak_mem(mydata):
-	"""Create an xpak segment from a map object."""
- indexglob=""
- indexpos=0
- dataglob=""
- datapos=0
- for x, newglob in mydata.iteritems():
- mydatasize=len(newglob)
- indexglob=indexglob+encodeint(len(x))+x+encodeint(datapos)+encodeint(mydatasize)
- indexpos=indexpos+4+len(x)+4+4
- dataglob=dataglob+newglob
- datapos=datapos+mydatasize
- return "XPAKPACK" \
- + encodeint(len(indexglob)) \
- + encodeint(len(dataglob)) \
- + indexglob \
- + dataglob \
- + "XPAKSTOP"
-
-def xsplit(infile):
- """(infile) -- Splits the infile into two files.
- 'infile.index' contains the index segment.
-	'infile.dat' contains the data segment."""
- myfile=open(infile,"r")
- mydat=myfile.read()
- myfile.close()
-
- splits = xsplit_mem(mydat)
- if not splits:
- return False
-
- myfile=open(infile+".index","w")
- myfile.write(splits[0])
- myfile.close()
- myfile=open(infile+".dat","w")
- myfile.write(splits[1])
- myfile.close()
- return True
-
-def xsplit_mem(mydat):
- if mydat[0:8]!="XPAKPACK":
- return None
- if mydat[-8:]!="XPAKSTOP":
- return None
- indexsize=decodeint(mydat[8:12])
- datasize=decodeint(mydat[12:16])
- return (mydat[16:indexsize+16], mydat[indexsize+16:-8])
-
-def getindex(infile):
- """(infile) -- grabs the index segment from the infile and returns it."""
- myfile=open(infile,"r")
- myheader=myfile.read(16)
- if myheader[0:8]!="XPAKPACK":
- myfile.close()
- return
- indexsize=decodeint(myheader[8:12])
- myindex=myfile.read(indexsize)
- myfile.close()
- return myindex
-
-def getboth(infile):
- """(infile) -- grabs the index and data segments from the infile.
- Returns an array [indexSegment,dataSegment]"""
- myfile=open(infile,"r")
- myheader=myfile.read(16)
- if myheader[0:8]!="XPAKPACK":
- myfile.close()
- return
- indexsize=decodeint(myheader[8:12])
- datasize=decodeint(myheader[12:16])
- myindex=myfile.read(indexsize)
- mydata=myfile.read(datasize)
- myfile.close()
- return myindex, mydata
-
-def listindex(myindex):
- """Print to the terminal the filenames listed in the indexglob passed in."""
- for x in getindex_mem(myindex):
- print x
-
-def getindex_mem(myindex):
- """Returns the filenames listed in the indexglob passed in."""
- myindexlen=len(myindex)
- startpos=0
- myret=[]
- while ((startpos+8)<myindexlen):
- mytestlen=decodeint(myindex[startpos:startpos+4])
- myret=myret+[myindex[startpos+4:startpos+4+mytestlen]]
- startpos=startpos+mytestlen+12
- return myret
-
-def searchindex(myindex,myitem):
- """(index,item) -- Finds the offset and length of the file 'item' in the
- datasegment via the index 'index' provided."""
- mylen=len(myitem)
- myindexlen=len(myindex)
- startpos=0
- while ((startpos+8)<myindexlen):
- mytestlen=decodeint(myindex[startpos:startpos+4])
- if mytestlen==mylen:
- if myitem==myindex[startpos+4:startpos+4+mytestlen]:
- #found
- datapos=decodeint(myindex[startpos+4+mytestlen:startpos+8+mytestlen]);
- datalen=decodeint(myindex[startpos+8+mytestlen:startpos+12+mytestlen]);
- return datapos, datalen
- startpos=startpos+mytestlen+12
-
-def getitem(myid,myitem):
- myindex=myid[0]
- mydata=myid[1]
- myloc=searchindex(myindex,myitem)
- if not myloc:
- return None
- return mydata[myloc[0]:myloc[0]+myloc[1]]
-
-def xpand(myid,mydest):
- myindex=myid[0]
- mydata=myid[1]
- try:
- origdir=os.getcwd()
- except SystemExit, e:
- raise
- except:
- os.chdir("/")
- origdir="/"
- os.chdir(mydest)
- myindexlen=len(myindex)
- startpos=0
- while ((startpos+8)<myindexlen):
- namelen=decodeint(myindex[startpos:startpos+4])
- datapos=decodeint(myindex[startpos+4+namelen:startpos+8+namelen]);
- datalen=decodeint(myindex[startpos+8+namelen:startpos+12+namelen]);
- myname=myindex[startpos+4:startpos+4+namelen]
- dirname=os.path.dirname(myname)
- if dirname:
- if not os.path.exists(dirname):
- os.makedirs(dirname)
- mydat=open(myname,"w")
- mydat.write(mydata[datapos:datapos+datalen])
- mydat.close()
- startpos=startpos+namelen+12
- os.chdir(origdir)
-
-class tbz2:
- def __init__(self,myfile):
- self.file=myfile
- self.filestat=None
- self.index=""
- self.infosize=0
- self.xpaksize=0
- self.indexsize=None
- self.datasize=None
- self.indexpos=None
- self.datapos=None
- self.scan()
-
- def decompose(self,datadir,cleanup=1):
- """Alias for unpackinfo() --- Complement to recompose() but optionally
- deletes the destination directory. Extracts the xpak from the tbz2 into
- the directory provided. Raises IOError if scan() fails.
-		Returns result of unpackinfo()."""
- if not self.scan():
- raise IOError
- if cleanup:
- self.cleanup(datadir)
- if not os.path.exists(datadir):
- os.makedirs(datadir)
- return self.unpackinfo(datadir)
- def compose(self,datadir,cleanup=0):
- """Alias for recompose()."""
-		return self.recompose(datadir,cleanup)
- def recompose(self,datadir,cleanup=0):
- """Creates an xpak segment from the datadir provided, truncates the tbz2
- to the end of regular data if an xpak segment already exists, and adds
- the new segment to the file with terminating info."""
- xpdata = xpak(datadir)
- self.recompose_mem(xpdata)
- if cleanup:
- self.cleanup(datadir)
-
- def recompose_mem(self, xpdata):
- self.scan() # Don't care about condition... We'll rewrite the data anyway.
- myfile=open(self.file,"a+")
- if not myfile:
- raise IOError
- myfile.seek(-self.xpaksize,2) # 0,2 or -0,2 just mean EOF.
- myfile.truncate()
- myfile.write(xpdata+encodeint(len(xpdata))+"STOP")
- myfile.flush()
- myfile.close()
- return 1
-
- def cleanup(self, datadir):
- datadir_split = os.path.split(datadir)
- if len(datadir_split) >= 2 and len(datadir_split[1]) > 0:
- # This is potentially dangerous,
- # thus the above sanity check.
- try:
- shutil.rmtree(datadir)
- except OSError, oe:
- if oe.errno == errno.ENOENT:
- pass
- else:
- raise oe
-
- def scan(self):
- """Scans the tbz2 to locate the xpak segment and setup internal values.
- This function is called by relevant functions already."""
- try:
- mystat=os.stat(self.file)
- if self.filestat:
- changed=0
- for x in [ST_SIZE, ST_MTIME, ST_CTIME]:
- if mystat[x] != self.filestat[x]:
- changed=1
- if not changed:
- return 1
- self.filestat=mystat
- a=open(self.file,"r")
- a.seek(-16,2)
- trailer=a.read()
- self.infosize=0
- self.xpaksize=0
- if trailer[-4:]!="STOP":
- a.close()
- return 0
- if trailer[0:8]!="XPAKSTOP":
- a.close()
- return 0
- self.infosize=decodeint(trailer[8:12])
- self.xpaksize=self.infosize+8
- a.seek(-(self.xpaksize),2)
- header=a.read(16)
- if header[0:8]!="XPAKPACK":
- a.close()
- return 0
- self.indexsize=decodeint(header[8:12])
- self.datasize=decodeint(header[12:16])
- self.indexpos=a.tell()
- self.index=a.read(self.indexsize)
- self.datapos=a.tell()
- a.close()
- return 2
- except SystemExit, e:
- raise
- except:
- return 0
-
- def filelist(self):
- """Return an array of each file listed in the index."""
- if not self.scan():
- return None
- return getindex_mem(self.index)
-
- def getfile(self,myfile,mydefault=None):
- """Finds 'myfile' in the data segment and returns it."""
- if not self.scan():
- return None
- myresult=searchindex(self.index,myfile)
- if not myresult:
- return mydefault
- a=open(self.file,"r")
- a.seek(self.datapos+myresult[0],0)
- myreturn=a.read(myresult[1])
- a.close()
- return myreturn
-
- def getelements(self,myfile):
- """A split/array representation of tbz2.getfile()"""
- mydat=self.getfile(myfile)
- if not mydat:
- return []
- return mydat.split()
-
- def unpackinfo(self,mydest):
- """Unpacks all the files from the dataSegment into 'mydest'."""
- if not self.scan():
- return 0
- try:
- origdir=os.getcwd()
- except SystemExit, e:
- raise
- except:
- os.chdir("/")
- origdir="/"
- a=open(self.file,"r")
- if not os.path.exists(mydest):
- os.makedirs(mydest)
- os.chdir(mydest)
- startpos=0
- while ((startpos+8)<self.indexsize):
- namelen=decodeint(self.index[startpos:startpos+4])
- datapos=decodeint(self.index[startpos+4+namelen:startpos+8+namelen]);
- datalen=decodeint(self.index[startpos+8+namelen:startpos+12+namelen]);
- myname=self.index[startpos+4:startpos+4+namelen]
- dirname=os.path.dirname(myname)
- if dirname:
- if not os.path.exists(dirname):
- os.makedirs(dirname)
- mydat=open(myname,"w")
- a.seek(self.datapos+datapos)
- mydat.write(a.read(datalen))
- mydat.close()
- startpos=startpos+namelen+12
- a.close()
- os.chdir(origdir)
- return 1
-
- def get_data(self):
- """Returns all the files from the dataSegment as a map object."""
- if not self.scan():
- return 0
- a = open(self.file, "r")
- mydata = {}
- startpos=0
- while ((startpos+8)<self.indexsize):
- namelen=decodeint(self.index[startpos:startpos+4])
- datapos=decodeint(self.index[startpos+4+namelen:startpos+8+namelen]);
- datalen=decodeint(self.index[startpos+8+namelen:startpos+12+namelen]);
- myname=self.index[startpos+4:startpos+4+namelen]
- a.seek(self.datapos+datapos)
- mydata[myname] = a.read(datalen)
- startpos=startpos+namelen+12
- a.close()
- return mydata
-
- def getboth(self):
- """Returns an array [indexSegment,dataSegment]"""
- if not self.scan():
- return None
-
- a = open(self.file,"r")
- a.seek(self.datapos)
- mydata =a.read(self.datasize)
- a.close()
-
- return self.index, mydata
-
+portage/xpak.py
\ No newline at end of file
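
A round trip through the removed helpers makes the format comment at the top of this hunk concrete: xpak_mem() builds the XPAKPACK...XPAKSTOP segment, xsplit_mem() separates the index from the data, and searchindex() resolves a name to an (offset, length) pair. A minimal in-memory sketch with invented entries:

    segment = xpak_mem({"CATEGORY": "sys-apps\n", "PF": "portage-2.1.2\n"})
    print segment[:8], segment[-8:]          # XPAKPACK XPAKSTOP

    index, data = xsplit_mem(segment)
    print sorted(getindex_mem(index))        # ['CATEGORY', 'PF']

    offset, length = searchindex(index, "PF")
    print data[offset:offset + length]       # portage-2.1.2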