author     Magnus Granberg <zorry@gentoo.org>    2020-05-16 22:09:58 +0200
committer  Magnus Granberg <zorry@gentoo.org>    2020-05-16 22:09:58 +0200
commit     6789c8156cea8517e340cf25774d3238e546abb5 (patch)
tree       4bf5646728edb529158eddf8428aed4f31d118c8
parent     Add usepkg and buildpkg as project options (diff)
download   tinderbox-cluster-6789c8156cea8517e340cf25774d3238e546abb5.tar.gz
           tinderbox-cluster-6789c8156cea8517e340cf25774d3238e546abb5.tar.bz2
           tinderbox-cluster-6789c8156cea8517e340cf25774d3238e546abb5.zip
Add support to use the db instead of the Packages file for bin packages
Signed-off-by: Magnus Granberg <zorry@gentoo.org>
-rw-r--r--  gosbs/_emerge/Binpkg.py             466
-rw-r--r--  gosbs/_emerge/MergeListItem.py      129
-rw-r--r--  gosbs/_emerge/Scheduler.py         3951
-rw-r--r--  gosbs/_emerge/actions.py              3
-rw-r--r--  gosbs/builder/binary.py             296
-rw-r--r--  gosbs/builder/build_checker.py       17
-rw-r--r--  gosbs/common/binary.py               32
-rw-r--r--  gosbs/db/sqlalchemy/models.py        75
-rw-r--r--  gosbs/objects/__init__.py             5
-rw-r--r--  gosbs/objects/binary.py             350
-rw-r--r--  gosbs/objects/binary_header.py      362
-rw-r--r--  gosbs/portage/__init__.py            72
-rw-r--r--  gosbs/portage/dbapi/__init__.py       0
-rw-r--r--  gosbs/portage/dbapi/bintree.py     1802
-rw-r--r--  gosbs/tasks/builder/build_pkg.py      4
15 files changed, 5543 insertions, 2021 deletions
diff --git a/gosbs/_emerge/Binpkg.py b/gosbs/_emerge/Binpkg.py
new file mode 100644
index 0000000..30e59d5
--- /dev/null
+++ b/gosbs/_emerge/Binpkg.py
@@ -0,0 +1,466 @@
+# Copyright 1999-2019 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+import functools
+
+import _emerge.emergelog
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.BinpkgFetcher import BinpkgFetcher
+from _emerge.BinpkgEnvExtractor import BinpkgEnvExtractor
+from _emerge.CompositeTask import CompositeTask
+from _emerge.BinpkgVerifier import BinpkgVerifier
+from _emerge.EbuildMerge import EbuildMerge
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.SpawnProcess import SpawnProcess
+from portage.eapi import eapi_exports_replace_vars
+from portage.util import ensure_dirs
+from portage.util._async.AsyncTaskFuture import AsyncTaskFuture
+from portage.util.futures.compat_coroutine import coroutine
+import portage
+from portage import os
+from portage import shutil
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+import io
+import logging
+
+class Binpkg(CompositeTask):
+
+ __slots__ = ("find_blockers",
+ "ldpath_mtimes", "logger", "opts",
+ "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
+ ("_bintree", "_build_dir", "_build_prefix",
+ "_ebuild_path", "_fetched_pkg",
+ "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
+
+ def _writemsg_level(self, msg, level=0, noiselevel=0):
+ self.scheduler.output(msg, level=level, noiselevel=noiselevel,
+ log_path=self.settings.get("PORTAGE_LOG_FILE"))
+
+ def _start(self):
+
+ pkg = self.pkg
+ settings = self.settings
+ settings.setcpv(pkg)
+ self._tree = "bintree"
+ self._bintree = self.pkg.root_config.trees[self._tree]
+ self._verify = not self.opts.pretend
+
+ # Use realpath like doebuild_environment() does, since we assert
+ # that this path is literally identical to PORTAGE_BUILDDIR.
+ dir_path = os.path.join(os.path.realpath(settings["PORTAGE_TMPDIR"]),
+ "portage", pkg.category, pkg.pf)
+ self._image_dir = os.path.join(dir_path, "image")
+ self._infloc = os.path.join(dir_path, "build-info")
+ self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
+ settings["EBUILD"] = self._ebuild_path
+ portage.doebuild_environment(self._ebuild_path, 'setup',
+ settings=self.settings, db=self._bintree.dbapi)
+ if dir_path != self.settings['PORTAGE_BUILDDIR']:
+ raise AssertionError("'%s' != '%s'" % \
+ (dir_path, self.settings['PORTAGE_BUILDDIR']))
+ self._build_dir = EbuildBuildDir(
+ scheduler=self.scheduler, settings=settings)
+ settings.configdict["pkg"]["EMERGE_FROM"] = "binary"
+ settings.configdict["pkg"]["MERGE_TYPE"] = "binary"
+
+ if eapi_exports_replace_vars(settings["EAPI"]):
+ vardb = self.pkg.root_config.trees["vartree"].dbapi
+ settings["REPLACING_VERSIONS"] = " ".join(
+ set(portage.versions.cpv_getversion(x) \
+ for x in vardb.match(self.pkg.slot_atom) + \
+ vardb.match('='+self.pkg.cpv)))
+
+ # The prefetcher has already completed or it
+ # could be running now. If it's running now,
+ # wait for it to complete since it holds
+ # a lock on the file being fetched. The
+ # portage.locks functions are only designed
+ # to work between separate processes. Since
+ # the lock is held by the current process,
+ # use the scheduler and fetcher methods to
+ # synchronize with the fetcher.
+ prefetcher = self.prefetcher
+ if prefetcher is None:
+ pass
+ elif prefetcher.isAlive() and \
+ prefetcher.poll() is None:
+
+ if not self.background:
+ fetch_log = os.path.join(
+ _emerge.emergelog._emerge_log_dir, 'emerge-fetch.log')
+ msg = (
+ 'Fetching in the background:',
+ prefetcher.pkg_path,
+ 'To view fetch progress, run in another terminal:',
+ 'tail -f %s' % fetch_log,
+ )
+ out = portage.output.EOutput()
+ for l in msg:
+ out.einfo(l)
+
+ self._current_task = prefetcher
+ prefetcher.addExitListener(self._prefetch_exit)
+ return
+
+ self._prefetch_exit(prefetcher)
+
+ def _prefetch_exit(self, prefetcher):
+
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ if not (self.opts.pretend or self.opts.fetchonly):
+ self._start_task(
+ AsyncTaskFuture(future=self._build_dir.async_lock()),
+ self._start_fetcher)
+ else:
+ self._start_fetcher()
+
+ def _start_fetcher(self, lock_task=None):
+ if lock_task is not None:
+ self._assert_current(lock_task)
+ if lock_task.cancelled:
+ self._default_final_exit(lock_task)
+ return
+
+ lock_task.future.result()
+ # Initialize PORTAGE_LOG_FILE (clean_log won't work without it).
+ portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
+ # If necessary, discard old log so that we don't
+ # append to it.
+ self._build_dir.clean_log()
+
+ pkg = self.pkg
+ pkg_count = self.pkg_count
+ fetcher = None
+
+ if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
+
+ fetcher = BinpkgFetcher(background=self.background,
+ logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
+ pretend=self.opts.pretend, scheduler=self.scheduler)
+
+ msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv,
+ fetcher.pkg_path)
+ short_msg = "emerge: (%s of %s) %s Fetch" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv)
+ self.logger.log(msg, short_msg=short_msg)
+
+ # Allow the Scheduler's fetch queue to control the
+ # number of concurrent fetchers.
+ fetcher.addExitListener(self._fetcher_exit)
+ self._task_queued(fetcher)
+ self.scheduler.fetch.schedule(fetcher)
+ return
+
+ self._fetcher_exit(fetcher)
+
+ def _fetcher_exit(self, fetcher):
+
+ # The fetcher only has a returncode when
+ # --getbinpkg is enabled.
+ if fetcher is not None:
+ self._fetched_pkg = fetcher.pkg_path
+ if self._default_exit(fetcher) != os.EX_OK:
+ self._async_unlock_builddir(returncode=self.returncode)
+ return
+
+ if self.opts.pretend:
+ self._current_task = None
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ verifier = None
+ if self._verify:
+ if self._fetched_pkg:
+ path = self._fetched_pkg
+ else:
+ path = self.pkg.root_config.trees["bintree"].getname(
+ self.pkg.cpv)
+ logfile = self.settings.get("PORTAGE_LOG_FILE")
+ verifier = BinpkgVerifier(background=self.background,
+ logfile=logfile, pkg=self.pkg, scheduler=self.scheduler,
+ _pkg_path=path)
+ self._start_task(verifier, self._verifier_exit)
+ return
+
+ self._verifier_exit(verifier)
+
+ def _verifier_exit(self, verifier):
+ if verifier is not None and \
+ self._default_exit(verifier) != os.EX_OK:
+ self._async_unlock_builddir(returncode=self.returncode)
+ return
+
+ logger = self.logger
+ pkg = self.pkg
+ pkg_count = self.pkg_count
+
+ if self._fetched_pkg:
+ pkg_path = self._bintree.getname(
+ self._bintree.inject(pkg.cpv,
+ filename=self._fetched_pkg),
+ allocate_new=False)
+ else:
+ pkg_path = self.pkg.root_config.trees["bintree"].getname(
+ self.pkg.cpv)
+ self.pkg.root_config.trees["bintree"].touch(pkg)
+
+ # This gives bashrc users an opportunity to do various things
+ # such as remove binary packages after they're installed.
+ if pkg_path is not None:
+ self.settings["PORTAGE_BINPKG_FILE"] = pkg_path
+ self._pkg_path = pkg_path
+
+ logfile = self.settings.get("PORTAGE_LOG_FILE")
+ if logfile is not None and os.path.isfile(logfile):
+ # Remove fetch log after successful fetch.
+ try:
+ os.unlink(logfile)
+ except OSError:
+ pass
+
+ if self.opts.fetchonly:
+ self._current_task = None
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ msg = " === (%s of %s) Merging Binary (%s::%s)" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
+ short_msg = "emerge: (%s of %s) %s Merge Binary" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv)
+ logger.log(msg, short_msg=short_msg)
+
+ phase = "clean"
+ settings = self.settings
+ ebuild_phase = EbuildPhase(background=self.background,
+ phase=phase, scheduler=self.scheduler,
+ settings=settings)
+
+ self._start_task(ebuild_phase, self._clean_exit)
+
+ def _clean_exit(self, clean_phase):
+ if self._default_exit(clean_phase) != os.EX_OK:
+ self._async_unlock_builddir(returncode=self.returncode)
+ return
+
+ self._start_task(
+ AsyncTaskFuture(future=self._unpack_metadata()),
+ self._unpack_metadata_exit)
+
+ @coroutine
+ def _unpack_metadata(self):
+
+ dir_path = self.settings['PORTAGE_BUILDDIR']
+
+ infloc = self._infloc
+ pkg = self.pkg
+ pkg_path = self._pkg_path
+
+ dir_mode = 0o755
+ for mydir in (dir_path, self._image_dir, infloc):
+ portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
+ gid=portage.data.portage_gid, mode=dir_mode)
+
+ # This initializes PORTAGE_LOG_FILE.
+ portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
+ self._writemsg_level(">>> Extracting info\n")
+
+ yield self._bintree.dbapi.unpack_metadata(self.settings, infloc)
+ check_missing_metadata = ("CATEGORY", "PF")
+ for k, v in zip(check_missing_metadata,
+ self._bintree.dbapi.aux_get(self.pkg.cpv, check_missing_metadata)):
+ if v:
+ continue
+ elif k == "CATEGORY":
+ v = pkg.category
+ elif k == "PF":
+ v = pkg.pf
+ else:
+ continue
+
+ f = io.open(_unicode_encode(os.path.join(infloc, k),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['content'],
+ errors='backslashreplace')
+ try:
+ f.write(_unicode_decode(v + "\n"))
+ finally:
+ f.close()
+
+ # Store the md5sum in the vdb.
+ if pkg_path is not None:
+ md5sum, = self._bintree.dbapi.aux_get(self.pkg.cpv, ['MD5'])
+ if not md5sum:
+ md5sum = portage.checksum.perform_md5(pkg_path)
+ with io.open(_unicode_encode(os.path.join(infloc, 'BINPKGMD5'),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['content'], errors='strict') as f:
+ f.write(_unicode_decode('{}\n'.format(md5sum)))
+
+ env_extractor = BinpkgEnvExtractor(background=self.background,
+ scheduler=self.scheduler, settings=self.settings)
+ env_extractor.start()
+ yield env_extractor.async_wait()
+ if env_extractor.returncode != os.EX_OK:
+ raise portage.exception.PortageException('failed to extract environment for {}'.format(self.pkg.cpv))
+
+ def _unpack_metadata_exit(self, unpack_metadata):
+ if self._default_exit(unpack_metadata) != os.EX_OK:
+ unpack_metadata.future.result()
+ self._async_unlock_builddir(returncode=self.returncode)
+ return
+
+ setup_phase = EbuildPhase(background=self.background,
+ phase="setup", scheduler=self.scheduler,
+ settings=self.settings)
+
+ setup_phase.addExitListener(self._setup_exit)
+ self._task_queued(setup_phase)
+ self.scheduler.scheduleSetup(setup_phase)
+
+ def _setup_exit(self, setup_phase):
+ if self._default_exit(setup_phase) != os.EX_OK:
+ self._async_unlock_builddir(returncode=self.returncode)
+ return
+
+ self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
+ self._start_task(
+ AsyncTaskFuture(future=self._bintree.dbapi.unpack_contents(
+ self.settings,
+ self._image_dir)),
+ self._unpack_contents_exit)
+
+ def _unpack_contents_exit(self, unpack_contents):
+ if self._default_exit(unpack_contents) != os.EX_OK:
+ unpack_contents.future.result()
+ self._writemsg_level("!!! Error Extracting '%s'\n" % \
+ self._pkg_path, noiselevel=-1, level=logging.ERROR)
+ self._async_unlock_builddir(returncode=self.returncode)
+ return
+
+ try:
+ with io.open(_unicode_encode(os.path.join(self._infloc, "EPREFIX"),
+ encoding=_encodings['fs'], errors='strict'), mode='r',
+ encoding=_encodings['repo.content'], errors='replace') as f:
+ self._build_prefix = f.read().rstrip('\n')
+ except IOError:
+ self._build_prefix = ""
+
+ if self._build_prefix == self.settings["EPREFIX"]:
+ ensure_dirs(self.settings["ED"])
+ self._current_task = None
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ env = self.settings.environ()
+ env["PYTHONPATH"] = self.settings["PORTAGE_PYTHONPATH"]
+ chpathtool = SpawnProcess(
+ args=[portage._python_interpreter,
+ os.path.join(self.settings["PORTAGE_BIN_PATH"], "chpathtool.py"),
+ self.settings["D"], self._build_prefix, self.settings["EPREFIX"]],
+ background=self.background, env=env,
+ scheduler=self.scheduler,
+ logfile=self.settings.get('PORTAGE_LOG_FILE'))
+ self._writemsg_level(">>> Adjusting Prefix to %s\n" % self.settings["EPREFIX"])
+ self._start_task(chpathtool, self._chpathtool_exit)
+
+ def _chpathtool_exit(self, chpathtool):
+ if self._final_exit(chpathtool) != os.EX_OK:
+ self._writemsg_level("!!! Error Adjusting Prefix to %s\n" %
+ (self.settings["EPREFIX"],),
+ noiselevel=-1, level=logging.ERROR)
+ self._async_unlock_builddir(returncode=self.returncode)
+ return
+
+ # We want to install in "our" prefix, not the binary one
+ with io.open(_unicode_encode(os.path.join(self._infloc, "EPREFIX"),
+ encoding=_encodings['fs'], errors='strict'), mode='w',
+ encoding=_encodings['repo.content'], errors='strict') as f:
+ f.write(self.settings["EPREFIX"] + "\n")
+
+ # Move the files to the correct location for merge.
+ image_tmp_dir = os.path.join(
+ self.settings["PORTAGE_BUILDDIR"], "image_tmp")
+ build_d = os.path.join(self.settings["D"],
+ self._build_prefix.lstrip(os.sep)).rstrip(os.sep)
+ if not os.path.isdir(build_d):
+ # Assume this is a virtual package or something.
+ shutil.rmtree(self._image_dir)
+ ensure_dirs(self.settings["ED"])
+ else:
+ os.rename(build_d, image_tmp_dir)
+ if build_d != self._image_dir:
+ shutil.rmtree(self._image_dir)
+ ensure_dirs(os.path.dirname(self.settings["ED"].rstrip(os.sep)))
+ os.rename(image_tmp_dir, self.settings["ED"])
+
+ self.wait()
+
+ def _async_unlock_builddir(self, returncode=None):
+ """
+ Release the lock asynchronously, and if a returncode parameter
+ is given then set self.returncode and notify exit listeners.
+ """
+ if self.opts.pretend or self.opts.fetchonly:
+ if returncode is not None:
+ self.returncode = returncode
+ self._async_wait()
+ return
+ if returncode is not None:
+ # The returncode will be set after unlock is complete.
+ self.returncode = None
+ portage.elog.elog_process(self.pkg.cpv, self.settings)
+ self._start_task(
+ AsyncTaskFuture(future=self._build_dir.async_unlock()),
+ functools.partial(self._unlock_builddir_exit, returncode=returncode))
+
+ def _unlock_builddir_exit(self, unlock_task, returncode=None):
+ self._assert_current(unlock_task)
+ if unlock_task.cancelled and returncode is not None:
+ self._default_final_exit(unlock_task)
+ return
+
+ # Normally, async_unlock should not raise an exception here.
+ unlock_task.future.cancelled() or unlock_task.future.result()
+ if returncode is not None:
+ self.returncode = returncode
+ self._async_wait()
+
+ def create_install_task(self):
+ task = EbuildMerge(exit_hook=self._install_exit,
+ find_blockers=self.find_blockers,
+ ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
+ pkg=self.pkg, pkg_count=self.pkg_count,
+ pkg_path=self._pkg_path, scheduler=self.scheduler,
+ settings=self.settings, tree=self._tree,
+ world_atom=self.world_atom)
+ return task
+
+ def _install_exit(self, task):
+ """
+ @returns: Future, result is the returncode from an
+ EbuildBuildDir.async_unlock() task
+ """
+ self.settings.pop("PORTAGE_BINPKG_FILE", None)
+ if task.returncode == os.EX_OK and \
+ 'binpkg-logs' not in self.settings.features and \
+ self.settings.get("PORTAGE_LOG_FILE"):
+ try:
+ os.unlink(self.settings["PORTAGE_LOG_FILE"])
+ except OSError:
+ pass
+ self._async_unlock_builddir()
+ if self._current_task is None:
+ result = self.scheduler.create_future()
+ self.scheduler.call_soon(result.set_result, os.EX_OK)
+ else:
+ result = self._current_task.async_wait()
+ return result
diff --git a/gosbs/_emerge/MergeListItem.py b/gosbs/_emerge/MergeListItem.py
new file mode 100644
index 0000000..b0919fe
--- /dev/null
+++ b/gosbs/_emerge/MergeListItem.py
@@ -0,0 +1,129 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.dep import _repo_separator
+from portage.output import colorize
+
+from _emerge.AsynchronousTask import AsynchronousTask
+from gosbs._emerge.Binpkg import Binpkg
+from _emerge.CompositeTask import CompositeTask
+from _emerge.EbuildBuild import EbuildBuild
+from _emerge.PackageUninstall import PackageUninstall
+
+class MergeListItem(CompositeTask):
+
+ """
+ TODO: For parallel scheduling, everything here needs asynchronous
+ execution support (start, poll, and wait methods).
+ """
+
+ __slots__ = ("args_set",
+ "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
+ "find_blockers", "logger", "mtimedb", "pkg",
+ "pkg_count", "pkg_to_replace", "prefetcher",
+ "settings", "statusMessage", "world_atom") + \
+ ("_install_task",)
+
+ def _start(self):
+
+ pkg = self.pkg
+ build_opts = self.build_opts
+
+ if pkg.installed:
+ # uninstall, executed by self.merge()
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ args_set = self.args_set
+ find_blockers = self.find_blockers
+ logger = self.logger
+ mtimedb = self.mtimedb
+ pkg_count = self.pkg_count
+ scheduler = self.scheduler
+ settings = self.settings
+ world_atom = self.world_atom
+ ldpath_mtimes = mtimedb["ldpath"]
+
+ action_desc = "Emerging"
+ preposition = "for"
+ pkg_color = "PKG_MERGE"
+ if pkg.type_name == "binary":
+ pkg_color = "PKG_BINARY_MERGE"
+ action_desc += " binary"
+
+ if build_opts.fetchonly:
+ action_desc = "Fetching"
+
+ msg = "%s (%s of %s) %s" % \
+ (action_desc,
+ colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
+ colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
+ colorize(pkg_color, pkg.cpv + _repo_separator + pkg.repo))
+
+ if pkg.root_config.settings["ROOT"] != "/":
+ msg += " %s %s" % (preposition, pkg.root)
+
+ if not build_opts.pretend:
+ self.statusMessage(msg)
+ logger.log(" >>> emerge (%s of %s) %s to %s" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
+
+ if pkg.type_name == "ebuild":
+
+ build = EbuildBuild(args_set=args_set,
+ background=self.background,
+ config_pool=self.config_pool,
+ find_blockers=find_blockers,
+ ldpath_mtimes=ldpath_mtimes, logger=logger,
+ opts=build_opts, pkg=pkg, pkg_count=pkg_count,
+ prefetcher=self.prefetcher, scheduler=scheduler,
+ settings=settings, world_atom=world_atom)
+
+ self._install_task = build
+ self._start_task(build, self._default_final_exit)
+ return
+
+ elif pkg.type_name == "binary":
+
+ binpkg = Binpkg(background=self.background,
+ find_blockers=find_blockers,
+ ldpath_mtimes=ldpath_mtimes, logger=logger,
+ opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
+ prefetcher=self.prefetcher, settings=settings,
+ scheduler=scheduler, world_atom=world_atom)
+
+ self._install_task = binpkg
+ self._start_task(binpkg, self._default_final_exit)
+ return
+
+ def create_install_task(self):
+
+ pkg = self.pkg
+ build_opts = self.build_opts
+ mtimedb = self.mtimedb
+ scheduler = self.scheduler
+ settings = self.settings
+ world_atom = self.world_atom
+ ldpath_mtimes = mtimedb["ldpath"]
+
+ if pkg.installed:
+ if not (build_opts.buildpkgonly or \
+ build_opts.fetchonly or build_opts.pretend):
+
+ task = PackageUninstall(background=self.background,
+ ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
+ pkg=pkg, scheduler=scheduler, settings=settings,
+ world_atom=world_atom)
+
+ else:
+ task = AsynchronousTask()
+
+ elif build_opts.fetchonly or \
+ build_opts.buildpkgonly:
+ task = AsynchronousTask()
+ else:
+ task = self._install_task.create_install_task()
+
+ return task
diff --git a/gosbs/_emerge/Scheduler.py b/gosbs/_emerge/Scheduler.py
index 6f45640..668ba56 100644
--- a/gosbs/_emerge/Scheduler.py
+++ b/gosbs/_emerge/Scheduler.py
@@ -34,7 +34,7 @@ from portage.util._eventloop.EventLoop import EventLoop
from portage.package.ebuild.digestcheck import digestcheck
from portage.package.ebuild.digestgen import digestgen
from portage.package.ebuild.doebuild import (_check_temp_dir,
- _prepare_self_update)
+ _prepare_self_update)
from portage.package.ebuild.prepare_build_dirs import prepare_build_dirs
import _emerge
@@ -57,14 +57,17 @@ from _emerge.getloadavg import getloadavg
from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
from _emerge.JobStatusDisplay import JobStatusDisplay
-from _emerge.MergeListItem import MergeListItem
+from gosbs._emerge.MergeListItem import MergeListItem
from _emerge.Package import Package
from _emerge.PackageMerge import PackageMerge
from _emerge.PollScheduler import PollScheduler
from _emerge.SequentialTaskQueue import SequentialTaskQueue
+from gosbs.builder.build_checker import check_build
+from gosbs.context import get_admin_context
+
if sys.hexversion >= 0x3000000:
- basestring = str
+ basestring = str
# enums
FAILURE = 1
@@ -72,1971 +75,1977 @@ FAILURE = 1
class Scheduler(PollScheduler):
- # max time between loadavg checks (seconds)
- _loadavg_latency = 30
-
- # max time between display status updates (seconds)
- _max_display_latency = 3
-
- _opts_ignore_blockers = \
- frozenset(["--buildpkgonly",
- "--fetchonly", "--fetch-all-uri",
- "--nodeps", "--pretend"])
-
- _opts_no_background = \
- frozenset(["--pretend",
- "--fetchonly", "--fetch-all-uri"])
-
- _opts_no_self_update = frozenset(["--buildpkgonly",
- "--fetchonly", "--fetch-all-uri", "--pretend"])
-
- class _iface_class(SchedulerInterface):
- __slots__ = ("fetch",
- "scheduleSetup", "scheduleUnpack")
-
- class _fetch_iface_class(SlotObject):
- __slots__ = ("log_file", "schedule")
-
- _task_queues_class = slot_dict_class(
- ("merge", "jobs", "ebuild_locks", "fetch", "unpack"), prefix="")
-
- class _build_opts_class(SlotObject):
- __slots__ = ("buildpkg", "buildpkg_exclude", "buildpkgonly",
- "fetch_all_uri", "fetchonly", "pretend")
-
- class _binpkg_opts_class(SlotObject):
- __slots__ = ("fetchonly", "getbinpkg", "pretend")
-
- class _pkg_count_class(SlotObject):
- __slots__ = ("curval", "maxval")
-
- class _emerge_log_class(SlotObject):
- __slots__ = ("xterm_titles",)
-
- def log(self, *pargs, **kwargs):
- if not self.xterm_titles:
- # Avoid interference with the scheduler's status display.
- kwargs.pop("short_msg", None)
- emergelog(self.xterm_titles, *pargs, **kwargs)
-
- class _failed_pkg(SlotObject):
- __slots__ = ("build_dir", "build_log", "pkg",
- "postinst_failure", "returncode")
-
- class _ConfigPool(object):
- """Interface for a task to temporarily allocate a config
- instance from a pool. This allows a task to be constructed
- long before the config instance actually becomes needed, like
- when prefetchers are constructed for the whole merge list."""
- __slots__ = ("_root", "_allocate", "_deallocate")
- def __init__(self, root, allocate, deallocate):
- self._root = root
- self._allocate = allocate
- self._deallocate = deallocate
- def allocate(self):
- return self._allocate(self._root)
- def deallocate(self, settings):
- self._deallocate(settings)
-
- class _unknown_internal_error(portage.exception.PortageException):
- """
- Used internally to terminate scheduling. The specific reason for
- the failure should have been dumped to stderr.
- """
- def __init__(self, value=""):
- portage.exception.PortageException.__init__(self, value)
-
- def __init__(self, settings, trees, mtimedb, myopts,
- spinner, mergelist=None, favorites=None, graph_config=None):
- PollScheduler.__init__(self, main=True)
-
- if mergelist is not None:
- warnings.warn("The mergelist parameter of the " + \
- "_emerge.Scheduler constructor is now unused. Use " + \
- "the graph_config parameter instead.",
- DeprecationWarning, stacklevel=2)
-
- self.settings = settings
- self.target_root = settings["EROOT"]
- self.trees = trees
- self.myopts = myopts
- self._spinner = spinner
- self._mtimedb = mtimedb
- self._favorites = favorites
- self._args_set = InternalPackageSet(favorites, allow_repo=True)
- self._build_opts = self._build_opts_class()
-
- for k in self._build_opts.__slots__:
- setattr(self._build_opts, k, myopts.get("--" + k.replace("_", "-")))
- self._build_opts.buildpkg_exclude = InternalPackageSet( \
- initial_atoms=" ".join(myopts.get("--buildpkg-exclude", [])).split(), \
- allow_wildcard=True, allow_repo=True)
- if "mirror" in self.settings.features:
- self._build_opts.fetch_all_uri = True
-
- self._binpkg_opts = self._binpkg_opts_class()
- for k in self._binpkg_opts.__slots__:
- setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
-
- self.curval = 0
- self._logger = self._emerge_log_class()
- self._task_queues = self._task_queues_class()
- for k in self._task_queues.allowed_keys:
- setattr(self._task_queues, k,
- SequentialTaskQueue())
-
- # Holds merges that will wait to be executed when no builds are
- # executing. This is useful for system packages since dependencies
- # on system packages are frequently unspecified. For example, see
- # bug #256616.
- self._merge_wait_queue = deque()
- # Holds merges that have been transfered from the merge_wait_queue to
- # the actual merge queue. They are removed from this list upon
- # completion. Other packages can start building only when this list is
- # empty.
- self._merge_wait_scheduled = []
-
- # Holds system packages and their deep runtime dependencies. Before
- # being merged, these packages go to merge_wait_queue, to be merged
- # when no other packages are building.
- self._deep_system_deps = set()
-
- # Holds packages to merge which will satisfy currently unsatisfied
- # deep runtime dependencies of system packages. If this is not empty
- # then no parallel builds will be spawned until it is empty. This
- # minimizes the possibility that a build will fail due to the system
- # being in a fragile state. For example, see bug #259954.
- self._unsatisfied_system_deps = set()
-
- self._status_display = JobStatusDisplay(
- xterm_titles=('notitles' not in settings.features))
- self._max_load = myopts.get("--load-average")
- max_jobs = myopts.get("--jobs")
- if max_jobs is None:
- max_jobs = 1
- self._set_max_jobs(max_jobs)
- self._running_root = trees[trees._running_eroot]["root_config"]
- self.edebug = 0
- if settings.get("PORTAGE_DEBUG", "") == "1":
- self.edebug = 1
- self.pkgsettings = {}
- self._config_pool = {}
- for root in self.trees:
- self._config_pool[root] = []
-
- self._fetch_log = os.path.join(_emerge.emergelog._emerge_log_dir,
- 'emerge-fetch.log')
- fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
- schedule=self._schedule_fetch)
- self._sched_iface = self._iface_class(
- self._event_loop,
- is_background=self._is_background,
- fetch=fetch_iface,
- scheduleSetup=self._schedule_setup,
- scheduleUnpack=self._schedule_unpack)
-
- self._prefetchers = weakref.WeakValueDictionary()
- self._pkg_queue = []
- self._jobs = 0
- self._running_tasks = {}
- self._completed_tasks = set()
- self._main_exit = None
- self._main_loadavg_handle = None
- self._schedule_merge_wakeup_task = None
-
- self._failed_pkgs = []
- self._failed_pkgs_all = []
- self._failed_pkgs_die_msgs = []
- self._post_mod_echo_msgs = []
- self._parallel_fetch = False
- self._init_graph(graph_config)
- merge_count = len([x for x in self._mergelist \
- if isinstance(x, Package) and x.operation == "merge"])
- self._pkg_count = self._pkg_count_class(
- curval=0, maxval=merge_count)
- self._status_display.maxval = self._pkg_count.maxval
-
- # The load average takes some time to respond when new
- # jobs are added, so we need to limit the rate of adding
- # new jobs.
- self._job_delay_max = 5
- self._previous_job_start_time = None
- self._job_delay_timeout_id = None
-
- # The load average takes some time to respond when after
- # a SIGSTOP/SIGCONT cycle, so delay scheduling for some
- # time after SIGCONT is received.
- self._sigcont_delay = 5
- self._sigcont_time = None
-
- # This is used to memoize the _choose_pkg() result when
- # no packages can be chosen until one of the existing
- # jobs completes.
- self._choose_pkg_return_early = False
-
- features = self.settings.features
- if "parallel-fetch" in features and \
- not ("--pretend" in self.myopts or \
- "--fetch-all-uri" in self.myopts or \
- "--fetchonly" in self.myopts):
- if "distlocks" not in features:
- portage.writemsg(red("!!!")+"\n", noiselevel=-1)
- portage.writemsg(red("!!!")+" parallel-fetching " + \
- "requires the distlocks feature enabled"+"\n",
- noiselevel=-1)
- portage.writemsg(red("!!!")+" you have it disabled, " + \
- "thus parallel-fetching is being disabled"+"\n",
- noiselevel=-1)
- portage.writemsg(red("!!!")+"\n", noiselevel=-1)
- elif merge_count > 1:
- self._parallel_fetch = True
-
- if self._parallel_fetch:
- # clear out existing fetch log if it exists
- try:
- open(self._fetch_log, 'w').close()
- except EnvironmentError:
- pass
-
- self._running_portage = None
- portage_match = self._running_root.trees["vartree"].dbapi.match(
- portage.const.PORTAGE_PACKAGE_ATOM)
- if portage_match:
- cpv = portage_match.pop()
- self._running_portage = self._pkg(cpv, "installed",
- self._running_root, installed=True)
-
- def _handle_self_update(self):
-
- if self._opts_no_self_update.intersection(self.myopts):
- return os.EX_OK
-
- for x in self._mergelist:
- if not isinstance(x, Package):
- continue
- if x.operation != "merge":
- continue
- if x.root != self._running_root.root:
- continue
- if not portage.dep.match_from_list(
- portage.const.PORTAGE_PACKAGE_ATOM, [x]):
- continue
- rval = _check_temp_dir(self.settings)
- if rval != os.EX_OK:
- return rval
- _prepare_self_update(self.settings)
- break
-
- return os.EX_OK
-
- def _terminate_tasks(self):
- self._status_display.quiet = True
- for task in list(self._running_tasks.values()):
- if task.isAlive():
- # This task should keep the main loop running until
- # it has had an opportunity to clean up after itself.
- # Rely on its exit hook to remove it from
- # self._running_tasks when it has finished cleaning up.
- task.cancel()
- else:
- # This task has been waiting to be started in one of
- # self._task_queues which are all cleared below. It
- # will never be started, so purged it from
- # self._running_tasks so that it won't keep the main
- # loop running.
- del self._running_tasks[id(task)]
-
- for q in self._task_queues.values():
- q.clear()
-
- def _init_graph(self, graph_config):
- """
- Initialization structures used for dependency calculations
- involving currently installed packages.
- """
- self._set_graph_config(graph_config)
- self._blocker_db = {}
- depgraph_params = create_depgraph_params(self.myopts, None)
- dynamic_deps = "dynamic_deps" in depgraph_params
- ignore_built_slot_operator_deps = self.myopts.get(
- "--ignore-built-slot-operator-deps", "n") == "y"
- for root in self.trees:
- if graph_config is None:
- fake_vartree = FakeVartree(self.trees[root]["root_config"],
- pkg_cache=self._pkg_cache, dynamic_deps=dynamic_deps,
- ignore_built_slot_operator_deps=ignore_built_slot_operator_deps)
- fake_vartree.sync()
- else:
- fake_vartree = graph_config.trees[root]['vartree']
- self._blocker_db[root] = BlockerDB(fake_vartree)
-
- def _destroy_graph(self):
- """
- Use this to free memory at the beginning of _calc_resume_list().
- After _calc_resume_list(), the _init_graph() method
- must to be called in order to re-generate the structures that
- this method destroys.
- """
- self._blocker_db = None
- self._set_graph_config(None)
- gc.collect()
-
- def _set_max_jobs(self, max_jobs):
- self._max_jobs = max_jobs
- self._task_queues.jobs.max_jobs = max_jobs
- if "parallel-install" in self.settings.features:
- self._task_queues.merge.max_jobs = max_jobs
-
- def _background_mode(self):
- """
- Check if background mode is enabled and adjust states as necessary.
-
- @rtype: bool
- @return: True if background mode is enabled, False otherwise.
- """
- background = (self._max_jobs is True or \
- self._max_jobs > 1 or "--quiet" in self.myopts \
- or self.myopts.get("--quiet-build") == "y") and \
- not bool(self._opts_no_background.intersection(self.myopts))
-
- if background:
- interactive_tasks = self._get_interactive_tasks()
- if interactive_tasks:
- background = False
- writemsg_level(">>> Sending package output to stdio due " + \
- "to interactive package(s):\n",
- level=logging.INFO, noiselevel=-1)
- msg = [""]
- for pkg in interactive_tasks:
- pkg_str = " " + colorize("INFORM", str(pkg.cpv))
- if pkg.root_config.settings["ROOT"] != "/":
- pkg_str += " for " + pkg.root
- msg.append(pkg_str)
- msg.append("")
- writemsg_level("".join("%s\n" % (l,) for l in msg),
- level=logging.INFO, noiselevel=-1)
- if self._max_jobs is True or self._max_jobs > 1:
- self._set_max_jobs(1)
- writemsg_level(">>> Setting --jobs=1 due " + \
- "to the above interactive package(s)\n",
- level=logging.INFO, noiselevel=-1)
- writemsg_level(">>> In order to temporarily mask " + \
- "interactive updates, you may\n" + \
- ">>> specify --accept-properties=-interactive\n",
- level=logging.INFO, noiselevel=-1)
- self._status_display.quiet = \
- not background or \
- ("--quiet" in self.myopts and \
- "--verbose" not in self.myopts)
-
- self._logger.xterm_titles = \
- "notitles" not in self.settings.features and \
- self._status_display.quiet
-
- return background
-
- def _get_interactive_tasks(self):
- interactive_tasks = []
- for task in self._mergelist:
- if not (isinstance(task, Package) and \
- task.operation == "merge"):
- continue
- if 'interactive' in task.properties:
- interactive_tasks.append(task)
- return interactive_tasks
-
- def _set_graph_config(self, graph_config):
-
- if graph_config is None:
- self._graph_config = None
- self._pkg_cache = {}
- self._digraph = None
- self._mergelist = []
- self._world_atoms = None
- self._deep_system_deps.clear()
- return
-
- self._graph_config = graph_config
- self._pkg_cache = graph_config.pkg_cache
- self._digraph = graph_config.graph
- self._mergelist = graph_config.mergelist
-
- # Generate world atoms while the event loop is not running,
- # since otherwise portdbapi match calls in the create_world_atom
- # function could trigger event loop recursion.
- self._world_atoms = {}
- for pkg in self._mergelist:
- if getattr(pkg, 'operation', None) != 'merge':
- continue
- atom = create_world_atom(pkg, self._args_set,
- pkg.root_config, before_install=True)
- if atom is not None:
- self._world_atoms[pkg] = atom
-
- if "--nodeps" in self.myopts or \
- (self._max_jobs is not True and self._max_jobs < 2):
- # save some memory
- self._digraph = None
- graph_config.graph = None
- graph_config.pkg_cache.clear()
- self._deep_system_deps.clear()
- for pkg in self._mergelist:
- self._pkg_cache[pkg] = pkg
- return
-
- self._find_system_deps()
- self._prune_digraph()
- self._prevent_builddir_collisions()
- if '--debug' in self.myopts:
- writemsg("\nscheduler digraph:\n\n", noiselevel=-1)
- self._digraph.debug_print()
- writemsg("\n", noiselevel=-1)
-
- def _find_system_deps(self):
- """
- Find system packages and their deep runtime dependencies. Before being
- merged, these packages go to merge_wait_queue, to be merged when no
- other packages are building.
- NOTE: This can only find deep system deps if the system set has been
- added to the graph and traversed deeply (the depgraph "complete"
- parameter will do this, triggered by emerge --complete-graph option).
- """
- params = create_depgraph_params(self.myopts, None)
- if not params["implicit_system_deps"]:
- return
-
- deep_system_deps = self._deep_system_deps
- deep_system_deps.clear()
- deep_system_deps.update(
- _find_deep_system_runtime_deps(self._digraph))
- deep_system_deps.difference_update([pkg for pkg in \
- deep_system_deps if pkg.operation != "merge"])
-
- def _prune_digraph(self):
- """
- Prune any root nodes that are irrelevant.
- """
-
- graph = self._digraph
- completed_tasks = self._completed_tasks
- removed_nodes = set()
- while True:
- for node in graph.root_nodes():
- if not isinstance(node, Package) or \
- (node.installed and node.operation == "nomerge") or \
- node.onlydeps or \
- node in completed_tasks:
- removed_nodes.add(node)
- if removed_nodes:
- graph.difference_update(removed_nodes)
- if not removed_nodes:
- break
- removed_nodes.clear()
-
- def _prevent_builddir_collisions(self):
- """
- When building stages, sometimes the same exact cpv needs to be merged
- to both $ROOTs. Add edges to the digraph in order to avoid collisions
- in the builddir. Currently, normal file locks would be inappropriate
- for this purpose since emerge holds all of it's build dir locks from
- the main process.
- """
- cpv_map = {}
- for pkg in self._mergelist:
- if not isinstance(pkg, Package):
- # a satisfied blocker
- continue
- if pkg.installed:
- continue
- if pkg.cpv not in cpv_map:
- cpv_map[pkg.cpv] = [pkg]
- continue
- for earlier_pkg in cpv_map[pkg.cpv]:
- self._digraph.add(earlier_pkg, pkg,
- priority=DepPriority(buildtime=True))
- cpv_map[pkg.cpv].append(pkg)
-
- class _pkg_failure(portage.exception.PortageException):
- """
- An instance of this class is raised by unmerge() when
- an uninstallation fails.
- """
- status = 1
- def __init__(self, *pargs):
- portage.exception.PortageException.__init__(self, pargs)
- if pargs:
- self.status = pargs[0]
-
- def _schedule_fetch(self, fetcher):
- """
- Schedule a fetcher, in order to control the number of concurrent
- fetchers. If self._max_jobs is greater than 1 then the fetch
- queue is bypassed and the fetcher is started immediately,
- otherwise it is added to the front of the parallel-fetch queue.
- NOTE: The parallel-fetch queue is currently used to serialize
- access to the parallel-fetch log, so changes in the log handling
- would be required before it would be possible to enable
- concurrent fetching within the parallel-fetch queue.
- """
- if self._max_jobs > 1:
- fetcher.start()
- else:
- self._task_queues.fetch.addFront(fetcher)
-
- def _schedule_setup(self, setup_phase):
- """
- Schedule a setup phase on the merge queue, in order to
- serialize unsandboxed access to the live filesystem.
- """
- if self._task_queues.merge.max_jobs > 1 and \
- "ebuild-locks" in self.settings.features:
- # Use a separate queue for ebuild-locks when the merge
- # queue allows more than 1 job (due to parallel-install),
- # since the portage.locks module does not behave as desired
- # if we try to lock the same file multiple times
- # concurrently from the same process.
- self._task_queues.ebuild_locks.add(setup_phase)
- else:
- self._task_queues.merge.add(setup_phase)
- self._schedule()
-
- def _schedule_unpack(self, unpack_phase):
- """
- Schedule an unpack phase on the unpack queue, in order
- to serialize $DISTDIR access for live ebuilds.
- """
- self._task_queues.unpack.add(unpack_phase)
-
- def _find_blockers(self, new_pkg):
- """
- Returns a callable.
- """
- def get_blockers():
- return self._find_blockers_impl(new_pkg)
- return get_blockers
-
- def _find_blockers_impl(self, new_pkg):
- if self._opts_ignore_blockers.intersection(self.myopts):
- return None
-
- blocker_db = self._blocker_db[new_pkg.root]
-
- blocked_pkgs = []
- for blocking_pkg in blocker_db.findInstalledBlockers(new_pkg):
- if new_pkg.slot_atom == blocking_pkg.slot_atom:
- continue
- if new_pkg.cpv == blocking_pkg.cpv:
- continue
- blocked_pkgs.append(blocking_pkg)
-
- return blocked_pkgs
-
- def _generate_digests(self):
- """
- Generate digests if necessary for --digests or FEATURES=digest.
- In order to avoid interference, this must done before parallel
- tasks are started.
- """
-
- digest = '--digest' in self.myopts
- if not digest:
- for pkgsettings in self.pkgsettings.values():
- if pkgsettings.mycpv is not None:
- # ensure that we are using global features
- # settings rather than those from package.env
- pkgsettings.reset()
- if 'digest' in pkgsettings.features:
- digest = True
- break
-
- if not digest:
- return os.EX_OK
-
- for x in self._mergelist:
- if not isinstance(x, Package) or \
- x.type_name != 'ebuild' or \
- x.operation != 'merge':
- continue
- pkgsettings = self.pkgsettings[x.root]
- if pkgsettings.mycpv is not None:
- # ensure that we are using global features
- # settings rather than those from package.env
- pkgsettings.reset()
- if '--digest' not in self.myopts and \
- 'digest' not in pkgsettings.features:
- continue
- portdb = x.root_config.trees['porttree'].dbapi
- ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
- if ebuild_path is None:
- raise AssertionError("ebuild not found for '%s'" % x.cpv)
- pkgsettings['O'] = os.path.dirname(ebuild_path)
- if not digestgen(mysettings=pkgsettings, myportdb=portdb):
- writemsg_level(
- "!!! Unable to generate manifest for '%s'.\n" \
- % x.cpv, level=logging.ERROR, noiselevel=-1)
- return FAILURE
-
- return os.EX_OK
-
- def _check_manifests(self):
- # Verify all the manifests now so that the user is notified of failure
- # as soon as possible.
- if "strict" not in self.settings.features or \
- "--fetchonly" in self.myopts or \
- "--fetch-all-uri" in self.myopts:
- return os.EX_OK
-
- shown_verifying_msg = False
- quiet_settings = {}
- for myroot, pkgsettings in self.pkgsettings.items():
- quiet_config = portage.config(clone=pkgsettings)
- quiet_config["PORTAGE_QUIET"] = "1"
- quiet_config.backup_changes("PORTAGE_QUIET")
- quiet_settings[myroot] = quiet_config
- del quiet_config
-
- failures = 0
-
- for x in self._mergelist:
- if not isinstance(x, Package) or \
- x.type_name != "ebuild":
- continue
-
- if x.operation == "uninstall":
- continue
-
- if not shown_verifying_msg:
- shown_verifying_msg = True
- self._status_msg("Verifying ebuild manifests")
-
- root_config = x.root_config
- portdb = root_config.trees["porttree"].dbapi
- quiet_config = quiet_settings[root_config.root]
- ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
- if ebuild_path is None:
- raise AssertionError("ebuild not found for '%s'" % x.cpv)
- quiet_config["O"] = os.path.dirname(ebuild_path)
- if not digestcheck([], quiet_config, strict=True):
- failures |= 1
-
- if failures:
- return FAILURE
- return os.EX_OK
-
- def _add_prefetchers(self):
-
- if not self._parallel_fetch:
- return
-
- if self._parallel_fetch:
-
- prefetchers = self._prefetchers
-
- for pkg in self._mergelist:
- # mergelist can contain solved Blocker instances
- if not isinstance(pkg, Package) or pkg.operation == "uninstall":
- continue
- prefetcher = self._create_prefetcher(pkg)
- if prefetcher is not None:
- # This will start the first prefetcher immediately, so that
- # self._task() won't discard it. This avoids a case where
- # the first prefetcher is discarded, causing the second
- # prefetcher to occupy the fetch queue before the first
- # fetcher has an opportunity to execute.
- prefetchers[pkg] = prefetcher
- self._task_queues.fetch.add(prefetcher)
-
- def _create_prefetcher(self, pkg):
- """
- @return: a prefetcher, or None if not applicable
- """
- prefetcher = None
-
- if not isinstance(pkg, Package):
- pass
-
- elif pkg.type_name == "ebuild":
-
- prefetcher = EbuildFetcher(background=True,
- config_pool=self._ConfigPool(pkg.root,
- self._allocate_config, self._deallocate_config),
- fetchonly=1, fetchall=self._build_opts.fetch_all_uri,
- logfile=self._fetch_log,
- pkg=pkg, prefetch=True, scheduler=self._sched_iface)
-
- elif pkg.type_name == "binary" and \
- "--getbinpkg" in self.myopts and \
- pkg.root_config.trees["bintree"].isremote(pkg.cpv):
-
- prefetcher = BinpkgPrefetcher(background=True,
- pkg=pkg, scheduler=self._sched_iface)
-
- return prefetcher
-
- def _run_pkg_pretend(self):
- """
- Since pkg_pretend output may be important, this method sends all
- output directly to stdout (regardless of options like --quiet or
- --jobs).
- """
-
- failures = 0
- sched_iface = self._sched_iface
-
- for x in self._mergelist:
- if not isinstance(x, Package):
- continue
-
- if x.operation == "uninstall":
- continue
-
- if x.eapi in ("0", "1", "2", "3"):
- continue
-
- if "pretend" not in x.defined_phases:
- continue
-
- out_str =">>> Running pre-merge checks for " + colorize("INFORM", x.cpv) + "\n"
- portage.util.writemsg_stdout(out_str, noiselevel=-1)
-
- root_config = x.root_config
- settings = self.pkgsettings[root_config.root]
- settings.setcpv(x)
-
- # setcpv/package.env allows for per-package PORTAGE_TMPDIR so we
- # have to validate it for each package
- rval = _check_temp_dir(settings)
- if rval != os.EX_OK:
- return rval
-
- build_dir_path = os.path.join(
- os.path.realpath(settings["PORTAGE_TMPDIR"]),
- "portage", x.category, x.pf)
- existing_builddir = os.path.isdir(build_dir_path)
- settings["PORTAGE_BUILDDIR"] = build_dir_path
- build_dir = EbuildBuildDir(scheduler=sched_iface,
- settings=settings)
- sched_iface.run_until_complete(build_dir.async_lock())
- current_task = None
-
- try:
-
- # Clean up the existing build dir, in case pkg_pretend
- # checks for available space (bug #390711).
- if existing_builddir:
- if x.built:
- tree = "bintree"
- infloc = os.path.join(build_dir_path, "build-info")
- ebuild_path = os.path.join(infloc, x.pf + ".ebuild")
- else:
- tree = "porttree"
- portdb = root_config.trees["porttree"].dbapi
- ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
- if ebuild_path is None:
- raise AssertionError(
- "ebuild not found for '%s'" % x.cpv)
- portage.package.ebuild.doebuild.doebuild_environment(
- ebuild_path, "clean", settings=settings,
- db=self.trees[settings['EROOT']][tree].dbapi)
- clean_phase = EbuildPhase(background=False,
- phase='clean', scheduler=sched_iface, settings=settings)
- current_task = clean_phase
- clean_phase.start()
- clean_phase.wait()
-
- if x.built:
- tree = "bintree"
- bintree = root_config.trees["bintree"].dbapi.bintree
- fetched = False
-
- # Display fetch on stdout, so that it's always clear what
- # is consuming time here.
- if bintree.isremote(x.cpv):
- fetcher = BinpkgFetcher(pkg=x,
- scheduler=sched_iface)
- fetcher.start()
- if fetcher.wait() != os.EX_OK:
- failures += 1
- continue
- fetched = fetcher.pkg_path
-
- if fetched is False:
- filename = bintree.getname(x.cpv)
- else:
- filename = fetched
- verifier = BinpkgVerifier(pkg=x,
- scheduler=sched_iface, _pkg_path=filename)
- current_task = verifier
- verifier.start()
- if verifier.wait() != os.EX_OK:
- failures += 1
- continue
-
- if fetched:
- bintree.inject(x.cpv, filename=fetched)
-
- infloc = os.path.join(build_dir_path, "build-info")
- ensure_dirs(infloc)
- self._sched_iface.run_until_complete(
- bintree.dbapi.unpack_metadata(settings, infloc))
- ebuild_path = os.path.join(infloc, x.pf + ".ebuild")
- settings.configdict["pkg"]["EMERGE_FROM"] = "binary"
- settings.configdict["pkg"]["MERGE_TYPE"] = "binary"
-
- else:
- tree = "porttree"
- portdb = root_config.trees["porttree"].dbapi
- ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
- if ebuild_path is None:
- raise AssertionError("ebuild not found for '%s'" % x.cpv)
- settings.configdict["pkg"]["EMERGE_FROM"] = "ebuild"
- if self._build_opts.buildpkgonly:
- settings.configdict["pkg"]["MERGE_TYPE"] = "buildonly"
- else:
- settings.configdict["pkg"]["MERGE_TYPE"] = "source"
-
- portage.package.ebuild.doebuild.doebuild_environment(ebuild_path,
- "pretend", settings=settings,
- db=self.trees[settings['EROOT']][tree].dbapi)
-
- prepare_build_dirs(root_config.root, settings, cleanup=0)
-
- vardb = root_config.trees['vartree'].dbapi
- settings["REPLACING_VERSIONS"] = " ".join(
- set(portage.versions.cpv_getversion(match) \
- for match in vardb.match(x.slot_atom) + \
- vardb.match('='+x.cpv)))
- pretend_phase = EbuildPhase(
- phase="pretend", scheduler=sched_iface,
- settings=settings)
-
- current_task = pretend_phase
- pretend_phase.start()
- ret = pretend_phase.wait()
- if ret != os.EX_OK:
- failures += 1
- portage.elog.elog_process(x.cpv, settings)
- finally:
-
- if current_task is not None:
- if current_task.isAlive():
- current_task.cancel()
- current_task.wait()
- if current_task.returncode == os.EX_OK:
- clean_phase = EbuildPhase(background=False,
- phase='clean', scheduler=sched_iface,
- settings=settings)
- clean_phase.start()
- clean_phase.wait()
-
- sched_iface.run_until_complete(build_dir.async_unlock())
-
- if failures:
- return FAILURE
- return os.EX_OK
-
- def merge(self):
- if "--resume" in self.myopts:
- # We're resuming.
- portage.writemsg_stdout(
- colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
- self._logger.log(" *** Resuming merge...")
-
- self._save_resume_list()
-
- try:
- self._background = self._background_mode()
- except self._unknown_internal_error:
- return FAILURE
-
- rval = self._handle_self_update()
- if rval != os.EX_OK:
- return rval
-
- for root in self.trees:
- root_config = self.trees[root]["root_config"]
-
- # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
- # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
- # for ensuring sane $PWD (bug #239560) and storing elog messages.
- tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
- if not tmpdir or not os.path.isdir(tmpdir):
- msg = (
- 'The directory specified in your PORTAGE_TMPDIR variable does not exist:',
- tmpdir,
- 'Please create this directory or correct your PORTAGE_TMPDIR setting.',
- )
- out = portage.output.EOutput()
- for l in msg:
- out.eerror(l)
- return FAILURE
-
- if self._background:
- root_config.settings.unlock()
- root_config.settings["PORTAGE_BACKGROUND"] = "1"
- root_config.settings.backup_changes("PORTAGE_BACKGROUND")
- root_config.settings.lock()
-
- self.pkgsettings[root] = portage.config(
- clone=root_config.settings)
-
- keep_going = "--keep-going" in self.myopts
- fetchonly = self._build_opts.fetchonly
- mtimedb = self._mtimedb
- failed_pkgs = self._failed_pkgs
-
- rval = self._generate_digests()
- if rval != os.EX_OK:
- return rval
-
- # TODO: Immediately recalculate deps here if --keep-going
- # is enabled and corrupt manifests are detected.
- rval = self._check_manifests()
- if rval != os.EX_OK and not keep_going:
- return rval
-
- if not fetchonly:
- rval = self._run_pkg_pretend()
- if rval != os.EX_OK:
- return rval
-
- while True:
-
- received_signal = []
-
- def sighandler(signum, frame):
- signal.signal(signal.SIGINT, signal.SIG_IGN)
- signal.signal(signal.SIGTERM, signal.SIG_IGN)
- portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % \
- {"signal":signum})
- self.terminate()
- received_signal.append(128 + signum)
-
- earlier_sigint_handler = signal.signal(signal.SIGINT, sighandler)
- earlier_sigterm_handler = signal.signal(signal.SIGTERM, sighandler)
- earlier_sigcont_handler = \
- signal.signal(signal.SIGCONT, self._sigcont_handler)
- signal.siginterrupt(signal.SIGCONT, False)
-
- try:
- rval = self._merge()
- finally:
- # Restore previous handlers
- if earlier_sigint_handler is not None:
- signal.signal(signal.SIGINT, earlier_sigint_handler)
- else:
- signal.signal(signal.SIGINT, signal.SIG_DFL)
- if earlier_sigterm_handler is not None:
- signal.signal(signal.SIGTERM, earlier_sigterm_handler)
- else:
- signal.signal(signal.SIGTERM, signal.SIG_DFL)
- if earlier_sigcont_handler is not None:
- signal.signal(signal.SIGCONT, earlier_sigcont_handler)
- else:
- signal.signal(signal.SIGCONT, signal.SIG_DFL)
-
- self._termination_check()
- if received_signal:
- sys.exit(received_signal[0])
-
- if rval == os.EX_OK or fetchonly or not keep_going:
- break
- if "resume" not in mtimedb:
- break
- mergelist = self._mtimedb["resume"].get("mergelist")
- if not mergelist:
- break
-
- if not failed_pkgs:
- break
-
- for failed_pkg in failed_pkgs:
- mergelist.remove(list(failed_pkg.pkg))
-
- self._failed_pkgs_all.extend(failed_pkgs)
- del failed_pkgs[:]
-
- if not mergelist:
- break
-
- if not self._calc_resume_list():
- break
-
- clear_caches(self.trees)
- if not self._mergelist:
- break
-
- self._save_resume_list()
- self._pkg_count.curval = 0
- self._pkg_count.maxval = len([x for x in self._mergelist \
- if isinstance(x, Package) and x.operation == "merge"])
- self._status_display.maxval = self._pkg_count.maxval
-
- # Cleanup any callbacks that have been registered with the global
- # event loop by calls to the terminate method.
- self._cleanup()
-
- self._logger.log(" *** Finished. Cleaning up...")
-
- if failed_pkgs:
- self._failed_pkgs_all.extend(failed_pkgs)
- del failed_pkgs[:]
-
- printer = portage.output.EOutput()
- background = self._background
- failure_log_shown = False
- if background and len(self._failed_pkgs_all) == 1 and \
- self.myopts.get('--quiet-fail', 'n') != 'y':
- # If only one package failed then just show it's
- # whole log for easy viewing.
- failed_pkg = self._failed_pkgs_all[-1]
- log_file = None
- log_file_real = None
-
- log_path = self._locate_failure_log(failed_pkg)
- if log_path is not None:
- try:
- log_file = open(_unicode_encode(log_path,
- encoding=_encodings['fs'], errors='strict'), mode='rb')
- except IOError:
- pass
- else:
- if log_path.endswith('.gz'):
- log_file_real = log_file
- log_file = gzip.GzipFile(filename='',
- mode='rb', fileobj=log_file)
-
- if log_file is not None:
- try:
- for line in log_file:
- writemsg_level(line, noiselevel=-1)
- except zlib.error as e:
- writemsg_level("%s\n" % (e,), level=logging.ERROR,
- noiselevel=-1)
- finally:
- log_file.close()
- if log_file_real is not None:
- log_file_real.close()
- failure_log_shown = True
-
- # Dump mod_echo output now since it tends to flood the terminal.
- # This allows us to avoid having more important output, generated
- # later, from being swept away by the mod_echo output.
- mod_echo_output = _flush_elog_mod_echo()
-
- if background and not failure_log_shown and \
- self._failed_pkgs_all and \
- self._failed_pkgs_die_msgs and \
- not mod_echo_output:
-
- for mysettings, key, logentries in self._failed_pkgs_die_msgs:
- root_msg = ""
- if mysettings["ROOT"] != "/":
- root_msg = " merged to %s" % mysettings["ROOT"]
- print()
- printer.einfo("Error messages for package %s%s:" % \
- (colorize("INFORM", key), root_msg))
- print()
- for phase in portage.const.EBUILD_PHASES:
- if phase not in logentries:
- continue
- for msgtype, msgcontent in logentries[phase]:
- if isinstance(msgcontent, basestring):
- msgcontent = [msgcontent]
- for line in msgcontent:
- printer.eerror(line.strip("\n"))
-
- if self._post_mod_echo_msgs:
- for msg in self._post_mod_echo_msgs:
- msg()
-
- if len(self._failed_pkgs_all) > 1 or \
- (self._failed_pkgs_all and keep_going):
- if len(self._failed_pkgs_all) > 1:
- msg = "The following %d packages have " % \
- len(self._failed_pkgs_all) + \
- "failed to build, install, or execute postinst:"
- else:
- msg = "The following package has " + \
- "failed to build, install, or execute postinst:"
-
- printer.eerror("")
- for line in textwrap.wrap(msg, 72):
- printer.eerror(line)
- printer.eerror("")
- for failed_pkg in self._failed_pkgs_all:
- # Use unicode_literals to force unicode format string so
- # that Package.__unicode__() is called in python2.
- msg = " %s" % (failed_pkg.pkg,)
- if failed_pkg.postinst_failure:
- msg += " (postinst failed)"
- log_path = self._locate_failure_log(failed_pkg)
- if log_path is not None:
- msg += ", Log file:"
- printer.eerror(msg)
- if log_path is not None:
- printer.eerror(" '%s'" % colorize('INFORM', log_path))
- printer.eerror("")
-
- if self._failed_pkgs_all:
- return FAILURE
- return os.EX_OK
-
- def _elog_listener(self, mysettings, key, logentries, fulltext):
- errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
- if errors:
- self._failed_pkgs_die_msgs.append(
- (mysettings, key, errors))
-
- def _locate_failure_log(self, failed_pkg):
-
- log_paths = [failed_pkg.build_log]
-
- for log_path in log_paths:
- if not log_path:
- continue
-
- try:
- log_size = os.stat(log_path).st_size
- except OSError:
- continue
-
- if log_size == 0:
- continue
-
- return log_path
-
- return None
-
- def _add_packages(self):
- pkg_queue = self._pkg_queue
- for pkg in self._mergelist:
- if isinstance(pkg, Package):
- pkg_queue.append(pkg)
- elif isinstance(pkg, Blocker):
- pass
-
- def _system_merge_started(self, merge):
- """
- Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
- In general, this keeps track of installed system packages with
- unsatisfied RDEPEND or PDEPEND (circular dependencies). It can be
- a fragile situation, so we don't execute any unrelated builds until
- the circular dependencies are built and installed.
- """
- graph = self._digraph
- if graph is None:
- return
- pkg = merge.merge.pkg
-
- # Skip this if $ROOT != / since it shouldn't matter if there
- # are unsatisfied system runtime deps in this case.
- if pkg.root_config.settings["ROOT"] != "/":
- return
-
- completed_tasks = self._completed_tasks
- unsatisfied = self._unsatisfied_system_deps
-
- def ignore_non_runtime_or_satisfied(priority):
- """
- Ignore non-runtime and satisfied runtime priorities.
- """
- if isinstance(priority, DepPriority) and \
- not priority.satisfied and \
- (priority.runtime or priority.runtime_post):
- return False
- return True
-
- # When checking for unsatisfied runtime deps, only check
- # direct deps since indirect deps are checked when the
- # corresponding parent is merged.
- for child in graph.child_nodes(pkg,
- ignore_priority=ignore_non_runtime_or_satisfied):
- if not isinstance(child, Package) or \
- child.operation == 'uninstall':
- continue
- if child is pkg:
- continue
- if child.operation == 'merge' and \
- child not in completed_tasks:
- unsatisfied.add(child)
-
- def _merge_wait_exit_handler(self, task):
- self._merge_wait_scheduled.remove(task)
- self._merge_exit(task)
-
- def _merge_exit(self, merge):
- self._running_tasks.pop(id(merge), None)
- self._do_merge_exit(merge)
- self._deallocate_config(merge.merge.settings)
- if merge.returncode == os.EX_OK and \
- not merge.merge.pkg.installed:
- self._status_display.curval += 1
- self._status_display.merges = len(self._task_queues.merge)
- self._schedule()
-
- def _do_merge_exit(self, merge):
- pkg = merge.merge.pkg
- if merge.returncode != os.EX_OK:
- settings = merge.merge.settings
- build_dir = settings.get("PORTAGE_BUILDDIR")
- build_log = settings.get("PORTAGE_LOG_FILE")
-
- self._failed_pkgs.append(self._failed_pkg(
- build_dir=build_dir, build_log=build_log,
- pkg=pkg,
- returncode=merge.returncode))
- if not self._terminated_tasks:
- self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
- self._status_display.failed = len(self._failed_pkgs)
- return
-
- if merge.postinst_failure:
- # Append directly to _failed_pkgs_all for non-critical errors.
- self._failed_pkgs_all.append(self._failed_pkg(
- build_dir=merge.merge.settings.get("PORTAGE_BUILDDIR"),
- build_log=merge.merge.settings.get("PORTAGE_LOG_FILE"),
- pkg=pkg,
- postinst_failure=True,
- returncode=merge.returncode))
- self._failed_pkg_msg(self._failed_pkgs_all[-1],
- "execute postinst for", "for")
-
- self._task_complete(pkg)
- pkg_to_replace = merge.merge.pkg_to_replace
- if pkg_to_replace is not None:
-			# When a package is replaced, mark its uninstall
- # task complete (if any).
- if self._digraph is not None and \
- pkg_to_replace in self._digraph:
- try:
- self._pkg_queue.remove(pkg_to_replace)
- except ValueError:
- pass
- self._task_complete(pkg_to_replace)
- else:
- self._pkg_cache.pop(pkg_to_replace, None)
-
- if pkg.installed:
- return
-
- # Call mtimedb.commit() after each merge so that
- # --resume still works after being interrupted
- # by reboot, sigkill or similar.
- mtimedb = self._mtimedb
- mtimedb["resume"]["mergelist"].remove(list(pkg))
- if not mtimedb["resume"]["mergelist"]:
- del mtimedb["resume"]
- mtimedb.commit()
-
- def _build_exit(self, build):
- self._running_tasks.pop(id(build), None)
- if build.returncode == os.EX_OK and self._terminated_tasks:
- # We've been interrupted, so we won't
- # add this to the merge queue.
- self.curval += 1
- self._deallocate_config(build.settings)
- elif build.returncode == os.EX_OK:
- self.curval += 1
- merge = PackageMerge(merge=build, scheduler=self._sched_iface)
- self._running_tasks[id(merge)] = merge
- if not build.build_opts.buildpkgonly and \
- build.pkg in self._deep_system_deps:
- # Since dependencies on system packages are frequently
- # unspecified, merge them only when no builds are executing.
- self._merge_wait_queue.append(merge)
- merge.addStartListener(self._system_merge_started)
- else:
- self._task_queues.merge.add(merge)
- merge.addExitListener(self._merge_exit)
- self._status_display.merges = len(self._task_queues.merge)
- else:
- settings = build.settings
- build_dir = settings.get("PORTAGE_BUILDDIR")
- build_log = settings.get("PORTAGE_LOG_FILE")
-
- self._failed_pkgs.append(self._failed_pkg(
- build_dir=build_dir, build_log=build_log,
- pkg=build.pkg,
- returncode=build.returncode))
- if not self._terminated_tasks:
- self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
- self._status_display.failed = len(self._failed_pkgs)
- self._deallocate_config(build.settings)
- self._jobs -= 1
- self._status_display.running = self._jobs
- self._schedule()
-
- def _extract_exit(self, build):
- self._build_exit(build)
-
- def _task_complete(self, pkg):
- self._completed_tasks.add(pkg)
- self._unsatisfied_system_deps.discard(pkg)
- self._choose_pkg_return_early = False
- blocker_db = self._blocker_db[pkg.root]
- blocker_db.discardBlocker(pkg)
-
- def _main_loop(self):
- self._main_exit = self._event_loop.create_future()
-
- if self._max_load is not None and \
- self._loadavg_latency is not None and \
- (self._max_jobs is True or self._max_jobs > 1):
- # We have to schedule periodically, in case the load
- # average has changed since the last call.
- self._main_loadavg_handle = self._event_loop.call_later(
- self._loadavg_latency, self._schedule)
-
- self._schedule()
- self._event_loop.run_until_complete(self._main_exit)
-
- def _merge(self):
-
- if self._opts_no_background.intersection(self.myopts):
- self._set_max_jobs(1)
-
- self._add_prefetchers()
- self._add_packages()
- failed_pkgs = self._failed_pkgs
- portage.locks._quiet = self._background
- portage.elog.add_listener(self._elog_listener)
-
- def display_callback():
- self._status_display.display()
- display_callback.handle = self._event_loop.call_later(
- self._max_display_latency, display_callback)
- display_callback.handle = None
-
- if self._status_display._isatty and not self._status_display.quiet:
- display_callback()
- rval = os.EX_OK
-
- try:
- self._main_loop()
- finally:
- self._main_loop_cleanup()
- portage.locks._quiet = False
- portage.elog.remove_listener(self._elog_listener)
- if display_callback.handle is not None:
- display_callback.handle.cancel()
- if failed_pkgs:
- rval = failed_pkgs[-1].returncode
-
- return rval
-
- def _main_loop_cleanup(self):
- del self._pkg_queue[:]
- self._completed_tasks.clear()
- self._deep_system_deps.clear()
- self._unsatisfied_system_deps.clear()
- self._choose_pkg_return_early = False
- self._status_display.reset()
- self._digraph = None
- self._task_queues.fetch.clear()
- self._prefetchers.clear()
- self._main_exit = None
- if self._main_loadavg_handle is not None:
- self._main_loadavg_handle.cancel()
- self._main_loadavg_handle = None
- if self._job_delay_timeout_id is not None:
- self._job_delay_timeout_id.cancel()
- self._job_delay_timeout_id = None
- if self._schedule_merge_wakeup_task is not None:
- self._schedule_merge_wakeup_task.cancel()
- self._schedule_merge_wakeup_task = None
-
- def _choose_pkg(self):
- """
- Choose a task that has all its dependencies satisfied. This is used
- for parallel build scheduling, and ensures that we don't build
- anything with deep dependencies that have yet to be merged.
- """
-
- if self._choose_pkg_return_early:
- return None
-
- if self._digraph is None:
- if self._is_work_scheduled() and \
- not ("--nodeps" in self.myopts and \
- (self._max_jobs is True or self._max_jobs > 1)):
- self._choose_pkg_return_early = True
- return None
- return self._pkg_queue.pop(0)
-
- if not self._is_work_scheduled():
- return self._pkg_queue.pop(0)
-
- self._prune_digraph()
-
- chosen_pkg = None
-
- # Prefer uninstall operations when available.
- graph = self._digraph
- for pkg in self._pkg_queue:
- if pkg.operation == 'uninstall' and \
- not graph.child_nodes(pkg):
- chosen_pkg = pkg
- break
-
- if chosen_pkg is None:
- later = set(self._pkg_queue)
- for pkg in self._pkg_queue:
- later.remove(pkg)
- if not self._dependent_on_scheduled_merges(pkg, later):
- chosen_pkg = pkg
- break
-
- if chosen_pkg is not None:
- self._pkg_queue.remove(chosen_pkg)
-
- if chosen_pkg is None:
- # There's no point in searching for a package to
- # choose until at least one of the existing jobs
- # completes.
- self._choose_pkg_return_early = True
-
- return chosen_pkg
-
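
As a rough, standalone illustration of the early-return memoization used in _choose_pkg above (not the portage API), the sketch below caches the fact that nothing is currently schedulable until a running job completes; the Chooser class and the can_run callback are hypothetical names.

    class Chooser:
        def __init__(self, queue):
            self.queue = list(queue)
            self._return_early = False

        def choose(self, can_run):
            if self._return_early:
                return None
            for item in self.queue:
                if can_run(item):
                    self.queue.remove(item)
                    return item
            # Nothing is runnable; remember that until a job completes.
            self._return_early = True
            return None

        def job_completed(self):
            self._return_early = False

    c = Chooser(["a", "b"])
    print(c.choose(lambda item: item == "b"))  # -> 'b'
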
- def _dependent_on_scheduled_merges(self, pkg, later):
- """
-		Traverse the subgraph of the given package's deep dependencies
- to see if it contains any scheduled merges.
- @param pkg: a package to check dependencies for
- @type pkg: Package
- @param later: packages for which dependence should be ignored
- since they will be merged later than pkg anyway and therefore
- delaying the merge of pkg will not result in a more optimal
- merge order
- @type later: set
- @rtype: bool
- @return: True if the package is dependent, False otherwise.
- """
-
- graph = self._digraph
- completed_tasks = self._completed_tasks
-
- dependent = False
- traversed_nodes = set([pkg])
- direct_deps = graph.child_nodes(pkg)
- node_stack = direct_deps
- direct_deps = frozenset(direct_deps)
- while node_stack:
- node = node_stack.pop()
- if node in traversed_nodes:
- continue
- traversed_nodes.add(node)
- if not ((node.installed and node.operation == "nomerge") or \
- (node.operation == "uninstall" and \
- node not in direct_deps) or \
- node in completed_tasks or \
- node in later):
- dependent = True
- break
-
- # Don't traverse children of uninstall nodes since
- # those aren't dependencies in the usual sense.
- if node.operation != "uninstall":
- node_stack.extend(graph.child_nodes(node))
-
- return dependent
-
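
A minimal sketch of the depth-first check above, using a plain adjacency dict in place of portage's digraph; all names and data here are illustrative.

    def depends_on_scheduled(item, graph, completed, later):
        # graph maps an item to its direct dependencies (hypothetical data).
        stack = list(graph.get(item, ()))
        seen = {item}
        while stack:
            node = stack.pop()
            if node in seen:
                continue
            seen.add(node)
            if node not in completed and node not in later:
                return True
            stack.extend(graph.get(node, ()))
        return False

    graph = {"app-a": ["lib-b"], "lib-b": ["lib-c"], "lib-c": []}
    print(depends_on_scheduled("app-a", graph, completed={"lib-c"}, later=set()))
    # True: lib-b is still a scheduled merge, so app-a should not start yet
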
- def _allocate_config(self, root):
- """
- Allocate a unique config instance for a task in order
- to prevent interference between parallel tasks.
- """
- if self._config_pool[root]:
- temp_settings = self._config_pool[root].pop()
- else:
- temp_settings = portage.config(clone=self.pkgsettings[root])
- # Since config.setcpv() isn't guaranteed to call config.reset() due to
- # performance reasons, call it here to make sure all settings from the
- # previous package get flushed out (such as PORTAGE_LOG_FILE).
- temp_settings.reload()
- temp_settings.reset()
- return temp_settings
-
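
A hedged sketch of the per-root pooling idea behind _allocate_config and _deallocate_config, with a plain dict standing in for portage.config; SettingsPool and make_settings are hypothetical names, not portage API.

    class SettingsPool:
        def __init__(self, make_settings):
            self._make_settings = make_settings
            self._pool = {}

        def allocate(self, root):
            pool = self._pool.setdefault(root, [])
            # Reuse a previously returned instance when one is available,
            # otherwise build (clone) a fresh one.
            return pool.pop() if pool else self._make_settings(root)

        def deallocate(self, root, settings):
            self._pool.setdefault(root, []).append(settings)

    pool = SettingsPool(lambda root: {"ROOT": root})
    cfg = pool.allocate("/")
    pool.deallocate("/", cfg)
    assert pool.allocate("/") is cfg  # the same instance is handed out again
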
- def _deallocate_config(self, settings):
- self._config_pool[settings['EROOT']].append(settings)
-
- def _keep_scheduling(self):
- return bool(not self._terminated.is_set() and self._pkg_queue and \
- not (self._failed_pkgs and not self._build_opts.fetchonly))
-
- def _is_work_scheduled(self):
- return bool(self._running_tasks)
-
- def _running_job_count(self):
- return self._jobs
-
- def _schedule_tasks(self):
-
- while True:
-
- state_change = 0
-
- # When the number of jobs and merges drops to zero,
- # process a single merge from _merge_wait_queue if
- # it's not empty. We only process one since these are
- # special packages and we want to ensure that
- # parallel-install does not cause more than one of
- # them to install at the same time.
- if (self._merge_wait_queue and not self._jobs and
- not self._task_queues.merge):
- task = self._merge_wait_queue.popleft()
- task.scheduler = self._sched_iface
- self._merge_wait_scheduled.append(task)
- self._task_queues.merge.add(task)
- task.addExitListener(self._merge_wait_exit_handler)
- self._status_display.merges = len(self._task_queues.merge)
- state_change += 1
-
- if self._schedule_tasks_imp():
- state_change += 1
-
- self._status_display.display()
-
- # Cancel prefetchers if they're the only reason
- # the main poll loop is still running.
- if self._failed_pkgs and not self._build_opts.fetchonly and \
- not self._is_work_scheduled() and \
- self._task_queues.fetch:
- # Since this happens asynchronously, it doesn't count in
- # state_change (counting it triggers an infinite loop).
- self._task_queues.fetch.clear()
-
- if not (state_change or \
- (self._merge_wait_queue and not self._jobs and
- not self._task_queues.merge)):
- break
-
- if not (self._is_work_scheduled() or
- self._keep_scheduling() or self._main_exit.done()):
- self._main_exit.set_result(None)
- elif self._main_loadavg_handle is not None:
- self._main_loadavg_handle.cancel()
- self._main_loadavg_handle = self._event_loop.call_later(
- self._loadavg_latency, self._schedule)
-
- # Failure to schedule *after* self._task_queues.merge becomes
- # empty will cause the scheduler to hang as in bug 711322.
- # Do not rely on scheduling which occurs via the _merge_exit
- # method, since the order of callback invocation may cause
- # self._task_queues.merge to appear non-empty when it is
- # about to become empty.
- if (self._task_queues.merge and (self._schedule_merge_wakeup_task is None
- or self._schedule_merge_wakeup_task.done())):
- self._schedule_merge_wakeup_task = asyncio.ensure_future(
- self._task_queues.merge.wait(), loop=self._event_loop)
- self._schedule_merge_wakeup_task.add_done_callback(
- self._schedule_merge_wakeup)
-
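
The merge-queue wakeup arrangement above (guarding against the bug 711322 hang) can be sketched with plain asyncio as follows; the future here stands in for _task_queues.merge.wait() and is purely illustrative.

    import asyncio

    def make_wakeup(scheduler_pass):
        def _wakeup(future):
            if not future.cancelled():
                future.result()       # surface unexpected errors
            scheduler_pass()          # guaranteed pass after the queue drains
        return _wakeup

    loop = asyncio.new_event_loop()
    queue_drained = loop.create_future()
    queue_drained.add_done_callback(make_wakeup(lambda: print("scheduler pass")))
    loop.call_soon(queue_drained.set_result, None)  # simulate the last merge exiting
    loop.run_until_complete(queue_drained)
    loop.close()
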
- def _schedule_merge_wakeup(self, future):
- if not future.cancelled():
- future.result()
- if self._main_exit is not None and not self._main_exit.done():
- self._schedule()
-
- def _sigcont_handler(self, signum, frame):
- self._sigcont_time = time.time()
-
- def _job_delay(self):
- """
- @rtype: bool
- @return: True if job scheduling should be delayed, False otherwise.
- """
-
- if self._jobs and self._max_load is not None:
-
- current_time = time.time()
-
- if self._sigcont_time is not None:
-
- elapsed_seconds = current_time - self._sigcont_time
- # elapsed_seconds < 0 means the system clock has been adjusted
- if elapsed_seconds > 0 and \
- elapsed_seconds < self._sigcont_delay:
-
- if self._job_delay_timeout_id is not None:
- self._job_delay_timeout_id.cancel()
-
- self._job_delay_timeout_id = self._event_loop.call_later(
- self._sigcont_delay - elapsed_seconds,
- self._schedule)
- return True
-
- # Only set this to None after the delay has expired,
- # since this method may be called again before the
- # delay has expired.
- self._sigcont_time = None
-
- try:
- avg1, avg5, avg15 = getloadavg()
- except OSError:
- return False
-
- delay = self._job_delay_max * avg1 / self._max_load
- if delay > self._job_delay_max:
- delay = self._job_delay_max
- elapsed_seconds = current_time - self._previous_job_start_time
- # elapsed_seconds < 0 means the system clock has been adjusted
- if elapsed_seconds > 0 and elapsed_seconds < delay:
-
- if self._job_delay_timeout_id is not None:
- self._job_delay_timeout_id.cancel()
-
- self._job_delay_timeout_id = self._event_loop.call_later(
- delay - elapsed_seconds, self._schedule)
- return True
-
- return False
-
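
A small standalone sketch of the load-average throttle above, assuming the standard os.getloadavg() on a Unix host; the delay cap and --load-average value are illustrative numbers, not portage defaults.

    import os, time

    def job_delay(previous_start, max_load, delay_max=5.0):
        try:
            avg1, _avg5, _avg15 = os.getloadavg()
        except OSError:
            return 0.0
        delay = min(delay_max, delay_max * avg1 / max_load)
        elapsed = time.time() - previous_start
        # elapsed < 0 means the system clock was adjusted; do not delay then.
        if 0 < elapsed < delay:
            return delay - elapsed    # seconds left before another job may start
        return 0.0

    print(job_delay(previous_start=time.time(), max_load=4.0))
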
- def _schedule_tasks_imp(self):
- """
- @rtype: bool
- @return: True if state changed, False otherwise.
- """
-
- state_change = 0
-
- while True:
-
- if not self._keep_scheduling():
- return bool(state_change)
-
- if self._choose_pkg_return_early or \
- self._merge_wait_scheduled or \
- (self._jobs and self._unsatisfied_system_deps) or \
- not self._can_add_job() or \
- self._job_delay():
- return bool(state_change)
-
- pkg = self._choose_pkg()
- if pkg is None:
- return bool(state_change)
-
- state_change += 1
-
- if not pkg.installed:
- self._pkg_count.curval += 1
-
- task = self._task(pkg)
-
- if pkg.installed:
- merge = PackageMerge(merge=task, scheduler=self._sched_iface)
- self._running_tasks[id(merge)] = merge
- self._task_queues.merge.addFront(merge)
- merge.addExitListener(self._merge_exit)
-
- elif pkg.built:
- self._jobs += 1
- self._previous_job_start_time = time.time()
- self._status_display.running = self._jobs
- self._running_tasks[id(task)] = task
- task.scheduler = self._sched_iface
- self._task_queues.jobs.add(task)
- task.addExitListener(self._extract_exit)
-
- else:
- self._jobs += 1
- self._previous_job_start_time = time.time()
- self._status_display.running = self._jobs
- self._running_tasks[id(task)] = task
- task.scheduler = self._sched_iface
- self._task_queues.jobs.add(task)
- task.addExitListener(self._build_exit)
-
- return bool(state_change)
-
- def _task(self, pkg):
-
- pkg_to_replace = None
- if pkg.operation != "uninstall":
- vardb = pkg.root_config.trees["vartree"].dbapi
- previous_cpv = [x for x in vardb.match(pkg.slot_atom) \
- if portage.cpv_getkey(x) == pkg.cp]
- if not previous_cpv and vardb.cpv_exists(pkg.cpv):
- # same cpv, different SLOT
- previous_cpv = [pkg.cpv]
- if previous_cpv:
- previous_cpv = previous_cpv.pop()
- pkg_to_replace = self._pkg(previous_cpv,
- "installed", pkg.root_config, installed=True,
- operation="uninstall")
-
- try:
- prefetcher = self._prefetchers.pop(pkg, None)
- except KeyError:
- # KeyError observed with PyPy 1.8, despite None given as default.
- # Note that PyPy 1.8 has the same WeakValueDictionary code as
- # CPython 2.7, so it may be possible for CPython to raise KeyError
- # here as well.
- prefetcher = None
- if prefetcher is not None and not prefetcher.isAlive():
- try:
- self._task_queues.fetch._task_queue.remove(prefetcher)
- except ValueError:
- pass
- prefetcher = None
-
- task = MergeListItem(args_set=self._args_set,
- background=self._background, binpkg_opts=self._binpkg_opts,
- build_opts=self._build_opts,
- config_pool=self._ConfigPool(pkg.root,
- self._allocate_config, self._deallocate_config),
- emerge_opts=self.myopts,
- find_blockers=self._find_blockers(pkg), logger=self._logger,
- mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
- pkg_to_replace=pkg_to_replace,
- prefetcher=prefetcher,
- scheduler=self._sched_iface,
- settings=self._allocate_config(pkg.root),
- statusMessage=self._status_msg,
- world_atom=self._world_atom)
-
- return task
-
- def _failed_pkg_msg(self, failed_pkg, action, preposition):
- pkg = failed_pkg.pkg
- msg = "%s to %s %s" % \
- (bad("Failed"), action, colorize("INFORM", pkg.cpv))
- if pkg.root_config.settings["ROOT"] != "/":
- msg += " %s %s" % (preposition, pkg.root)
-
- log_path = self._locate_failure_log(failed_pkg)
- if log_path is not None:
- msg += ", Log file:"
- self._status_msg(msg)
-
- if log_path is not None:
- self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
-
- def _status_msg(self, msg):
- """
- Display a brief status message (no newlines) in the status display.
- This is called by tasks to provide feedback to the user. This
-		delegates the responsibility of generating \r and \n control characters,
- to guarantee that lines are created or erased when necessary and
- appropriate.
-
- @type msg: str
- @param msg: a brief status message (no newlines allowed)
- """
- if not self._background:
- writemsg_level("\n")
- self._status_display.displayMessage(msg)
-
- def _save_resume_list(self):
- """
- Do this before verifying the ebuild Manifests since it might
-		be possible for the user to use --resume --skipfirst to get past
- a non-essential package with a broken digest.
- """
- mtimedb = self._mtimedb
-
- mtimedb["resume"] = {}
- # Stored as a dict starting with portage-2.1.6_rc1, and supported
- # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
- # a list type for options.
- mtimedb["resume"]["myopts"] = self.myopts.copy()
-
- # Convert Atom instances to plain str.
- mtimedb["resume"]["favorites"] = [str(x) for x in self._favorites]
- mtimedb["resume"]["mergelist"] = [list(x) \
- for x in self._mergelist \
- if isinstance(x, Package) and x.operation == "merge"]
-
- mtimedb.commit()
-
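
For reference, the resume entry written above is just a nested dict keyed by "myopts", "favorites" and "mergelist". The sketch below shows a comparable structure; the values and the exact mergelist entry layout are illustrative, not portage's canonical on-disk format.

    import json

    resume = {
        "myopts": {"--jobs": "4", "--keep-going": True},
        "favorites": ["app-editors/vim"],
        # Entry layout shown here is illustrative only.
        "mergelist": [["ebuild", "/", "app-editors/vim-8.2.5000", "merge"]],
    }
    print(json.dumps({"resume": resume}, indent=2))
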
- def _calc_resume_list(self):
- """
- Use the current resume list to calculate a new one,
- dropping any packages with unsatisfied deps.
- @rtype: bool
- @return: True if successful, False otherwise.
- """
- print(colorize("GOOD", "*** Resuming merge..."))
-
- # free some memory before creating
- # the resume depgraph
- self._destroy_graph()
-
- myparams = create_depgraph_params(self.myopts, None)
- success = False
- e = None
- try:
- success, mydepgraph, dropped_tasks = resume_depgraph(
- self.settings, self.trees, self._mtimedb, self.myopts,
- myparams, self._spinner)
- except depgraph.UnsatisfiedResumeDep as exc:
- # rename variable to avoid python-3.0 error:
- # SyntaxError: can not delete variable 'e' referenced in nested
- # scope
- e = exc
- mydepgraph = e.depgraph
- dropped_tasks = {}
-
- if e is not None:
- def unsatisfied_resume_dep_msg():
- mydepgraph.display_problems()
- out = portage.output.EOutput()
- out.eerror("One or more packages are either masked or " + \
- "have missing dependencies:")
- out.eerror("")
- indent = " "
- show_parents = set()
- for dep in e.value:
- if dep.parent in show_parents:
- continue
- show_parents.add(dep.parent)
- if dep.atom is None:
- out.eerror(indent + "Masked package:")
- out.eerror(2 * indent + str(dep.parent))
- out.eerror("")
- else:
- out.eerror(indent + str(dep.atom) + " pulled in by:")
- out.eerror(2 * indent + str(dep.parent))
- out.eerror("")
- msg = "The resume list contains packages " + \
- "that are either masked or have " + \
- "unsatisfied dependencies. " + \
- "Please restart/continue " + \
- "the operation manually, or use --skipfirst " + \
- "to skip the first package in the list and " + \
- "any other packages that may be " + \
- "masked or have missing dependencies."
- for line in textwrap.wrap(msg, 72):
- out.eerror(line)
- self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
- return False
-
- if success and self._show_list():
- mydepgraph.display(mydepgraph.altlist(), favorites=self._favorites)
-
- if not success:
- self._post_mod_echo_msgs.append(mydepgraph.display_problems)
- return False
- mydepgraph.display_problems()
- self._init_graph(mydepgraph.schedulerGraph())
-
- msg_width = 75
- for task, atoms in dropped_tasks.items():
- if not (isinstance(task, Package) and task.operation == "merge"):
- continue
- pkg = task
- msg = "emerge --keep-going:" + \
- " %s" % (pkg.cpv,)
- if pkg.root_config.settings["ROOT"] != "/":
- msg += " for %s" % (pkg.root,)
- if not atoms:
- msg += " dropped because it is masked or unavailable"
- else:
- msg += " dropped because it requires %s" % ", ".join(atoms)
- for line in textwrap.wrap(msg, msg_width):
- eerror(line, phase="other", key=pkg.cpv)
- settings = self.pkgsettings[pkg.root]
- # Ensure that log collection from $T is disabled inside
- # elog_process(), since any logs that might exist are
- # not valid here.
- settings.pop("T", None)
- portage.elog.elog_process(pkg.cpv, settings)
- self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
-
- return True
-
- def _show_list(self):
- myopts = self.myopts
- if "--quiet" not in myopts and \
- ("--ask" in myopts or "--tree" in myopts or \
- "--verbose" in myopts):
- return True
- return False
-
- def _world_atom(self, pkg):
- """
- Add or remove the package to the world file, but only if
- it's supposed to be added or removed. Otherwise, do nothing.
- """
-
- if set(("--buildpkgonly", "--fetchonly",
- "--fetch-all-uri",
- "--oneshot", "--onlydeps",
- "--pretend")).intersection(self.myopts):
- return
-
- if pkg.root != self.target_root:
- return
-
- args_set = self._args_set
- if not args_set.findAtomForPackage(pkg):
- return
-
- logger = self._logger
- pkg_count = self._pkg_count
- root_config = pkg.root_config
- world_set = root_config.sets["selected"]
- world_locked = False
- atom = None
-
- if pkg.operation != "uninstall":
- atom = self._world_atoms.get(pkg)
-
- try:
-
- if hasattr(world_set, "lock"):
- world_set.lock()
- world_locked = True
-
- if hasattr(world_set, "load"):
- world_set.load() # maybe it's changed on disk
-
- if pkg.operation == "uninstall":
- if hasattr(world_set, "cleanPackage"):
- world_set.cleanPackage(pkg.root_config.trees["vartree"].dbapi,
- pkg.cpv)
- if hasattr(world_set, "remove"):
- for s in pkg.root_config.setconfig.active:
- world_set.remove(SETPREFIX+s)
- else:
- if atom is not None:
- if hasattr(world_set, "add"):
- self._status_msg(('Recording %s in "world" ' + \
- 'favorites file...') % atom)
- logger.log(" === (%s of %s) Updating world file (%s)" % \
- (pkg_count.curval, pkg_count.maxval, pkg.cpv))
- world_set.add(atom)
- else:
- writemsg_level('\n!!! Unable to record %s in "world"\n' % \
- (atom,), level=logging.WARN, noiselevel=-1)
- finally:
- if world_locked:
- world_set.unlock()
-
- def _pkg(self, cpv, type_name, root_config, installed=False,
- operation=None, myrepo=None):
- """
- Get a package instance from the cache, or create a new
- one if necessary. Raises KeyError from aux_get if it
- failures for some reason (package does not exist or is
-		fails for some reason (package does not exist or is
- """
-
- # Reuse existing instance when available.
- pkg = self._pkg_cache.get(Package._gen_hash_key(cpv=cpv,
- type_name=type_name, repo_name=myrepo, root_config=root_config,
- installed=installed, operation=operation))
-
- if pkg is not None:
- return pkg
-
- tree_type = depgraph.pkg_tree_map[type_name]
- db = root_config.trees[tree_type].dbapi
- db_keys = list(self.trees[root_config.root][
- tree_type].dbapi._aux_cache_keys)
- metadata = zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo))
- pkg = Package(built=(type_name != "ebuild"),
- cpv=cpv, installed=installed, metadata=metadata,
- root_config=root_config, type_name=type_name)
- self._pkg_cache[pkg] = pkg
- return pkg
+ # max time between loadavg checks (seconds)
+ _loadavg_latency = 30
+
+ # max time between display status updates (seconds)
+ _max_display_latency = 3
+
+ _opts_ignore_blockers = \
+ frozenset(["--buildpkgonly",
+ "--fetchonly", "--fetch-all-uri",
+ "--nodeps", "--pretend"])
+
+ _opts_no_background = \
+ frozenset(["--pretend",
+ "--fetchonly", "--fetch-all-uri"])
+
+ _opts_no_self_update = frozenset(["--buildpkgonly",
+ "--fetchonly", "--fetch-all-uri", "--pretend"])
+
+ class _iface_class(SchedulerInterface):
+ __slots__ = ("fetch",
+ "scheduleSetup", "scheduleUnpack")
+
+ class _fetch_iface_class(SlotObject):
+ __slots__ = ("log_file", "schedule")
+
+ _task_queues_class = slot_dict_class(
+ ("merge", "jobs", "ebuild_locks", "fetch", "unpack"), prefix="")
+
+ class _build_opts_class(SlotObject):
+ __slots__ = ("buildpkg", "buildpkg_exclude", "buildpkgonly",
+ "fetch_all_uri", "fetchonly", "pretend")
+
+ class _binpkg_opts_class(SlotObject):
+ __slots__ = ("fetchonly", "getbinpkg", "pretend")
+
+ class _pkg_count_class(SlotObject):
+ __slots__ = ("curval", "maxval")
+
+ class _emerge_log_class(SlotObject):
+ __slots__ = ("xterm_titles",)
+
+ def log(self, *pargs, **kwargs):
+ if not self.xterm_titles:
+ # Avoid interference with the scheduler's status display.
+ kwargs.pop("short_msg", None)
+ emergelog(self.xterm_titles, *pargs, **kwargs)
+
+ class _failed_pkg(SlotObject):
+ __slots__ = ("build_dir", "build_log", "pkg",
+ "postinst_failure", "returncode")
+
+ class _ConfigPool(object):
+ """Interface for a task to temporarily allocate a config
+ instance from a pool. This allows a task to be constructed
+ long before the config instance actually becomes needed, like
+ when prefetchers are constructed for the whole merge list."""
+ __slots__ = ("_root", "_allocate", "_deallocate")
+ def __init__(self, root, allocate, deallocate):
+ self._root = root
+ self._allocate = allocate
+ self._deallocate = deallocate
+ def allocate(self):
+ return self._allocate(self._root)
+ def deallocate(self, settings):
+ self._deallocate(settings)
+
+ class _unknown_internal_error(portage.exception.PortageException):
+ """
+ Used internally to terminate scheduling. The specific reason for
+ the failure should have been dumped to stderr.
+ """
+ def __init__(self, value=""):
+ portage.exception.PortageException.__init__(self, value)
+
+ def __init__(self, settings, trees, mtimedb, myopts,
+ spinner, mergelist=None, favorites=None, graph_config=None):
+ PollScheduler.__init__(self, main=True)
+
+ if mergelist is not None:
+ warnings.warn("The mergelist parameter of the " + \
+ "_emerge.Scheduler constructor is now unused. Use " + \
+ "the graph_config parameter instead.",
+ DeprecationWarning, stacklevel=2)
+
+ self.settings = settings
+ self.target_root = settings["EROOT"]
+ self.trees = trees
+ self.myopts = myopts
+ self._spinner = spinner
+ self._mtimedb = mtimedb
+ self._favorites = favorites
+ self._args_set = InternalPackageSet(favorites, allow_repo=True)
+ self._build_opts = self._build_opts_class()
+ self.context = get_admin_context()
+
+ for k in self._build_opts.__slots__:
+ setattr(self._build_opts, k, myopts.get("--" + k.replace("_", "-")))
+ self._build_opts.buildpkg_exclude = InternalPackageSet( \
+ initial_atoms=" ".join(myopts.get("--buildpkg-exclude", [])).split(), \
+ allow_wildcard=True, allow_repo=True)
+ if "mirror" in self.settings.features:
+ self._build_opts.fetch_all_uri = True
+
+ self._binpkg_opts = self._binpkg_opts_class()
+ for k in self._binpkg_opts.__slots__:
+ setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
+
+ self.curval = 0
+ self._logger = self._emerge_log_class()
+ self._task_queues = self._task_queues_class()
+ for k in self._task_queues.allowed_keys:
+ setattr(self._task_queues, k,
+ SequentialTaskQueue())
+
+ # Holds merges that will wait to be executed when no builds are
+ # executing. This is useful for system packages since dependencies
+ # on system packages are frequently unspecified. For example, see
+ # bug #256616.
+ self._merge_wait_queue = deque()
+		# Holds merges that have been transferred from the merge_wait_queue to
+ # the actual merge queue. They are removed from this list upon
+ # completion. Other packages can start building only when this list is
+ # empty.
+ self._merge_wait_scheduled = []
+
+ # Holds system packages and their deep runtime dependencies. Before
+ # being merged, these packages go to merge_wait_queue, to be merged
+ # when no other packages are building.
+ self._deep_system_deps = set()
+
+ # Holds packages to merge which will satisfy currently unsatisfied
+ # deep runtime dependencies of system packages. If this is not empty
+ # then no parallel builds will be spawned until it is empty. This
+ # minimizes the possibility that a build will fail due to the system
+ # being in a fragile state. For example, see bug #259954.
+ self._unsatisfied_system_deps = set()
+
+ self._status_display = JobStatusDisplay(
+ xterm_titles=('notitles' not in settings.features))
+ self._max_load = myopts.get("--load-average")
+ max_jobs = myopts.get("--jobs")
+ if max_jobs is None:
+ max_jobs = 1
+ self._set_max_jobs(max_jobs)
+ self._running_root = trees[trees._running_eroot]["root_config"]
+ self.edebug = 0
+ if settings.get("PORTAGE_DEBUG", "") == "1":
+ self.edebug = 1
+ self.pkgsettings = {}
+ self._config_pool = {}
+ for root in self.trees:
+ self._config_pool[root] = []
+
+ self._fetch_log = os.path.join(_emerge.emergelog._emerge_log_dir,
+ 'emerge-fetch.log')
+ fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
+ schedule=self._schedule_fetch)
+ self._sched_iface = self._iface_class(
+ self._event_loop,
+ is_background=self._is_background,
+ fetch=fetch_iface,
+ scheduleSetup=self._schedule_setup,
+ scheduleUnpack=self._schedule_unpack)
+
+ self._prefetchers = weakref.WeakValueDictionary()
+ self._pkg_queue = []
+ self._jobs = 0
+ self._running_tasks = {}
+ self._completed_tasks = set()
+ self._main_exit = None
+ self._main_loadavg_handle = None
+ self._schedule_merge_wakeup_task = None
+
+ self._failed_pkgs = []
+ self._failed_pkgs_all = []
+ self._failed_pkgs_die_msgs = []
+ self._post_mod_echo_msgs = []
+ self._parallel_fetch = False
+ self._init_graph(graph_config)
+ merge_count = len([x for x in self._mergelist \
+ if isinstance(x, Package) and x.operation == "merge"])
+ self._pkg_count = self._pkg_count_class(
+ curval=0, maxval=merge_count)
+ self._status_display.maxval = self._pkg_count.maxval
+
+ # The load average takes some time to respond when new
+ # jobs are added, so we need to limit the rate of adding
+ # new jobs.
+ self._job_delay_max = 5
+ self._previous_job_start_time = None
+ self._job_delay_timeout_id = None
+
+		# The load average takes some time to respond after
+ # a SIGSTOP/SIGCONT cycle, so delay scheduling for some
+ # time after SIGCONT is received.
+ self._sigcont_delay = 5
+ self._sigcont_time = None
+
+ # This is used to memoize the _choose_pkg() result when
+ # no packages can be chosen until one of the existing
+ # jobs completes.
+ self._choose_pkg_return_early = False
+
+ features = self.settings.features
+ if "parallel-fetch" in features and \
+ not ("--pretend" in self.myopts or \
+ "--fetch-all-uri" in self.myopts or \
+ "--fetchonly" in self.myopts):
+ if "distlocks" not in features:
+ portage.writemsg(red("!!!")+"\n", noiselevel=-1)
+ portage.writemsg(red("!!!")+" parallel-fetching " + \
+ "requires the distlocks feature enabled"+"\n",
+ noiselevel=-1)
+ portage.writemsg(red("!!!")+" you have it disabled, " + \
+ "thus parallel-fetching is being disabled"+"\n",
+ noiselevel=-1)
+ portage.writemsg(red("!!!")+"\n", noiselevel=-1)
+ elif merge_count > 1:
+ self._parallel_fetch = True
+
+ if self._parallel_fetch:
+ # clear out existing fetch log if it exists
+ try:
+ open(self._fetch_log, 'w').close()
+ except EnvironmentError:
+ pass
+
+ self._running_portage = None
+ portage_match = self._running_root.trees["vartree"].dbapi.match(
+ portage.const.PORTAGE_PACKAGE_ATOM)
+ if portage_match:
+ cpv = portage_match.pop()
+ self._running_portage = self._pkg(cpv, "installed",
+ self._running_root, installed=True)
+
+ def _handle_self_update(self):
+
+ if self._opts_no_self_update.intersection(self.myopts):
+ return os.EX_OK
+
+ for x in self._mergelist:
+ if not isinstance(x, Package):
+ continue
+ if x.operation != "merge":
+ continue
+ if x.root != self._running_root.root:
+ continue
+ if not portage.dep.match_from_list(
+ portage.const.PORTAGE_PACKAGE_ATOM, [x]):
+ continue
+ rval = _check_temp_dir(self.settings)
+ if rval != os.EX_OK:
+ return rval
+ _prepare_self_update(self.settings)
+ break
+
+ return os.EX_OK
+
+ def _terminate_tasks(self):
+ self._status_display.quiet = True
+ for task in list(self._running_tasks.values()):
+ if task.isAlive():
+ # This task should keep the main loop running until
+ # it has had an opportunity to clean up after itself.
+ # Rely on its exit hook to remove it from
+ # self._running_tasks when it has finished cleaning up.
+ task.cancel()
+ else:
+ # This task has been waiting to be started in one of
+ # self._task_queues which are all cleared below. It
+				# will never be started, so purge it from
+ # self._running_tasks so that it won't keep the main
+ # loop running.
+ del self._running_tasks[id(task)]
+
+ for q in self._task_queues.values():
+ q.clear()
+
+ def _init_graph(self, graph_config):
+ """
+		Initialize structures used for dependency calculations
+ involving currently installed packages.
+ """
+ self._set_graph_config(graph_config)
+ self._blocker_db = {}
+ depgraph_params = create_depgraph_params(self.myopts, None)
+ dynamic_deps = "dynamic_deps" in depgraph_params
+ ignore_built_slot_operator_deps = self.myopts.get(
+ "--ignore-built-slot-operator-deps", "n") == "y"
+ for root in self.trees:
+ if graph_config is None:
+ fake_vartree = FakeVartree(self.trees[root]["root_config"],
+ pkg_cache=self._pkg_cache, dynamic_deps=dynamic_deps,
+ ignore_built_slot_operator_deps=ignore_built_slot_operator_deps)
+ fake_vartree.sync()
+ else:
+ fake_vartree = graph_config.trees[root]['vartree']
+ self._blocker_db[root] = BlockerDB(fake_vartree)
+
+ def _destroy_graph(self):
+ """
+ Use this to free memory at the beginning of _calc_resume_list().
+ After _calc_resume_list(), the _init_graph() method
+		must be called in order to re-generate the structures that
+ this method destroys.
+ """
+ self._blocker_db = None
+ self._set_graph_config(None)
+ gc.collect()
+
+ def _set_max_jobs(self, max_jobs):
+ self._max_jobs = max_jobs
+ self._task_queues.jobs.max_jobs = max_jobs
+ if "parallel-install" in self.settings.features:
+ self._task_queues.merge.max_jobs = max_jobs
+
+ def _background_mode(self):
+ """
+ Check if background mode is enabled and adjust states as necessary.
+
+ @rtype: bool
+ @return: True if background mode is enabled, False otherwise.
+ """
+ background = (self._max_jobs is True or \
+ self._max_jobs > 1 or "--quiet" in self.myopts \
+ or self.myopts.get("--quiet-build") == "y") and \
+ not bool(self._opts_no_background.intersection(self.myopts))
+
+ if background:
+ interactive_tasks = self._get_interactive_tasks()
+ if interactive_tasks:
+ background = False
+ writemsg_level(">>> Sending package output to stdio due " + \
+ "to interactive package(s):\n",
+ level=logging.INFO, noiselevel=-1)
+ msg = [""]
+ for pkg in interactive_tasks:
+ pkg_str = " " + colorize("INFORM", str(pkg.cpv))
+ if pkg.root_config.settings["ROOT"] != "/":
+ pkg_str += " for " + pkg.root
+ msg.append(pkg_str)
+ msg.append("")
+ writemsg_level("".join("%s\n" % (l,) for l in msg),
+ level=logging.INFO, noiselevel=-1)
+ if self._max_jobs is True or self._max_jobs > 1:
+ self._set_max_jobs(1)
+ writemsg_level(">>> Setting --jobs=1 due " + \
+ "to the above interactive package(s)\n",
+ level=logging.INFO, noiselevel=-1)
+ writemsg_level(">>> In order to temporarily mask " + \
+ "interactive updates, you may\n" + \
+ ">>> specify --accept-properties=-interactive\n",
+ level=logging.INFO, noiselevel=-1)
+ self._status_display.quiet = \
+ not background or \
+ ("--quiet" in self.myopts and \
+ "--verbose" not in self.myopts)
+
+ self._logger.xterm_titles = \
+ "notitles" not in self.settings.features and \
+ self._status_display.quiet
+
+ return background
+
+ def _get_interactive_tasks(self):
+ interactive_tasks = []
+ for task in self._mergelist:
+ if not (isinstance(task, Package) and \
+ task.operation == "merge"):
+ continue
+ if 'interactive' in task.properties:
+ interactive_tasks.append(task)
+ return interactive_tasks
+
+ def _set_graph_config(self, graph_config):
+
+ if graph_config is None:
+ self._graph_config = None
+ self._pkg_cache = {}
+ self._digraph = None
+ self._mergelist = []
+ self._world_atoms = None
+ self._deep_system_deps.clear()
+ return
+
+ self._graph_config = graph_config
+ self._pkg_cache = graph_config.pkg_cache
+ self._digraph = graph_config.graph
+ self._mergelist = graph_config.mergelist
+
+ # Generate world atoms while the event loop is not running,
+ # since otherwise portdbapi match calls in the create_world_atom
+ # function could trigger event loop recursion.
+ self._world_atoms = {}
+ for pkg in self._mergelist:
+ if getattr(pkg, 'operation', None) != 'merge':
+ continue
+ atom = create_world_atom(pkg, self._args_set,
+ pkg.root_config, before_install=True)
+ if atom is not None:
+ self._world_atoms[pkg] = atom
+
+ if "--nodeps" in self.myopts or \
+ (self._max_jobs is not True and self._max_jobs < 2):
+ # save some memory
+ self._digraph = None
+ graph_config.graph = None
+ graph_config.pkg_cache.clear()
+ self._deep_system_deps.clear()
+ for pkg in self._mergelist:
+ self._pkg_cache[pkg] = pkg
+ return
+
+ self._find_system_deps()
+ self._prune_digraph()
+ self._prevent_builddir_collisions()
+ if '--debug' in self.myopts:
+ writemsg("\nscheduler digraph:\n\n", noiselevel=-1)
+ self._digraph.debug_print()
+ writemsg("\n", noiselevel=-1)
+
+ def _find_system_deps(self):
+ """
+ Find system packages and their deep runtime dependencies. Before being
+ merged, these packages go to merge_wait_queue, to be merged when no
+ other packages are building.
+ NOTE: This can only find deep system deps if the system set has been
+ added to the graph and traversed deeply (the depgraph "complete"
+		parameter will do this, triggered by the emerge --complete-graph option).
+ """
+ params = create_depgraph_params(self.myopts, None)
+ if not params["implicit_system_deps"]:
+ return
+
+ deep_system_deps = self._deep_system_deps
+ deep_system_deps.clear()
+ deep_system_deps.update(
+ _find_deep_system_runtime_deps(self._digraph))
+ deep_system_deps.difference_update([pkg for pkg in \
+ deep_system_deps if pkg.operation != "merge"])
+
+ def _prune_digraph(self):
+ """
+ Prune any root nodes that are irrelevant.
+ """
+
+ graph = self._digraph
+ completed_tasks = self._completed_tasks
+ removed_nodes = set()
+ while True:
+ for node in graph.root_nodes():
+ if not isinstance(node, Package) or \
+ (node.installed and node.operation == "nomerge") or \
+ node.onlydeps or \
+ node in completed_tasks:
+ removed_nodes.add(node)
+ if removed_nodes:
+ graph.difference_update(removed_nodes)
+ if not removed_nodes:
+ break
+ removed_nodes.clear()
+
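
A standalone sketch of the fixed-point pruning loop above, with an adjacency dict standing in for the digraph; function and variable names are hypothetical.

    def prune_roots(children, completed):
        # children maps a node to the nodes it depends on (hypothetical data).
        graph = {node: set(deps) for node, deps in children.items()}
        while True:
            referenced = set().union(*graph.values()) if graph else set()
            removable = [n for n in graph
                         if n not in referenced and n in completed]
            if not removable:
                return graph
            for n in removable:
                del graph[n]

    graph = {"a": {"b"}, "b": set(), "c": set()}
    print(sorted(prune_roots(graph, completed={"a", "c"})))  # -> ['b']
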
+ def _prevent_builddir_collisions(self):
+ """
+ When building stages, sometimes the same exact cpv needs to be merged
+ to both $ROOTs. Add edges to the digraph in order to avoid collisions
+ in the builddir. Currently, normal file locks would be inappropriate
+		for this purpose since emerge holds all of its build dir locks from
+ the main process.
+ """
+ cpv_map = {}
+ for pkg in self._mergelist:
+ if not isinstance(pkg, Package):
+ # a satisfied blocker
+ continue
+ if pkg.installed:
+ continue
+ if pkg.cpv not in cpv_map:
+ cpv_map[pkg.cpv] = [pkg]
+ continue
+ for earlier_pkg in cpv_map[pkg.cpv]:
+ self._digraph.add(earlier_pkg, pkg,
+ priority=DepPriority(buildtime=True))
+ cpv_map[pkg.cpv].append(pkg)
+
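
A minimal sketch of the collision guard above: when the same cpv appears more than once in the merge list, record an ordering edge from each earlier occurrence to the later one so their build directories are never used concurrently. The dict-based packages and the edges list are stand-ins for Package objects and digraph.add().

    def serialize_duplicates(mergelist):
        cpv_map = {}
        edges = []
        for pkg in mergelist:
            for earlier in cpv_map.get(pkg["cpv"], []):
                edges.append((earlier["root"], pkg["root"], pkg["cpv"]))
            cpv_map.setdefault(pkg["cpv"], []).append(pkg)
        return edges

    mergelist = [
        {"cpv": "sys-apps/foo-1.0", "root": "/"},
        {"cpv": "sys-apps/foo-1.0", "root": "/build/stage"},
    ]
    print(serialize_duplicates(mergelist))
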
+ class _pkg_failure(portage.exception.PortageException):
+ """
+ An instance of this class is raised by unmerge() when
+ an uninstallation fails.
+ """
+ status = 1
+ def __init__(self, *pargs):
+ portage.exception.PortageException.__init__(self, pargs)
+ if pargs:
+ self.status = pargs[0]
+
+ def _schedule_fetch(self, fetcher):
+ """
+ Schedule a fetcher, in order to control the number of concurrent
+ fetchers. If self._max_jobs is greater than 1 then the fetch
+ queue is bypassed and the fetcher is started immediately,
+ otherwise it is added to the front of the parallel-fetch queue.
+ NOTE: The parallel-fetch queue is currently used to serialize
+ access to the parallel-fetch log, so changes in the log handling
+ would be required before it would be possible to enable
+ concurrent fetching within the parallel-fetch queue.
+ """
+ if self._max_jobs > 1:
+ fetcher.start()
+ else:
+ self._task_queues.fetch.addFront(fetcher)
+
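
The queueing policy above can be sketched with a plain deque: with more than one job the fetcher starts immediately, otherwise it jumps to the front of the serialized fetch queue (which also serializes writes to the shared fetch log). The start callback and string fetchers are hypothetical stand-ins.

    from collections import deque

    def schedule_fetch(fetch_queue, start, fetcher, max_jobs):
        if max_jobs > 1:
            start(fetcher)                   # bypass the queue entirely
        else:
            fetch_queue.appendleft(fetcher)  # "addFront" of the parallel-fetch queue

    queue = deque(["earlier-fetch"])
    schedule_fetch(queue, start=print, fetcher="urgent-fetch", max_jobs=1)
    print(list(queue))  # -> ['urgent-fetch', 'earlier-fetch']
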
+ def _schedule_setup(self, setup_phase):
+ """
+ Schedule a setup phase on the merge queue, in order to
+ serialize unsandboxed access to the live filesystem.
+ """
+ if self._task_queues.merge.max_jobs > 1 and \
+ "ebuild-locks" in self.settings.features:
+ # Use a separate queue for ebuild-locks when the merge
+ # queue allows more than 1 job (due to parallel-install),
+ # since the portage.locks module does not behave as desired
+ # if we try to lock the same file multiple times
+ # concurrently from the same process.
+ self._task_queues.ebuild_locks.add(setup_phase)
+ else:
+ self._task_queues.merge.add(setup_phase)
+ self._schedule()
+
+ def _schedule_unpack(self, unpack_phase):
+ """
+ Schedule an unpack phase on the unpack queue, in order
+ to serialize $DISTDIR access for live ebuilds.
+ """
+ self._task_queues.unpack.add(unpack_phase)
+
+ def _find_blockers(self, new_pkg):
+ """
+ Returns a callable.
+ """
+ def get_blockers():
+ return self._find_blockers_impl(new_pkg)
+ return get_blockers
+
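
The callable returned above is a simple closure: the (possibly expensive) blocker lookup runs only when the merge task finally invokes it, not when the task object is constructed. A self-contained sketch, with the lambda standing in for the real lookup:

    def make_blocker_lookup(find_blockers_impl, pkg):
        def get_blockers():
            return find_blockers_impl(pkg)
        return get_blockers

    lookup = make_blocker_lookup(
        lambda pkg: ["old-" + pkg] if pkg == "foo" else [], "foo")
    print(lookup())  # -> ['old-foo'], computed only when actually called
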
+ def _find_blockers_impl(self, new_pkg):
+ if self._opts_ignore_blockers.intersection(self.myopts):
+ return None
+
+ blocker_db = self._blocker_db[new_pkg.root]
+
+ blocked_pkgs = []
+ for blocking_pkg in blocker_db.findInstalledBlockers(new_pkg):
+ if new_pkg.slot_atom == blocking_pkg.slot_atom:
+ continue
+ if new_pkg.cpv == blocking_pkg.cpv:
+ continue
+ blocked_pkgs.append(blocking_pkg)
+
+ return blocked_pkgs
+
+ def _generate_digests(self):
+ """
+		Generate digests if necessary for --digest or FEATURES=digest.
+		In order to avoid interference, this must be done before parallel
+ tasks are started.
+ """
+
+ digest = '--digest' in self.myopts
+ if not digest:
+ for pkgsettings in self.pkgsettings.values():
+ if pkgsettings.mycpv is not None:
+ # ensure that we are using global features
+ # settings rather than those from package.env
+ pkgsettings.reset()
+ if 'digest' in pkgsettings.features:
+ digest = True
+ break
+
+ if not digest:
+ return os.EX_OK
+
+ for x in self._mergelist:
+ if not isinstance(x, Package) or \
+ x.type_name != 'ebuild' or \
+ x.operation != 'merge':
+ continue
+ pkgsettings = self.pkgsettings[x.root]
+ if pkgsettings.mycpv is not None:
+ # ensure that we are using global features
+ # settings rather than those from package.env
+ pkgsettings.reset()
+ if '--digest' not in self.myopts and \
+ 'digest' not in pkgsettings.features:
+ continue
+ portdb = x.root_config.trees['porttree'].dbapi
+ ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
+ if ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s'" % x.cpv)
+ pkgsettings['O'] = os.path.dirname(ebuild_path)
+ if not digestgen(mysettings=pkgsettings, myportdb=portdb):
+ writemsg_level(
+ "!!! Unable to generate manifest for '%s'.\n" \
+ % x.cpv, level=logging.ERROR, noiselevel=-1)
+ return FAILURE
+
+ return os.EX_OK
+
+ def _check_manifests(self):
+ # Verify all the manifests now so that the user is notified of failure
+ # as soon as possible.
+ if "strict" not in self.settings.features or \
+ "--fetchonly" in self.myopts or \
+ "--fetch-all-uri" in self.myopts:
+ return os.EX_OK
+
+ shown_verifying_msg = False
+ quiet_settings = {}
+ for myroot, pkgsettings in self.pkgsettings.items():
+ quiet_config = portage.config(clone=pkgsettings)
+ quiet_config["PORTAGE_QUIET"] = "1"
+ quiet_config.backup_changes("PORTAGE_QUIET")
+ quiet_settings[myroot] = quiet_config
+ del quiet_config
+
+ failures = 0
+
+ for x in self._mergelist:
+ if not isinstance(x, Package) or \
+ x.type_name != "ebuild":
+ continue
+
+ if x.operation == "uninstall":
+ continue
+
+ if not shown_verifying_msg:
+ shown_verifying_msg = True
+ self._status_msg("Verifying ebuild manifests")
+
+ root_config = x.root_config
+ portdb = root_config.trees["porttree"].dbapi
+ quiet_config = quiet_settings[root_config.root]
+ ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
+ if ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s'" % x.cpv)
+ quiet_config["O"] = os.path.dirname(ebuild_path)
+ if not digestcheck([], quiet_config, strict=True):
+ failures |= 1
+
+ if failures:
+ return FAILURE
+ return os.EX_OK
+
+ def _add_prefetchers(self):
+
+ if not self._parallel_fetch:
+ return
+
+ if self._parallel_fetch:
+
+ prefetchers = self._prefetchers
+
+ for pkg in self._mergelist:
+ # mergelist can contain solved Blocker instances
+ if not isinstance(pkg, Package) or pkg.operation == "uninstall":
+ continue
+ prefetcher = self._create_prefetcher(pkg)
+ if prefetcher is not None:
+ # This will start the first prefetcher immediately, so that
+ # self._task() won't discard it. This avoids a case where
+ # the first prefetcher is discarded, causing the second
+ # prefetcher to occupy the fetch queue before the first
+ # fetcher has an opportunity to execute.
+ prefetchers[pkg] = prefetcher
+ self._task_queues.fetch.add(prefetcher)
+
+ def _create_prefetcher(self, pkg):
+ """
+ @return: a prefetcher, or None if not applicable
+ """
+ prefetcher = None
+
+ if not isinstance(pkg, Package):
+ pass
+
+ elif pkg.type_name == "ebuild":
+
+ prefetcher = EbuildFetcher(background=True,
+ config_pool=self._ConfigPool(pkg.root,
+ self._allocate_config, self._deallocate_config),
+ fetchonly=1, fetchall=self._build_opts.fetch_all_uri,
+ logfile=self._fetch_log,
+ pkg=pkg, prefetch=True, scheduler=self._sched_iface)
+
+ elif pkg.type_name == "binary" and \
+ "--getbinpkg" in self.myopts and \
+ pkg.root_config.trees["bintree"].isremote(pkg.cpv):
+
+ prefetcher = BinpkgPrefetcher(background=True,
+ pkg=pkg, scheduler=self._sched_iface)
+
+ return prefetcher
+
+ def _run_pkg_pretend(self):
+ """
+ Since pkg_pretend output may be important, this method sends all
+ output directly to stdout (regardless of options like --quiet or
+ --jobs).
+ """
+
+ failures = 0
+ sched_iface = self._sched_iface
+
+ for x in self._mergelist:
+ if not isinstance(x, Package):
+ continue
+
+ if x.operation == "uninstall":
+ continue
+
+ if x.eapi in ("0", "1", "2", "3"):
+ continue
+
+ if "pretend" not in x.defined_phases:
+ continue
+
+ out_str =">>> Running pre-merge checks for " + colorize("INFORM", x.cpv) + "\n"
+ portage.util.writemsg_stdout(out_str, noiselevel=-1)
+
+ root_config = x.root_config
+ settings = self.pkgsettings[root_config.root]
+ settings.setcpv(x)
+
+ # setcpv/package.env allows for per-package PORTAGE_TMPDIR so we
+ # have to validate it for each package
+ rval = _check_temp_dir(settings)
+ if rval != os.EX_OK:
+ return rval
+
+ build_dir_path = os.path.join(
+ os.path.realpath(settings["PORTAGE_TMPDIR"]),
+ "portage", x.category, x.pf)
+ existing_builddir = os.path.isdir(build_dir_path)
+ settings["PORTAGE_BUILDDIR"] = build_dir_path
+ build_dir = EbuildBuildDir(scheduler=sched_iface,
+ settings=settings)
+ sched_iface.run_until_complete(build_dir.async_lock())
+ current_task = None
+
+ try:
+ # Clean up the existing build dir, in case pkg_pretend
+ # checks for available space (bug #390711).
+ if existing_builddir:
+ if x.built:
+ tree = "bintree"
+ infloc = os.path.join(build_dir_path, "build-info")
+ ebuild_path = os.path.join(infloc, x.pf + ".ebuild")
+ else:
+ tree = "porttree"
+ portdb = root_config.trees["porttree"].dbapi
+ ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
+ if ebuild_path is None:
+ raise AssertionError(
+ "ebuild not found for '%s'" % x.cpv)
+ portage.package.ebuild.doebuild.doebuild_environment(
+ ebuild_path, "clean", settings=settings,
+ db=self.trees[settings['EROOT']][tree].dbapi)
+ clean_phase = EbuildPhase(background=False,
+ phase='clean', scheduler=sched_iface, settings=settings)
+ current_task = clean_phase
+ clean_phase.start()
+ clean_phase.wait()
+
+ if x.built:
+ tree = "bintree"
+ bintree = root_config.trees["bintree"].dbapi.bintree
+ fetched = False
+
+ # Display fetch on stdout, so that it's always clear what
+ # is consuming time here.
+ if bintree.isremote(x.cpv):
+ fetcher = BinpkgFetcher(pkg=x,
+ scheduler=sched_iface)
+ fetcher.start()
+ if fetcher.wait() != os.EX_OK:
+ failures += 1
+ continue
+ fetched = fetcher.pkg_path
+
+ if fetched is False:
+ filename = bintree.getname(x.cpv)
+ else:
+ filename = fetched
+ verifier = BinpkgVerifier(pkg=x,
+ scheduler=sched_iface, _pkg_path=filename)
+ current_task = verifier
+ verifier.start()
+ if verifier.wait() != os.EX_OK:
+ failures += 1
+ continue
+
+ if fetched:
+ bintree.inject(x.cpv, filename=fetched)
+ infloc = os.path.join(build_dir_path, "build-info")
+ ensure_dirs(infloc)
+ self._sched_iface.run_until_complete(
+ bintree.dbapi.unpack_metadata(settings, infloc))
+ ebuild_path = os.path.join(infloc, x.pf + ".ebuild")
+ settings.configdict["pkg"]["EMERGE_FROM"] = "binary"
+ settings.configdict["pkg"]["MERGE_TYPE"] = "binary"
+
+ else:
+ tree = "porttree"
+ portdb = root_config.trees["porttree"].dbapi
+ ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
+ if ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s'" % x.cpv)
+ settings.configdict["pkg"]["EMERGE_FROM"] = "ebuild"
+ if self._build_opts.buildpkgonly:
+ settings.configdict["pkg"]["MERGE_TYPE"] = "buildonly"
+ else:
+ settings.configdict["pkg"]["MERGE_TYPE"] = "source"
+
+ portage.package.ebuild.doebuild.doebuild_environment(ebuild_path,
+ "pretend", settings=settings,
+ db=self.trees[settings['EROOT']][tree].dbapi)
+
+ prepare_build_dirs(root_config.root, settings, cleanup=0)
+
+ vardb = root_config.trees['vartree'].dbapi
+ settings["REPLACING_VERSIONS"] = " ".join(
+ set(portage.versions.cpv_getversion(match) \
+ for match in vardb.match(x.slot_atom) + \
+ vardb.match('='+x.cpv)))
+ pretend_phase = EbuildPhase(
+ phase="pretend", scheduler=sched_iface,
+ settings=settings)
+
+ current_task = pretend_phase
+ pretend_phase.start()
+ ret = pretend_phase.wait()
+ if ret != os.EX_OK:
+ failures += 1
+ portage.elog.elog_process(x.cpv, settings)
+ finally:
+
+ if current_task is not None:
+ if current_task.isAlive():
+ current_task.cancel()
+ current_task.wait()
+ if current_task.returncode == os.EX_OK:
+ clean_phase = EbuildPhase(background=False,
+ phase='clean', scheduler=sched_iface,
+ settings=settings)
+ clean_phase.start()
+ clean_phase.wait()
+
+ sched_iface.run_until_complete(build_dir.async_unlock())
+
+ if failures:
+ return FAILURE
+ return os.EX_OK
+
+ def merge(self):
+ if "--resume" in self.myopts:
+ # We're resuming.
+ portage.writemsg_stdout(
+ colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
+ self._logger.log(" *** Resuming merge...")
+
+ self._save_resume_list()
+
+ try:
+ self._background = self._background_mode()
+ except self._unknown_internal_error:
+ return FAILURE
+
+ rval = self._handle_self_update()
+ if rval != os.EX_OK:
+ return rval
+
+ for root in self.trees:
+ root_config = self.trees[root]["root_config"]
+
+ # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
+ # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
+ # for ensuring sane $PWD (bug #239560) and storing elog messages.
+ tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
+ if not tmpdir or not os.path.isdir(tmpdir):
+ msg = (
+ 'The directory specified in your PORTAGE_TMPDIR variable does not exist:',
+ tmpdir,
+ 'Please create this directory or correct your PORTAGE_TMPDIR setting.',
+ )
+ out = portage.output.EOutput()
+ for l in msg:
+ out.eerror(l)
+ return FAILURE
+
+ if self._background:
+ root_config.settings.unlock()
+ root_config.settings["PORTAGE_BACKGROUND"] = "1"
+ root_config.settings.backup_changes("PORTAGE_BACKGROUND")
+ root_config.settings.lock()
+
+ self.pkgsettings[root] = portage.config(
+ clone=root_config.settings)
+
+ keep_going = "--keep-going" in self.myopts
+ fetchonly = self._build_opts.fetchonly
+ mtimedb = self._mtimedb
+ failed_pkgs = self._failed_pkgs
+
+ rval = self._generate_digests()
+ if rval != os.EX_OK:
+ return rval
+
+ # TODO: Immediately recalculate deps here if --keep-going
+ # is enabled and corrupt manifests are detected.
+ rval = self._check_manifests()
+ if rval != os.EX_OK and not keep_going:
+ return rval
+
+ if not fetchonly:
+ rval = self._run_pkg_pretend()
+ if rval != os.EX_OK:
+ return rval
+
+ while True:
+
+ received_signal = []
+
+ def sighandler(signum, frame):
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
+ portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % \
+ {"signal":signum})
+ self.terminate()
+ received_signal.append(128 + signum)
+
+ earlier_sigint_handler = signal.signal(signal.SIGINT, sighandler)
+ earlier_sigterm_handler = signal.signal(signal.SIGTERM, sighandler)
+ earlier_sigcont_handler = \
+ signal.signal(signal.SIGCONT, self._sigcont_handler)
+ signal.siginterrupt(signal.SIGCONT, False)
+
+ try:
+ rval = self._merge()
+ finally:
+ # Restore previous handlers
+ if earlier_sigint_handler is not None:
+ signal.signal(signal.SIGINT, earlier_sigint_handler)
+ else:
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ if earlier_sigterm_handler is not None:
+ signal.signal(signal.SIGTERM, earlier_sigterm_handler)
+ else:
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+ if earlier_sigcont_handler is not None:
+ signal.signal(signal.SIGCONT, earlier_sigcont_handler)
+ else:
+ signal.signal(signal.SIGCONT, signal.SIG_DFL)
+
+ self._termination_check()
+ if received_signal:
+ sys.exit(received_signal[0])
+
+ if rval == os.EX_OK or fetchonly or not keep_going:
+ break
+ if "resume" not in mtimedb:
+ break
+ mergelist = self._mtimedb["resume"].get("mergelist")
+ if not mergelist:
+ break
+
+ if not failed_pkgs:
+ break
+
+ for failed_pkg in failed_pkgs:
+ mergelist.remove(list(failed_pkg.pkg))
+
+ self._failed_pkgs_all.extend(failed_pkgs)
+ del failed_pkgs[:]
+
+ if not mergelist:
+ break
+
+ if not self._calc_resume_list():
+ break
+
+ clear_caches(self.trees)
+ if not self._mergelist:
+ break
+
+ self._save_resume_list()
+ self._pkg_count.curval = 0
+ self._pkg_count.maxval = len([x for x in self._mergelist \
+ if isinstance(x, Package) and x.operation == "merge"])
+ self._status_display.maxval = self._pkg_count.maxval
+
+ # Cleanup any callbacks that have been registered with the global
+ # event loop by calls to the terminate method.
+ self._cleanup()
+
+ self._logger.log(" *** Finished. Cleaning up...")
+
+ if failed_pkgs:
+ self._failed_pkgs_all.extend(failed_pkgs)
+ del failed_pkgs[:]
+
+ printer = portage.output.EOutput()
+ background = self._background
+ failure_log_shown = False
+ if background and len(self._failed_pkgs_all) == 1 and \
+ self.myopts.get('--quiet-fail', 'n') != 'y':
+			# If only one package failed then just show its
+ # whole log for easy viewing.
+ failed_pkg = self._failed_pkgs_all[-1]
+ log_file = None
+ log_file_real = None
+
+ log_path = self._locate_failure_log(failed_pkg)
+ if log_path is not None:
+ try:
+ log_file = open(_unicode_encode(log_path,
+ encoding=_encodings['fs'], errors='strict'), mode='rb')
+ except IOError:
+ pass
+ else:
+ if log_path.endswith('.gz'):
+ log_file_real = log_file
+ log_file = gzip.GzipFile(filename='',
+ mode='rb', fileobj=log_file)
+
+ if log_file is not None:
+ try:
+ for line in log_file:
+ writemsg_level(line, noiselevel=-1)
+ except zlib.error as e:
+ writemsg_level("%s\n" % (e,), level=logging.ERROR,
+ noiselevel=-1)
+ finally:
+ log_file.close()
+ if log_file_real is not None:
+ log_file_real.close()
+ failure_log_shown = True
+
+ # Dump mod_echo output now since it tends to flood the terminal.
+ # This allows us to avoid having more important output, generated
+ # later, from being swept away by the mod_echo output.
+ mod_echo_output = _flush_elog_mod_echo()
+
+ if background and not failure_log_shown and \
+ self._failed_pkgs_all and \
+ self._failed_pkgs_die_msgs and \
+ not mod_echo_output:
+
+ for mysettings, key, logentries in self._failed_pkgs_die_msgs:
+ root_msg = ""
+ if mysettings["ROOT"] != "/":
+ root_msg = " merged to %s" % mysettings["ROOT"]
+ print()
+ printer.einfo("Error messages for package %s%s:" % \
+ (colorize("INFORM", key), root_msg))
+ print()
+ for phase in portage.const.EBUILD_PHASES:
+ if phase not in logentries:
+ continue
+ for msgtype, msgcontent in logentries[phase]:
+ if isinstance(msgcontent, basestring):
+ msgcontent = [msgcontent]
+ for line in msgcontent:
+ printer.eerror(line.strip("\n"))
+
+ if self._post_mod_echo_msgs:
+ for msg in self._post_mod_echo_msgs:
+ msg()
+
+ if len(self._failed_pkgs_all) > 1 or \
+ (self._failed_pkgs_all and keep_going):
+ if len(self._failed_pkgs_all) > 1:
+ msg = "The following %d packages have " % \
+ len(self._failed_pkgs_all) + \
+ "failed to build, install, or execute postinst:"
+ else:
+ msg = "The following package has " + \
+ "failed to build, install, or execute postinst:"
+
+ printer.eerror("")
+ for line in textwrap.wrap(msg, 72):
+ printer.eerror(line)
+ printer.eerror("")
+ for failed_pkg in self._failed_pkgs_all:
+ # Use unicode_literals to force unicode format string so
+ # that Package.__unicode__() is called in python2.
+ msg = " %s" % (failed_pkg.pkg,)
+ if failed_pkg.postinst_failure:
+ msg += " (postinst failed)"
+ log_path = self._locate_failure_log(failed_pkg)
+ if log_path is not None:
+ msg += ", Log file:"
+ printer.eerror(msg)
+ if log_path is not None:
+ printer.eerror(" '%s'" % colorize('INFORM', log_path))
+ printer.eerror("")
+
+ if self._failed_pkgs_all:
+ return FAILURE
+ return os.EX_OK
+
+ def _elog_listener(self, mysettings, key, logentries, fulltext):
+ errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
+ if errors:
+ self._failed_pkgs_die_msgs.append(
+ (mysettings, key, errors))
+
+ def _locate_failure_log(self, failed_pkg):
+
+ log_paths = [failed_pkg.build_log]
+
+ for log_path in log_paths:
+ if not log_path:
+ continue
+
+ try:
+ log_size = os.stat(log_path).st_size
+ except OSError:
+ continue
+
+ if log_size == 0:
+ continue
+
+ return log_path
+
+ return None
+
+ def _add_packages(self):
+ pkg_queue = self._pkg_queue
+ for pkg in self._mergelist:
+ if isinstance(pkg, Package):
+ pkg_queue.append(pkg)
+ elif isinstance(pkg, Blocker):
+ pass
+
+ def _system_merge_started(self, merge):
+ """
+ Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
+ In general, this keeps track of installed system packages with
+ unsatisfied RDEPEND or PDEPEND (circular dependencies). It can be
+ a fragile situation, so we don't execute any unrelated builds until
+ the circular dependencies are built and installed.
+ """
+ graph = self._digraph
+ if graph is None:
+ return
+ pkg = merge.merge.pkg
+
+ # Skip this if $ROOT != / since it shouldn't matter if there
+ # are unsatisfied system runtime deps in this case.
+ if pkg.root_config.settings["ROOT"] != "/":
+ return
+
+ completed_tasks = self._completed_tasks
+ unsatisfied = self._unsatisfied_system_deps
+
+ def ignore_non_runtime_or_satisfied(priority):
+ """
+ Ignore non-runtime and satisfied runtime priorities.
+ """
+ if isinstance(priority, DepPriority) and \
+ not priority.satisfied and \
+ (priority.runtime or priority.runtime_post):
+ return False
+ return True
+
+ # When checking for unsatisfied runtime deps, only check
+ # direct deps since indirect deps are checked when the
+ # corresponding parent is merged.
+ for child in graph.child_nodes(pkg,
+ ignore_priority=ignore_non_runtime_or_satisfied):
+ if not isinstance(child, Package) or \
+ child.operation == 'uninstall':
+ continue
+ if child is pkg:
+ continue
+ if child.operation == 'merge' and \
+ child not in completed_tasks:
+ unsatisfied.add(child)
+
+ def _merge_wait_exit_handler(self, task):
+ self._merge_wait_scheduled.remove(task)
+ self._merge_exit(task)
+
+ def _merge_exit(self, merge):
+ self._running_tasks.pop(id(merge), None)
+ self._do_merge_exit(merge)
+ self._deallocate_config(merge.merge.settings)
+ if merge.returncode == os.EX_OK and \
+ not merge.merge.pkg.installed:
+ self._status_display.curval += 1
+ self._status_display.merges = len(self._task_queues.merge)
+ self._schedule()
+
+ def _do_merge_exit(self, merge):
+ pkg = merge.merge.pkg
+ settings = merge.merge.settings
+ trees = self.trees
+ if merge.returncode != os.EX_OK:
+ build_dir = settings.get("PORTAGE_BUILDDIR")
+ build_log = settings.get("PORTAGE_LOG_FILE")
+
+ self._failed_pkgs.append(self._failed_pkg(
+ build_dir=build_dir, build_log=build_log,
+ pkg=pkg,
+ returncode=merge.returncode))
+ if not self._terminated_tasks:
+ self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
+ self._status_display.failed = len(self._failed_pkgs)
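+				# gosbs addition relative to the upstream Scheduler: hand the
+				# package and its settings to the build checker hook (see
+				# gosbs/builder/build_checker.py) on every merge/build exit path.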
+ check_build(settings, pkg, trees)
+ return
+
+ if merge.postinst_failure:
+ # Append directly to _failed_pkgs_all for non-critical errors.
+ self._failed_pkgs_all.append(self._failed_pkg(
+ build_dir=merge.merge.settings.get("PORTAGE_BUILDDIR"),
+ build_log=merge.merge.settings.get("PORTAGE_LOG_FILE"),
+ pkg=pkg,
+ postinst_failure=True,
+ returncode=merge.returncode))
+ self._failed_pkg_msg(self._failed_pkgs_all[-1],
+ "execute postinst for", "for")
+
+ self._task_complete(pkg)
+ pkg_to_replace = merge.merge.pkg_to_replace
+ if pkg_to_replace is not None:
+			# When a package is replaced, mark its uninstall
+ # task complete (if any).
+ if self._digraph is not None and \
+ pkg_to_replace in self._digraph:
+ try:
+ self._pkg_queue.remove(pkg_to_replace)
+ except ValueError:
+ pass
+ self._task_complete(pkg_to_replace)
+ else:
+ self._pkg_cache.pop(pkg_to_replace, None)
+
+ if pkg.installed:
+ check_build(settings, pkg, trees)
+ return
+
+ # Call mtimedb.commit() after each merge so that
+ # --resume still works after being interrupted
+ # by reboot, sigkill or similar.
+ mtimedb = self._mtimedb
+ mtimedb["resume"]["mergelist"].remove(list(pkg))
+ if not mtimedb["resume"]["mergelist"]:
+ del mtimedb["resume"]
+ mtimedb.commit()
+ check_build(settings, pkg, trees)
+
+ def _build_exit(self, build):
+ self._running_tasks.pop(id(build), None)
+ if build.returncode == os.EX_OK and self._terminated_tasks:
+ # We've been interrupted, so we won't
+ # add this to the merge queue.
+ self.curval += 1
+ self._deallocate_config(build.settings)
+ elif build.returncode == os.EX_OK:
+ self.curval += 1
+ merge = PackageMerge(merge=build, scheduler=self._sched_iface)
+ self._running_tasks[id(merge)] = merge
+ if not build.build_opts.buildpkgonly and \
+ build.pkg in self._deep_system_deps:
+ # Since dependencies on system packages are frequently
+ # unspecified, merge them only when no builds are executing.
+ self._merge_wait_queue.append(merge)
+ merge.addStartListener(self._system_merge_started)
+ else:
+ self._task_queues.merge.add(merge)
+ merge.addExitListener(self._merge_exit)
+ self._status_display.merges = len(self._task_queues.merge)
+ else:
+ settings = build.settings
+ trees = self.trees
+ pkg = build.pkg
+ build_dir = settings.get("PORTAGE_BUILDDIR")
+ build_log = settings.get("PORTAGE_LOG_FILE")
+
+ self._failed_pkgs.append(self._failed_pkg(
+ build_dir=build_dir, build_log=build_log,
+ pkg=build.pkg,
+ returncode=build.returncode))
+ if not self._terminated_tasks:
+ self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
+ self._status_display.failed = len(self._failed_pkgs)
+ self._deallocate_config(build.settings)
+ check_build(settings, pkg, trees)
+ self._jobs -= 1
+ self._status_display.running = self._jobs
+ self._schedule()
+
+ def _extract_exit(self, build):
+ self._build_exit(build)
+
+ def _task_complete(self, pkg):
+ self._completed_tasks.add(pkg)
+ self._unsatisfied_system_deps.discard(pkg)
+ self._choose_pkg_return_early = False
+ blocker_db = self._blocker_db[pkg.root]
+ blocker_db.discardBlocker(pkg)
+
+ def _main_loop(self):
+ self._main_exit = self._event_loop.create_future()
+
+ if self._max_load is not None and \
+ self._loadavg_latency is not None and \
+ (self._max_jobs is True or self._max_jobs > 1):
+ # We have to schedule periodically, in case the load
+ # average has changed since the last call.
+ self._main_loadavg_handle = self._event_loop.call_later(
+ self._loadavg_latency, self._schedule)
+
+ self._schedule()
+ self._event_loop.run_until_complete(self._main_exit)
+
+ def _merge(self):
+
+ if self._opts_no_background.intersection(self.myopts):
+ self._set_max_jobs(1)
+
+ self._add_prefetchers()
+ self._add_packages()
+ failed_pkgs = self._failed_pkgs
+ portage.locks._quiet = self._background
+ portage.elog.add_listener(self._elog_listener)
+
+ def display_callback():
+ self._status_display.display()
+ display_callback.handle = self._event_loop.call_later(
+ self._max_display_latency, display_callback)
+ display_callback.handle = None
+
+ if self._status_display._isatty and not self._status_display.quiet:
+ display_callback()
+ rval = os.EX_OK
+
+ try:
+ self._main_loop()
+ finally:
+ self._main_loop_cleanup()
+ portage.locks._quiet = False
+ portage.elog.remove_listener(self._elog_listener)
+ if display_callback.handle is not None:
+ display_callback.handle.cancel()
+ if failed_pkgs:
+ rval = failed_pkgs[-1].returncode
+
+ return rval
+
+ def _main_loop_cleanup(self):
+ del self._pkg_queue[:]
+ self._completed_tasks.clear()
+ self._deep_system_deps.clear()
+ self._unsatisfied_system_deps.clear()
+ self._choose_pkg_return_early = False
+ self._status_display.reset()
+ self._digraph = None
+ self._task_queues.fetch.clear()
+ self._prefetchers.clear()
+ self._main_exit = None
+ if self._main_loadavg_handle is not None:
+ self._main_loadavg_handle.cancel()
+ self._main_loadavg_handle = None
+ if self._job_delay_timeout_id is not None:
+ self._job_delay_timeout_id.cancel()
+ self._job_delay_timeout_id = None
+ if self._schedule_merge_wakeup_task is not None:
+ self._schedule_merge_wakeup_task.cancel()
+ self._schedule_merge_wakeup_task = None
+
+ def _choose_pkg(self):
+ """
+ Choose a task that has all its dependencies satisfied. This is used
+ for parallel build scheduling, and ensures that we don't build
+ anything with deep dependencies that have yet to be merged.
+ """
+
+ if self._choose_pkg_return_early:
+ return None
+
+ if self._digraph is None:
+ if self._is_work_scheduled() and \
+ not ("--nodeps" in self.myopts and \
+ (self._max_jobs is True or self._max_jobs > 1)):
+ self._choose_pkg_return_early = True
+ return None
+ return self._pkg_queue.pop(0)
+
+ if not self._is_work_scheduled():
+ return self._pkg_queue.pop(0)
+
+ self._prune_digraph()
+
+ chosen_pkg = None
+
+ # Prefer uninstall operations when available.
+ graph = self._digraph
+ for pkg in self._pkg_queue:
+ if pkg.operation == 'uninstall' and \
+ not graph.child_nodes(pkg):
+ chosen_pkg = pkg
+ break
+
+ if chosen_pkg is None:
+ later = set(self._pkg_queue)
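+			# As the queue is walked, "later" always holds the packages that come
+			# after the current candidate, so merges that are merely scheduled
+			# behind it are not treated as blocking dependencies.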
+ for pkg in self._pkg_queue:
+ later.remove(pkg)
+ if not self._dependent_on_scheduled_merges(pkg, later):
+ chosen_pkg = pkg
+ break
+
+ if chosen_pkg is not None:
+ self._pkg_queue.remove(chosen_pkg)
+
+ if chosen_pkg is None:
+ # There's no point in searching for a package to
+ # choose until at least one of the existing jobs
+ # completes.
+ self._choose_pkg_return_early = True
+
+ return chosen_pkg
+
+ def _dependent_on_scheduled_merges(self, pkg, later):
+ """
+		Traverse the subgraph of the given package's deep dependencies
+ to see if it contains any scheduled merges.
+ @param pkg: a package to check dependencies for
+ @type pkg: Package
+ @param later: packages for which dependence should be ignored
+ since they will be merged later than pkg anyway and therefore
+ delaying the merge of pkg will not result in a more optimal
+ merge order
+ @type later: set
+ @rtype: bool
+ @return: True if the package is dependent, False otherwise.
+ """
+
+ graph = self._digraph
+ completed_tasks = self._completed_tasks
+
+ dependent = False
+ traversed_nodes = set([pkg])
+ direct_deps = graph.child_nodes(pkg)
+ node_stack = direct_deps
+ direct_deps = frozenset(direct_deps)
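+		# Iterative depth-first walk over the dependency subgraph. The
+		# frozenset of direct deps lets the check below treat an uninstall
+		# node differently depending on whether pkg depends on it directly.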
+ while node_stack:
+ node = node_stack.pop()
+ if node in traversed_nodes:
+ continue
+ traversed_nodes.add(node)
+ if not ((node.installed and node.operation == "nomerge") or \
+ (node.operation == "uninstall" and \
+ node not in direct_deps) or \
+ node in completed_tasks or \
+ node in later):
+ dependent = True
+ break
+
+ # Don't traverse children of uninstall nodes since
+ # those aren't dependencies in the usual sense.
+ if node.operation != "uninstall":
+ node_stack.extend(graph.child_nodes(node))
+
+ return dependent
+
+ def _allocate_config(self, root):
+ """
+ Allocate a unique config instance for a task in order
+ to prevent interference between parallel tasks.
+ """
+ if self._config_pool[root]:
+ temp_settings = self._config_pool[root].pop()
+ else:
+ temp_settings = portage.config(clone=self.pkgsettings[root])
+ # Since config.setcpv() isn't guaranteed to call config.reset() due to
+ # performance reasons, call it here to make sure all settings from the
+ # previous package get flushed out (such as PORTAGE_LOG_FILE).
+ temp_settings.reload()
+ temp_settings.reset()
+ return temp_settings
+
+ def _deallocate_config(self, settings):
+ self._config_pool[settings['EROOT']].append(settings)
+
+ def _keep_scheduling(self):
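+		# Keep scheduling while we have not been terminated, packages remain
+		# queued, and nothing has failed; a failure only stops scheduling when
+		# we are not in fetch-only mode (--keep-going recovery is handled by
+		# the caller's retry loop).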
+ return bool(not self._terminated.is_set() and self._pkg_queue and \
+ not (self._failed_pkgs and not self._build_opts.fetchonly))
+
+ def _is_work_scheduled(self):
+ return bool(self._running_tasks)
+
+ def _running_job_count(self):
+ return self._jobs
+
+ def _schedule_tasks(self):
+
+ while True:
+
+ state_change = 0
+
+ # When the number of jobs and merges drops to zero,
+ # process a single merge from _merge_wait_queue if
+ # it's not empty. We only process one since these are
+ # special packages and we want to ensure that
+ # parallel-install does not cause more than one of
+ # them to install at the same time.
+ if (self._merge_wait_queue and not self._jobs and
+ not self._task_queues.merge):
+ task = self._merge_wait_queue.popleft()
+ task.scheduler = self._sched_iface
+ self._merge_wait_scheduled.append(task)
+ self._task_queues.merge.add(task)
+ task.addExitListener(self._merge_wait_exit_handler)
+ self._status_display.merges = len(self._task_queues.merge)
+ state_change += 1
+
+ if self._schedule_tasks_imp():
+ state_change += 1
+
+ self._status_display.display()
+
+ # Cancel prefetchers if they're the only reason
+ # the main poll loop is still running.
+ if self._failed_pkgs and not self._build_opts.fetchonly and \
+ not self._is_work_scheduled() and \
+ self._task_queues.fetch:
+ # Since this happens asynchronously, it doesn't count in
+ # state_change (counting it triggers an infinite loop).
+ self._task_queues.fetch.clear()
+
+ if not (state_change or \
+ (self._merge_wait_queue and not self._jobs and
+ not self._task_queues.merge)):
+ break
+
+ if not (self._is_work_scheduled() or
+ self._keep_scheduling() or self._main_exit.done()):
+ self._main_exit.set_result(None)
+ elif self._main_loadavg_handle is not None:
+ self._main_loadavg_handle.cancel()
+ self._main_loadavg_handle = self._event_loop.call_later(
+ self._loadavg_latency, self._schedule)
+
+ # Failure to schedule *after* self._task_queues.merge becomes
+ # empty will cause the scheduler to hang as in bug 711322.
+ # Do not rely on scheduling which occurs via the _merge_exit
+ # method, since the order of callback invocation may cause
+ # self._task_queues.merge to appear non-empty when it is
+ # about to become empty.
+ if (self._task_queues.merge and (self._schedule_merge_wakeup_task is None
+ or self._schedule_merge_wakeup_task.done())):
+ self._schedule_merge_wakeup_task = asyncio.ensure_future(
+ self._task_queues.merge.wait(), loop=self._event_loop)
+ self._schedule_merge_wakeup_task.add_done_callback(
+ self._schedule_merge_wakeup)
+
+ def _schedule_merge_wakeup(self, future):
+ if not future.cancelled():
+ future.result()
+ if self._main_exit is not None and not self._main_exit.done():
+ self._schedule()
+
+ def _sigcont_handler(self, signum, frame):
+ self._sigcont_time = time.time()
+
+ def _job_delay(self):
+ """
+ @rtype: bool
+ @return: True if job scheduling should be delayed, False otherwise.
+ """
+
+ if self._jobs and self._max_load is not None:
+
+ current_time = time.time()
+
+ if self._sigcont_time is not None:
+
+ elapsed_seconds = current_time - self._sigcont_time
+ # elapsed_seconds < 0 means the system clock has been adjusted
+ if elapsed_seconds > 0 and \
+ elapsed_seconds < self._sigcont_delay:
+
+ if self._job_delay_timeout_id is not None:
+ self._job_delay_timeout_id.cancel()
+
+ self._job_delay_timeout_id = self._event_loop.call_later(
+ self._sigcont_delay - elapsed_seconds,
+ self._schedule)
+ return True
+
+ # Only set this to None after the delay has expired,
+ # since this method may be called again before the
+ # delay has expired.
+ self._sigcont_time = None
+
+ try:
+ avg1, avg5, avg15 = getloadavg()
+ except OSError:
+ return False
+
+ delay = self._job_delay_max * avg1 / self._max_load
+ if delay > self._job_delay_max:
+ delay = self._job_delay_max
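+			# Illustrative example (hypothetical values, not the defaults): with
+			# _job_delay_max = 5, _max_load = 4.0 and a 1-minute load average of
+			# 3.0, delay = 5 * 3.0 / 4.0 = 3.75 seconds; the cap above keeps the
+			# delay from ever exceeding _job_delay_max.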
+ elapsed_seconds = current_time - self._previous_job_start_time
+ # elapsed_seconds < 0 means the system clock has been adjusted
+ if elapsed_seconds > 0 and elapsed_seconds < delay:
+
+ if self._job_delay_timeout_id is not None:
+ self._job_delay_timeout_id.cancel()
+
+ self._job_delay_timeout_id = self._event_loop.call_later(
+ delay - elapsed_seconds, self._schedule)
+ return True
+
+ return False
+
+ def _schedule_tasks_imp(self):
+ """
+ @rtype: bool
+ @return: True if state changed, False otherwise.
+ """
+
+ state_change = 0
+
+ while True:
+
+ if not self._keep_scheduling():
+ return bool(state_change)
+
+ if self._choose_pkg_return_early or \
+ self._merge_wait_scheduled or \
+ (self._jobs and self._unsatisfied_system_deps) or \
+ not self._can_add_job() or \
+ self._job_delay():
+ return bool(state_change)
+
+ pkg = self._choose_pkg()
+ if pkg is None:
+ return bool(state_change)
+
+ state_change += 1
+
+ if not pkg.installed:
+ self._pkg_count.curval += 1
+
+ task = self._task(pkg)
+
+ if pkg.installed:
+ merge = PackageMerge(merge=task, scheduler=self._sched_iface)
+ self._running_tasks[id(merge)] = merge
+ self._task_queues.merge.addFront(merge)
+ merge.addExitListener(self._merge_exit)
+
+ elif pkg.built:
+ self._jobs += 1
+ self._previous_job_start_time = time.time()
+ self._status_display.running = self._jobs
+ self._running_tasks[id(task)] = task
+ task.scheduler = self._sched_iface
+ self._task_queues.jobs.add(task)
+ task.addExitListener(self._extract_exit)
+
+ else:
+ self._jobs += 1
+ self._previous_job_start_time = time.time()
+ self._status_display.running = self._jobs
+ self._running_tasks[id(task)] = task
+ task.scheduler = self._sched_iface
+ self._task_queues.jobs.add(task)
+ task.addExitListener(self._build_exit)
+
+ return bool(state_change)
+
+ def _task(self, pkg):
+
+ pkg_to_replace = None
+ if pkg.operation != "uninstall":
+ vardb = pkg.root_config.trees["vartree"].dbapi
+ previous_cpv = [x for x in vardb.match(pkg.slot_atom) \
+ if portage.cpv_getkey(x) == pkg.cp]
+ if not previous_cpv and vardb.cpv_exists(pkg.cpv):
+ # same cpv, different SLOT
+ previous_cpv = [pkg.cpv]
+ if previous_cpv:
+ previous_cpv = previous_cpv.pop()
+ pkg_to_replace = self._pkg(previous_cpv,
+ "installed", pkg.root_config, installed=True,
+ operation="uninstall")
+
+ try:
+ prefetcher = self._prefetchers.pop(pkg, None)
+ except KeyError:
+ # KeyError observed with PyPy 1.8, despite None given as default.
+ # Note that PyPy 1.8 has the same WeakValueDictionary code as
+ # CPython 2.7, so it may be possible for CPython to raise KeyError
+ # here as well.
+ prefetcher = None
+ if prefetcher is not None and not prefetcher.isAlive():
+ try:
+ self._task_queues.fetch._task_queue.remove(prefetcher)
+ except ValueError:
+ pass
+ prefetcher = None
+
+ task = MergeListItem(args_set=self._args_set,
+ background=self._background, binpkg_opts=self._binpkg_opts,
+ build_opts=self._build_opts,
+ config_pool=self._ConfigPool(pkg.root,
+ self._allocate_config, self._deallocate_config),
+ emerge_opts=self.myopts,
+ find_blockers=self._find_blockers(pkg), logger=self._logger,
+ mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
+ pkg_to_replace=pkg_to_replace,
+ prefetcher=prefetcher,
+ scheduler=self._sched_iface,
+ settings=self._allocate_config(pkg.root),
+ statusMessage=self._status_msg,
+ world_atom=self._world_atom)
+
+ return task
+
+ def _failed_pkg_msg(self, failed_pkg, action, preposition):
+ pkg = failed_pkg.pkg
+ msg = "%s to %s %s" % \
+ (bad("Failed"), action, colorize("INFORM", pkg.cpv))
+ if pkg.root_config.settings["ROOT"] != "/":
+ msg += " %s %s" % (preposition, pkg.root)
+
+ log_path = self._locate_failure_log(failed_pkg)
+ if log_path is not None:
+ msg += ", Log file:"
+ self._status_msg(msg)
+
+ if log_path is not None:
+ self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
+
+ def _status_msg(self, msg):
+ """
+ Display a brief status message (no newlines) in the status display.
+		This is called by tasks to provide feedback to the user. This
+		delegates responsibility for generating \r and \n control characters
+		to the status display, guaranteeing that lines are created or erased
+		when necessary and appropriate.
+
+ @type msg: str
+ @param msg: a brief status message (no newlines allowed)
+ """
+ if not self._background:
+ writemsg_level("\n")
+ self._status_display.displayMessage(msg)
+
+ def _save_resume_list(self):
+ """
+ Do this before verifying the ebuild Manifests since it might
+		be possible for the user to use --resume --skipfirst to get past
+ a non-essential package with a broken digest.
+ """
+ mtimedb = self._mtimedb
+
+ mtimedb["resume"] = {}
+ # Stored as a dict starting with portage-2.1.6_rc1, and supported
+ # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
+ # a list type for options.
+ mtimedb["resume"]["myopts"] = self.myopts.copy()
+
+ # Convert Atom instances to plain str.
+ mtimedb["resume"]["favorites"] = [str(x) for x in self._favorites]
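+		# Only Package entries with operation == "merge" are recorded here;
+		# _do_merge_exit() removes each entry as it completes, so a later
+		# --resume replays exactly the merges that are still outstanding.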
+ mtimedb["resume"]["mergelist"] = [list(x) \
+ for x in self._mergelist \
+ if isinstance(x, Package) and x.operation == "merge"]
+
+ mtimedb.commit()
+
+ def _calc_resume_list(self):
+ """
+ Use the current resume list to calculate a new one,
+ dropping any packages with unsatisfied deps.
+ @rtype: bool
+ @return: True if successful, False otherwise.
+ """
+ print(colorize("GOOD", "*** Resuming merge..."))
+
+ # free some memory before creating
+ # the resume depgraph
+ self._destroy_graph()
+
+ myparams = create_depgraph_params(self.myopts, None)
+ success = False
+ e = None
+ try:
+ success, mydepgraph, dropped_tasks = resume_depgraph(
+ self.settings, self.trees, self._mtimedb, self.myopts,
+ myparams, self._spinner)
+ except depgraph.UnsatisfiedResumeDep as exc:
+ # rename variable to avoid python-3.0 error:
+ # SyntaxError: can not delete variable 'e' referenced in nested
+ # scope
+ e = exc
+ mydepgraph = e.depgraph
+ dropped_tasks = {}
+
+ if e is not None:
+ def unsatisfied_resume_dep_msg():
+ mydepgraph.display_problems()
+ out = portage.output.EOutput()
+ out.eerror("One or more packages are either masked or " + \
+ "have missing dependencies:")
+ out.eerror("")
+ indent = " "
+ show_parents = set()
+ for dep in e.value:
+ if dep.parent in show_parents:
+ continue
+ show_parents.add(dep.parent)
+ if dep.atom is None:
+ out.eerror(indent + "Masked package:")
+ out.eerror(2 * indent + str(dep.parent))
+ out.eerror("")
+ else:
+ out.eerror(indent + str(dep.atom) + " pulled in by:")
+ out.eerror(2 * indent + str(dep.parent))
+ out.eerror("")
+ msg = "The resume list contains packages " + \
+ "that are either masked or have " + \
+ "unsatisfied dependencies. " + \
+ "Please restart/continue " + \
+ "the operation manually, or use --skipfirst " + \
+ "to skip the first package in the list and " + \
+ "any other packages that may be " + \
+ "masked or have missing dependencies."
+ for line in textwrap.wrap(msg, 72):
+ out.eerror(line)
+ self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
+ return False
+
+ if success and self._show_list():
+ mydepgraph.display(mydepgraph.altlist(), favorites=self._favorites)
+
+ if not success:
+ self._post_mod_echo_msgs.append(mydepgraph.display_problems)
+ return False
+ mydepgraph.display_problems()
+ self._init_graph(mydepgraph.schedulerGraph())
+
+ msg_width = 75
+ for task, atoms in dropped_tasks.items():
+ if not (isinstance(task, Package) and task.operation == "merge"):
+ continue
+ pkg = task
+ msg = "emerge --keep-going:" + \
+ " %s" % (pkg.cpv,)
+ if pkg.root_config.settings["ROOT"] != "/":
+ msg += " for %s" % (pkg.root,)
+ if not atoms:
+ msg += " dropped because it is masked or unavailable"
+ else:
+ msg += " dropped because it requires %s" % ", ".join(atoms)
+ for line in textwrap.wrap(msg, msg_width):
+ eerror(line, phase="other", key=pkg.cpv)
+ settings = self.pkgsettings[pkg.root]
+ # Ensure that log collection from $T is disabled inside
+ # elog_process(), since any logs that might exist are
+ # not valid here.
+ settings.pop("T", None)
+ portage.elog.elog_process(pkg.cpv, settings)
+ self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
+
+ return True
+
+ def _show_list(self):
+ myopts = self.myopts
+ if "--quiet" not in myopts and \
+ ("--ask" in myopts or "--tree" in myopts or \
+ "--verbose" in myopts):
+ return True
+ return False
+
+ def _world_atom(self, pkg):
+ """
+		Add the package to or remove it from the world file, but only if
+ it's supposed to be added or removed. Otherwise, do nothing.
+ """
+
+ if set(("--buildpkgonly", "--fetchonly",
+ "--fetch-all-uri",
+ "--oneshot", "--onlydeps",
+ "--pretend")).intersection(self.myopts):
+ return
+
+ if pkg.root != self.target_root:
+ return
+
+ args_set = self._args_set
+ if not args_set.findAtomForPackage(pkg):
+ return
+
+ logger = self._logger
+ pkg_count = self._pkg_count
+ root_config = pkg.root_config
+ world_set = root_config.sets["selected"]
+ world_locked = False
+ atom = None
+
+ if pkg.operation != "uninstall":
+ atom = self._world_atoms.get(pkg)
+
+ try:
+
+ if hasattr(world_set, "lock"):
+ world_set.lock()
+ world_locked = True
+
+ if hasattr(world_set, "load"):
+ world_set.load() # maybe it's changed on disk
+
+ if pkg.operation == "uninstall":
+ if hasattr(world_set, "cleanPackage"):
+ world_set.cleanPackage(pkg.root_config.trees["vartree"].dbapi,
+ pkg.cpv)
+ if hasattr(world_set, "remove"):
+ for s in pkg.root_config.setconfig.active:
+ world_set.remove(SETPREFIX+s)
+ else:
+ if atom is not None:
+ if hasattr(world_set, "add"):
+ self._status_msg(('Recording %s in "world" ' + \
+ 'favorites file...') % atom)
+ logger.log(" === (%s of %s) Updating world file (%s)" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv))
+ world_set.add(atom)
+ else:
+ writemsg_level('\n!!! Unable to record %s in "world"\n' % \
+ (atom,), level=logging.WARN, noiselevel=-1)
+ finally:
+ if world_locked:
+ world_set.unlock()
+
+ def _pkg(self, cpv, type_name, root_config, installed=False,
+ operation=None, myrepo=None):
+ """
+ Get a package instance from the cache, or create a new
+ one if necessary. Raises KeyError from aux_get if it
+ failures for some reason (package does not exist or is
+		fails for some reason (package does not exist or is
+ """
+
+ # Reuse existing instance when available.
+ pkg = self._pkg_cache.get(Package._gen_hash_key(cpv=cpv,
+ type_name=type_name, repo_name=myrepo, root_config=root_config,
+ installed=installed, operation=operation))
+
+ if pkg is not None:
+ return pkg
+
+ tree_type = depgraph.pkg_tree_map[type_name]
+ db = root_config.trees[tree_type].dbapi
+ db_keys = list(self.trees[root_config.root][
+ tree_type].dbapi._aux_cache_keys)
+ metadata = zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo))
+ pkg = Package(built=(type_name != "ebuild"),
+ cpv=cpv, installed=installed, metadata=metadata,
+ root_config=root_config, type_name=type_name)
+ self._pkg_cache[pkg] = pkg
+ return pkg
diff --git a/gosbs/_emerge/actions.py b/gosbs/_emerge/actions.py
index db230cc..20d56db 100644
--- a/gosbs/_emerge/actions.py
+++ b/gosbs/_emerge/actions.py
@@ -94,6 +94,7 @@ from _emerge.UnmergeDepPriority import UnmergeDepPriority
from _emerge.UseFlagDisplay import pkg_use_display
from _emerge.UserQuery import UserQuery
+from gosbs.portage import create_trees as gosbs_create_trees
from gosbs.builder.wrapper_depgraph import build_mydepgraph
if sys.hexversion >= 0x3000000:
@@ -2493,7 +2494,7 @@ def load_emerge_config(emerge_config=None, env=None, **kargs):
v = env.get(envvar)
if v is not None:
kwargs[k] = v
- emerge_config.trees = portage.create_trees(trees=emerge_config.trees,
+ emerge_config.trees = gosbs_create_trees(trees=emerge_config.trees,
**kwargs)
for root_trees in emerge_config.trees.values():
diff --git a/gosbs/builder/binary.py b/gosbs/builder/binary.py
new file mode 100644
index 0000000..f0c55b9
--- /dev/null
+++ b/gosbs/builder/binary.py
@@ -0,0 +1,296 @@
+# Copyright 1999-2020 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+from datetime import datetime
+import os
+import pytz
+
+import portage
+from portage.cache.mappings import slot_dict_class
+
+from oslo_log import log as logging
+from gosbs import objects
+import gosbs.conf
+
+CONF = gosbs.conf.CONF
+LOG = logging.getLogger(__name__)
+
+def destroy_local_binary(context, build_job, project_db, service_uuid, mysettings):
+ filters = {
+ 'ebuild_uuid' : build_job['ebuild'].uuid,
+ 'project_uuid' : project_db.uuid,
+ 'service_uuid' : service_uuid,
+ }
+ for local_binary_db in objects.binary.BinaryList.get_all(context, filters=filters):
+ local_binary_db.destroy(context)
+ binfile = mysettings['PKGDIR'] + "/" + build_job['cpv'] + ".tbz2"
+ try:
+ os.remove(binfile)
+    except OSError:
+        LOG.error("Package file could not be removed or was not found: %s", binfile)
+
+def touch_pkg_in_db(context, pkg, objectsstor=False):
+ if objectsstor:
+ service_ref = objects.Service.get_by_topic(context, 'scheduler')
+ else:
+ service_ref = objects.Service.get_by_host_and_topic(context, CONF.host, 'builder')
+ filters = { 'build_id' : pkg.cpv.build_id,
+ 'service_uuid' : service_ref.uuid,
+ }
+ local_binary_db = objects.Binary.get_by_cpv(context, pkg.cpv, filters=filters)
+ if local_binary_db is None:
+ return
+    local_binary_db.updated_at = datetime.now(pytz.UTC)
+ local_binary_db.save(context)
+ LOG.info('Touching %s in the binary database', pkg.cpv)
+
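+# Note: this class keeps the header/packages attributes and the read(),
+# readHeader(), readBody() and write() entry points of the Packages-file
+# index used by portage's bintree code, but the data is read from and
+# written to the gosbs database (Binary and BinaryHeader objects) instead
+# of the on-disk Packages file.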
+class PackageIndex(object):
+
+ def __init__(self,
+ allowed_pkg_keys=None,
+ default_header_data=None,
+ default_pkg_data=None,
+ inherited_keys=None,
+ translated_keys=None,
+ objectsstor=False,
+ context=None,
+ ):
+
+ self._pkg_slot_dict = None
+ if allowed_pkg_keys is not None:
+ self._pkg_slot_dict = slot_dict_class(allowed_pkg_keys)
+
+ self._default_header_data = default_header_data
+ self._default_pkg_data = default_pkg_data
+ self._inherited_keys = inherited_keys
+ self._write_translation_map = {}
+ self._read_translation_map = {}
+ if translated_keys:
+ self._write_translation_map.update(translated_keys)
+ self._read_translation_map.update(((y, x) for (x, y) in translated_keys))
+ self.header = {}
+ if self._default_header_data:
+ self.header.update(self._default_header_data)
+ self.packages = []
+ self.modified = True
+ self.context = context
+ self.project_ref = objects.Project.get_by_name(self.context, CONF.builder.project)
+ if objectsstor:
+ self.service_ref = objects.Service.get_by_topic(self.context, 'scheduler')
+ else:
+ self.service_ref = objects.Service.get_by_host_and_topic(self.context, CONF.host, 'builder')
+
+ def _read_header_from_db(self):
+ binary_header_db = objects.BinaryHeader.get_by_service_uuid(self.context, self.service_ref.uuid)
+ if binary_header_db is None:
+ return self.header
+ header = {}
+ header['repository'] = binary_header_db.repository
+ header['ARCH'] = binary_header_db.arch
+ header['ACCEPT_KEYWORDS'] = binary_header_db.accept_keywords
+ header['ACCEPT_LICENSE'] = binary_header_db.accept_license
+ header['ACCEPT_PROPERTIES'] = binary_header_db.accept_properties
+ header['ACCEPT_RESTRICT'] = binary_header_db.accept_restrict
+ header['CBUILD'] = binary_header_db.cbuild
+ header['CONFIG_PROTECT'] = binary_header_db.config_protect
+ header['CONFIG_PROTECT_MASK'] = binary_header_db.config_protect_mask
+ header['FEATURES'] = binary_header_db.features
+ header['GENTOO_MIRRORS'] = binary_header_db.gentoo_mirrors
+ #binary_header_db.install_mask = header[]
+ header['IUSE_IMPLICIT'] = binary_header_db.iuse_implicit
+ header['USE'] = binary_header_db.use
+ header['USE_EXPAND'] = binary_header_db.use_expand
+ header['USE_EXPAND_HIDDEN'] = binary_header_db.use_expand_hidden
+ header['USE_EXPAND_IMPLICIT'] = binary_header_db.use_expand_implicit
+ header['USE_EXPAND_UNPREFIXED'] = binary_header_db.use_expand_unprefixed
+ header['USE_EXPAND_VALUES_ARCH'] = binary_header_db.use_expand_values_arch
+ header['USE_EXPAND_VALUES_ELIBC'] = binary_header_db.use_expand_values_elibc
+ header['USE_EXPAND_VALUES_KERNEL'] = binary_header_db.use_expand_values_kernel
+ header['USE_EXPAND_VALUES_USERLAND'] = binary_header_db.use_expand_values_userland
+ header['ELIBC'] = binary_header_db.elibc
+ header['KERNEL'] = binary_header_db.kernel
+ header['USERLAND'] = binary_header_db.userland
+ header['PACKAGES'] = binary_header_db.packages
+ header['PROFILE'] = binary_header_db.profile
+ header['VERSION'] = binary_header_db.version
+ header['TIMESTAMP'] = binary_header_db.updated_at
+ return header
+
+ def _read_pkg_from_db(self, binary_db):
+ pkg = {}
+ pkg['repository'] = binary_db.repository
+ pkg['CPV'] = binary_db.cpv
+ pkg['RESTRICT'] = binary_db.restrictions
+ pkg['DEPEND'] = binary_db.depend
+ pkg['BDEPEND'] = binary_db.bdepend
+ pkg['RDEPEND'] = binary_db.rdepend
+ pkg['PDEPEND'] = binary_db.pdepend
+ pkg['_mtime_'] = binary_db.mtime
+ pkg['LICENSE'] = binary_db.license
+ pkg['CHOST'] = binary_db.chost
+ pkg['SHA1'] = binary_db.sha1
+ pkg['DEFINED_PHASES'] = binary_db.defined_phases
+ pkg['SIZE'] = binary_db.size
+ pkg['EAPI'] = binary_db.eapi
+ pkg['PATH'] = binary_db.path
+ pkg['BUILD_ID'] = binary_db.build_id
+ pkg['SLOT'] = binary_db.slot
+ pkg['MD5'] = binary_db.md5
+ pkg['BUILD_TIME'] = binary_db.build_time
+ pkg['IUSE'] = binary_db.iuses
+ pkg['PROVIDES'] = binary_db.provides
+ pkg['KEYWORDS'] = binary_db.keywords
+ pkg['REQUIRES'] = binary_db.requires
+ pkg['USE'] = binary_db.uses
+ return pkg
+
+ def read(self):
+ self.readHeader()
+ self.readBody()
+
+ def readHeader(self):
+ self.header.update(self._read_header_from_db())
+
+ def readBody(self):
+ filters = {
+ 'service_uuid' : self.service_ref.uuid,
+ }
+ for binary_db in objects.BinaryList.get_all(self.context, filters=filters):
+ self.packages.append(self._read_pkg_from_db(binary_db))
+
+ def _write_header_to_db(self):
+ binary_header_db = objects.BinaryHeader()
+ binary_header_db.project_uuid = self.project_ref.uuid
+ binary_header_db.service_uuid = self.service_ref.uuid
+ binary_header_db.repository = self.header['repository']
+ binary_header_db.arch = self.header['ARCH']
+ binary_header_db.accept_keywords = self.header['ACCEPT_KEYWORDS']
+ binary_header_db.accept_license = self.header['ACCEPT_LICENSE']
+ binary_header_db.accept_properties = self.header['ACCEPT_PROPERTIES']
+ binary_header_db.accept_restrict = self.header['ACCEPT_RESTRICT']
+ binary_header_db.cbuild = self.header['CBUILD']
+ binary_header_db.config_protect = self.header['CONFIG_PROTECT']
+ binary_header_db.config_protect_mask = self.header['CONFIG_PROTECT_MASK']
+ binary_header_db.features = self.header['FEATURES']
+ binary_header_db.gentoo_mirrors = self.header['GENTOO_MIRRORS']
+ #binary_header_db.install_mask = header[]
+ binary_header_db.iuse_implicit = self.header['IUSE_IMPLICIT']
+ binary_header_db.use = self.header['USE']
+ binary_header_db.use_expand = self.header['USE_EXPAND']
+ binary_header_db.use_expand_hidden = self.header['USE_EXPAND_HIDDEN']
+ binary_header_db.use_expand_implicit = self.header['USE_EXPAND_IMPLICIT']
+ binary_header_db.use_expand_unprefixed = self.header['USE_EXPAND_UNPREFIXED']
+ binary_header_db.use_expand_values_arch = self.header['USE_EXPAND_VALUES_ARCH']
+ binary_header_db.use_expand_values_elibc = self.header['USE_EXPAND_VALUES_ELIBC']
+ binary_header_db.use_expand_values_kernel = self.header['USE_EXPAND_VALUES_KERNEL']
+ binary_header_db.use_expand_values_userland = self.header['USE_EXPAND_VALUES_USERLAND']
+ binary_header_db.elibc = self.header['ELIBC']
+ binary_header_db.kernel = self.header['KERNEL']
+ binary_header_db.userland = self.header['USERLAND']
+ binary_header_db.packages = self.header['PACKAGES']
+ binary_header_db.profile = self.header['PROFILE']
+ binary_header_db.version = self.header['VERSION']
+        binary_header_db.updated_at = datetime.now(pytz.UTC)
+ binary_header_db.create(self.context)
+
+ def _update_header_in_db(self, binary_header_db):
+ binary_header_db.repository = self.header['repository']
+ binary_header_db.arch = self.header['ARCH']
+ binary_header_db.accept_keywords = self.header['ACCEPT_KEYWORDS']
+ binary_header_db.accept_license = self.header['ACCEPT_LICENSE']
+ binary_header_db.accept_properties = self.header['ACCEPT_PROPERTIES']
+        binary_header_db.accept_restrict = self.header['ACCEPT_RESTRICT']
+ binary_header_db.cbuild = self.header['CBUILD']
+ binary_header_db.config_protect = self.header['CONFIG_PROTECT']
+ binary_header_db.config_protect_mask = self.header['CONFIG_PROTECT_MASK']
+ binary_header_db.features = self.header['FEATURES']
+ binary_header_db.gentoo_mirrors = self.header['GENTOO_MIRRORS']
+ #binary_header_db.install_mask = header[]
+ binary_header_db.iuse_implicit = self.header['IUSE_IMPLICIT']
+ binary_header_db.use = self.header['USE']
+ binary_header_db.use_expand = self.header['USE_EXPAND']
+ binary_header_db.use_expand_hidden = self.header['USE_EXPAND_HIDDEN']
+ binary_header_db.use_expand_implicit = self.header['USE_EXPAND_IMPLICIT']
+ binary_header_db.use_expand_unprefixed = self.header['USE_EXPAND_UNPREFIXED']
+ binary_header_db.use_expand_values_arch = self.header['USE_EXPAND_VALUES_ARCH']
+ binary_header_db.use_expand_values_elibc = self.header['USE_EXPAND_VALUES_ELIBC']
+ binary_header_db.use_expand_values_kernel = self.header['USE_EXPAND_VALUES_KERNEL']
+ binary_header_db.use_expand_values_userland = self.header['USE_EXPAND_VALUES_USERLAND']
+ binary_header_db.elibc = self.header['ELIBC']
+ binary_header_db.kernel = self.header['KERNEL']
+ binary_header_db.userland = self.header['USERLAND']
+ binary_header_db.packages = self.header['PACKAGES']
+ binary_header_db.profile = self.header['PROFILE']
+ binary_header_db.version = self.header['VERSION']
+ binary_header_db.save(self.context)
+
+ def _update_header_packages(self):
+ header = self.header
+ header['PACKAGES'] = len(self.packages)
+ return header
+
+ def _update_header(self):
+ self.header.update(self._update_header_packages())
+ binary_header_db = objects.BinaryHeader.get_by_service_uuid(self.context, self.service_ref.uuid)
+ if binary_header_db is None:
+ self._write_header_to_db()
+ LOG.info('Adding header to the binary database')
+ return
+ self._update_header_in_db(binary_header_db)
+        LOG.info('Updating header in the binary database')
+
+ def _write_pkg_to_db(self, pkg):
+ ebuild_version_tree = portage.versions.cpv_getversion(pkg['CPV'])
+ cp = portage.versions.cpv_getkey(pkg['CPV']).split('/')
+ category_db = objects.category.Category.get_by_name(self.context, cp[0])
+ repo_db = objects.repo.Repo.get_by_name(self.context, pkg['repository'])
+ filters = { 'repo_uuid' : repo_db.uuid,
+ 'category_uuid' : category_db.uuid,
+ }
+ package_db = objects.package.Package.get_by_name(self.context, cp[1], filters=filters)
+ filters = { 'package_uuid' : package_db.uuid,
+ }
+ ebuild_db = objects.ebuild.Ebuild.get_by_name(self.context, ebuild_version_tree, filters=filters)
+ local_binary_db = objects.binary.Binary()
+ local_binary_db.ebuild_uuid = ebuild_db.uuid
+ local_binary_db.repository = pkg['repository']
+ local_binary_db.project_uuid = self.project_ref.uuid
+ local_binary_db.service_uuid = self.service_ref.uuid
+ local_binary_db.cpv = pkg['CPV']
+ local_binary_db.restrictions = pkg['RESTRICT']
+ local_binary_db.depend = pkg['DEPEND']
+ local_binary_db.bdepend = pkg['BDEPEND']
+ local_binary_db.rdepend = pkg['RDEPEND']
+ local_binary_db.pdepend = pkg['PDEPEND']
+ local_binary_db.mtime = pkg['_mtime_']
+ local_binary_db.license = pkg['LICENSE']
+ local_binary_db.chost = pkg['CHOST']
+ local_binary_db.sha1 = pkg['SHA1']
+ local_binary_db.defined_phases = pkg['DEFINED_PHASES']
+ local_binary_db.size = pkg['SIZE']
+ local_binary_db.eapi = pkg['EAPI']
+ local_binary_db.path = pkg['PATH']
+ local_binary_db.build_id = pkg['BUILD_ID']
+ local_binary_db.slot = pkg['SLOT']
+ local_binary_db.md5 = pkg['MD5']
+ local_binary_db.build_time = pkg['BUILD_TIME']
+ local_binary_db.iuses = pkg['IUSE']
+ local_binary_db.uses = pkg['USE']
+ local_binary_db.provides = pkg['PROVIDES']
+ local_binary_db.keywords = pkg['KEYWORDS']
+ local_binary_db.requires = pkg['REQUIRES']
+        local_binary_db.updated_at = datetime.now(pytz.UTC)
+ local_binary_db.create(self.context)
+
+ def write(self, pkg):
+ self._update_header()
+ if pkg is not None:
+ filters = { 'build_id' : pkg['BUILD_ID'],
+ 'service_uuid' : self.service_ref.uuid,
+ }
+ local_binary_db = objects.Binary.get_by_cpv(self.context, pkg['CPV'], filters=filters)
+ if local_binary_db is None:
+ self._write_pkg_to_db(pkg)
+ LOG.info('Adding %s to the binary database', pkg['CPV'])
+ return
+ LOG.info('%s is already in the binary database', pkg['CPV'])
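+
+# Illustrative usage sketch (not part of the module): a builder could publish
+# one freshly built package to the database-backed index roughly like this,
+# assuming an admin context `ctx` and a metadata dict `pkg_metadata` shaped
+# like the pkg dicts handled by _write_pkg_to_db() above:
+#
+#     pkgindex = PackageIndex(context=ctx, objectsstor=False)
+#     pkgindex.read()              # load header and package rows from the DB
+#     pkgindex.write(pkg_metadata) # refresh the header, add the entry if new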
diff --git a/gosbs/builder/build_checker.py b/gosbs/builder/build_checker.py
new file mode 100644
index 0000000..e92c33f
--- /dev/null
+++ b/gosbs/builder/build_checker.py
@@ -0,0 +1,17 @@
+# Copyright 1998-2016 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from oslo_log import log as logging
+
+from gosbs import objects
+import gosbs.conf
+from gosbs.context import get_admin_context
+
+CONF = gosbs.conf.CONF
+LOG = logging.getLogger(__name__)
+
+def check_build(settings, pkg, trees):
+ context = get_admin_context()
+ service_ref = objects.Service.get_by_host_and_topic(context, CONF.host, "builder")
+ project_db = objects.project.Project.get_by_name(context, CONF.builder.project)
+ project_metadata_db = objects.project_metadata.ProjectMetadata.get_by_uuid(context, project_db.uuid)
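+    # For now this hook only resolves the builder service, the project and
+    # its metadata; the looked-up records are not acted on yet.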
diff --git a/gosbs/common/binary.py b/gosbs/common/binary.py
deleted file mode 100644
index 7d6ac46..0000000
--- a/gosbs/common/binary.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright 1999-2020 Gentoo Authors
-# Distributed under the terms of the GNU General Public License v2
-
-from oslo_log import log as logging
-from gosbs import objects
-import gosbs.conf
-
-CONF = gosbs.conf.CONF
-LOG = logging.getLogger(__name__)
-
-def destroy_local_binary(context, build_job, project_db, service_uuid, mysettings):
- filters = {
- 'ebuild_uuid' : build_job['ebuild'].uuid,
- 'project_uuid' : project_db.uuid,
- 'service_uuid' : service_uuid,
- }
- for local_binary_db in objects.local_binary.LocalBinaryList.get_all(context, filters=filters):
- local_binary_db.destroy(context)
- binfile = mysettings['PKGDIR'] + "/" + build_job['cpv'] + ".tbz2"
- try:
- os.remove(binfile)
- except:
- LOG.error("Package file was not removed or not found: %s" % binfile)
-
-def destroy_objectstor_binary(context, build_job, project_db):
- filters = {
- 'ebuild_uuid' : build_job['ebuild'].uuid,
- 'project_uuid' : project_db.uuid,
- }
- for objectstor_binary_db in objects.objectstor_binary.ObjectStorBinaryList.get_all(context, filters=filters):
- objectstor_binary_db.destroy(context)
- # Fixme: remove the file on ObjectStor
diff --git a/gosbs/db/sqlalchemy/models.py b/gosbs/db/sqlalchemy/models.py
index 1f7d847..8ea9792 100644
--- a/gosbs/db/sqlalchemy/models.py
+++ b/gosbs/db/sqlalchemy/models.py
@@ -438,36 +438,85 @@ class ServicesRepos(BASE, NovaBase, models.TimestampMixin, models.SoftDeleteMixi
status = Column(Enum('failed', 'completed', 'in-progress', 'waiting', 'update_db', 'rebuild_db'),
nullable=True)
-class LocalBinarys(BASE, NovaBase, models.TimestampMixin):
+class Binarys(BASE, NovaBase, models.TimestampMixin, models.SoftDeleteMixin):
"""Represents an image in the datastore."""
- __tablename__ = 'local_binarys'
+ __tablename__ = 'binarys'
__table_args__ = (
)
uuid = Column(String(36), primary_key=True,
default=lambda: str(uuid.uuid4()))
- name = Column(String(255))
- project_uuid = Column(String(36), ForeignKey('projects.uuid'), nullable=False,
+ project_uuid = Column(String(36), ForeignKey('projects.uuid'),
default=lambda: str(uuid.uuid4()))
- ebuild_uuid = Column(String(36), ForeignKey('ebuilds.uuid'), nullable=False,
+ ebuild_uuid = Column(String(36), ForeignKey('ebuilds.uuid'),
default=lambda: str(uuid.uuid4()))
service_uuid = Column(String(36), ForeignKey('services.uuid'),
default=lambda: str(uuid.uuid4()))
- checksum = Column(String(200))
+ repository = Column(String(255), nullable=True)
+ cpv = Column(String(255), nullable=True)
+ depend = Column((Text), nullable=True)
+ bdepend = Column((Text), nullable=True)
+ rdepend = Column((Text), nullable=True)
+ pdepend = Column((Text), nullable=True)
+ mtime = Column(Integer)
+ license = Column(String(255), nullable=True)
+ chost = Column(String(255), nullable=True)
+ sha1 = Column(String(255), nullable=True)
+ defined_phases = Column((Text), nullable=True)
+ size = Column(Integer)
+ eapi = Column(String(255), nullable=True)
+ path = Column(String(255), nullable=True)
+ build_id = Column(Integer)
+ slot = Column(String(255), nullable=True)
+ md5 = Column(String(255), nullable=True)
+ build_time = Column(Integer)
+ iuses = Column((Text), nullable=True)
+ uses = Column((Text), nullable=True)
+ provides = Column((Text), nullable=True)
+ keywords = Column((Text), nullable=True)
+ requires = Column((Text), nullable=True)
+ restrictions = Column((Text), nullable=True)
looked = Column(Boolean(), default=False)
-class ObjectStorBinarys(BASE, NovaBase, models.TimestampMixin):
+class BinarysHeaders(BASE, NovaBase, models.TimestampMixin, models.SoftDeleteMixin):
"""Represents an image in the datastore."""
- __tablename__ = 'objectstor_binarys'
+ __tablename__ = 'binarys_headers'
__table_args__ = (
)
uuid = Column(String(36), primary_key=True,
default=lambda: str(uuid.uuid4()))
- name = Column(String(255))
- project_uuid = Column(String(36), ForeignKey('projects.uuid'), nullable=False,
- default=lambda: str(uuid.uuid4()))
- ebuild_uuid = Column(String(36), ForeignKey('ebuilds.uuid'), nullable=False,
+ project_uuid = Column(String(36), ForeignKey('projects.uuid'),
default=lambda: str(uuid.uuid4()))
- checksum = Column(String(200))
+ service_uuid = Column(String(36), ForeignKey('services.uuid'),
+ default=lambda: str(uuid.uuid4()))
+ repository = Column((Text), nullable=True)
+ arch = Column(String(255), nullable=True)
+ accept_keywords = Column(String(255), nullable=True)
+ accept_license = Column((Text), nullable=True)
+ accept_properties = Column((Text), nullable=True)
+ accept_restrict = Column((Text), nullable=True)
+ cbuild = Column(String(255), nullable=True)
+ config_protect = Column((Text), nullable=True)
+ config_protect_mask = Column((Text), nullable=True)
+ features = Column((Text), nullable=True)
+ gentoo_mirrors = Column((Text), nullable=True)
+ #install_mask = Column((Text), nullable=True)
+ iuse_implicit = Column((Text), nullable=True)
+ use = Column((Text), nullable=True)
+ use_expand = Column((Text), nullable=True)
+ use_expand_hidden = Column((Text), nullable=True)
+ use_expand_implicit = Column((Text), nullable=True)
+ use_expand_unprefixed = Column((Text), nullable=True)
+ use_expand_values_arch = Column((Text), nullable=True)
+ use_expand_values_elibc = Column((Text), nullable=True)
+ use_expand_values_kernel = Column((Text), nullable=True)
+ use_expand_values_userland = Column((Text), nullable=True)
+ elibc = Column(String(255), nullable=True)
+ kernel = Column(String(255), nullable=True)
+ userland = Column(String(255), nullable=True)
+ packages = Column(Integer)
+ profile = Column(String(255), nullable=True)
+ version = Column(String(255), nullable=True)
looked = Column(Boolean(), default=False)
diff --git a/gosbs/objects/__init__.py b/gosbs/objects/__init__.py
index 50e2e38..b9a42c9 100644
--- a/gosbs/objects/__init__.py
+++ b/gosbs/objects/__init__.py
@@ -35,14 +35,15 @@ def register_all():
__import__('gosbs.objects.email')
__import__('gosbs.objects.keyword')
__import__('gosbs.objects.package')
- __import__('gosbs.objects.local_package')
- __import__('gosbs.objects.objectstor_package')
+ __import__('gosbs.objects.binary')
+ __import__('gosbs.objects.binary_header')
__import__('gosbs.objects.package_metadata')
__import__('gosbs.objects.package_email')
__import__('gosbs.objects.project')
__import__('gosbs.objects.project_metadata')
__import__('gosbs.objects.project_build')
__import__('gosbs.objects.project_repo')
+ __import__('gosbs.objects.project_option')
__import__('gosbs.objects.repo')
__import__('gosbs.objects.restriction')
__import__('gosbs.objects.task')
diff --git a/gosbs/objects/binary.py b/gosbs/objects/binary.py
new file mode 100644
index 0000000..7894ece
--- /dev/null
+++ b/gosbs/objects/binary.py
@@ -0,0 +1,350 @@
+# Copyright 2013 Red Hat, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_db import exception as db_exc
+from oslo_db.sqlalchemy import utils as sqlalchemyutils
+from oslo_versionedobjects import fields
+from oslo_utils import versionutils
+from sqlalchemy import or_
+from sqlalchemy.orm import joinedload
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql import true
+
+import gosbs.conf
+from gosbs.db.sqlalchemy import api as db_api
+from gosbs.db.sqlalchemy.api import require_context
+from gosbs.db.sqlalchemy import models
+from gosbs import exception
+from gosbs import objects
+from gosbs.objects import base
+
+CONF = gosbs.conf.CONF
+
+def _dict_with_extra_specs(model):
+ extra_specs = {}
+ return dict(model, extra_specs=extra_specs)
+
+
+@db_api.main_context_manager.writer
+def _binary_create(context, values):
+ db_binary = models.Binarys()
+ db_binary.update(values)
+
+ try:
+ db_binary.save(context.session)
+ except db_exc.DBDuplicateEntry as e:
+ if 'local_binaryuuid' in e.columns:
+ raise exception.ImagesIdExists(binary_uuid=values['binaryuuid'])
+ raise exception.ImagesExists(name=values['name'])
+ except Exception as e:
+ raise db_exc.DBError(e)
+
+ return _dict_with_extra_specs(db_binary)
+
+
+@db_api.main_context_manager.writer
+def _binary_destroy(context, binary_uuid=None, binaryuuid=None):
+ query = context.session.query(models.Binarys)
+
+ if binary_uuid is not None:
+ query.filter(models.Binarys.uuid == binary_uuid).delete()
+ else:
+ query.filter(models.Binarys.uuid == binaryuuid).delete()
+
+
+@base.NovaObjectRegistry.register
+class Binary(base.NovaObject, base.NovaObjectDictCompat, base.NovaPersistentObject):
+ # Version 1.0: Initial version
+
+ VERSION = '1.0'
+
+ fields = {
+ 'uuid' : fields.UUIDField(),
+ 'project_uuid' : fields.UUIDField(),
+ 'service_uuid' : fields.UUIDField(),
+ 'ebuild_uuid' : fields.UUIDField(),
+ 'repository' : fields.StringField(nullable=True),
+ 'cpv' : fields.StringField(nullable=True),
+ 'restrictions' : fields.StringField(nullable=True),
+ 'depend' : fields.StringField(nullable=True),
+ 'bdepend' : fields.StringField(nullable=True),
+ 'rdepend' : fields.StringField(nullable=True),
+ 'pdepend' : fields.StringField(nullable=True),
+ 'mtime' : fields.IntegerField(),
+ 'license' : fields.StringField(nullable=True),
+ 'chost' : fields.StringField(nullable=True),
+ 'sha1' : fields.StringField(nullable=True),
+ 'defined_phases' : fields.StringField(nullable=True),
+ 'size' : fields.IntegerField(),
+ 'eapi' : fields.StringField(nullable=True),
+ 'path' : fields.StringField(nullable=True),
+ 'build_id' : fields.IntegerField(),
+ 'slot' : fields.StringField(nullable=True),
+ 'md5' : fields.StringField(nullable=True),
+ 'build_time' : fields.IntegerField(),
+ 'iuses' : fields.StringField(nullable=True),
+ 'uses' : fields.StringField(nullable=True),
+ 'provides' : fields.StringField(nullable=True),
+ 'keywords' : fields.StringField(nullable=True),
+ 'requires' : fields.StringField(nullable=True),
+ 'looked' : fields.BooleanField(),
+ }
+
+ def __init__(self, *args, **kwargs):
+ super(Binary, self).__init__(*args, **kwargs)
+ self._orig_extra_specs = {}
+ self._orig_binary = []
+
+ def obj_make_compatible(self, primitive, target_version):
+ super(Binary, self).obj_make_compatible(primitive, target_version)
+ target_version = versionutils.convert_version_to_tuple(target_version)
+
+
+ @staticmethod
+ def _from_db_object(context, binary, db_binary, expected_attrs=None):
+ if expected_attrs is None:
+ expected_attrs = []
+ binary._context = context
+ for name, field in binary.fields.items():
+ value = db_binary[name]
+ if isinstance(field, fields.IntegerField):
+ value = value if value is not None else 0
+ binary[name] = value
+
+ binary.obj_reset_changes()
+ return binary
+
+ @staticmethod
+ @db_api.main_context_manager.reader
+ def _binary_get_query_from_db(context):
+ query = context.session.query(models.Binarys)
+ return query
+
+ @staticmethod
+ @require_context
+ def _binary_get_from_db(context, uuid):
+        """Returns the binary row matching the given uuid."""
+ result = Binary._binary_get_query_from_db(context).\
+ filter_by(uuid=uuid).\
+ first()
+ if not result:
+ raise exception.ImagesNotFound(binary_uuid=uuid)
+ return result
+
+ @staticmethod
+ @require_context
+    def _binary_get_by_name_from_db(context, name):
+        """Returns a dict describing the binary with the given name."""
+ result = Binary._binary_get_query_from_db(context).\
+ filter_by(name=name).\
+ first()
+ if not result:
+ raise exception.FlavorNotFoundByName(binarys_name=name)
+ return _dict_with_extra_specs(result)
+
+ @staticmethod
+ @require_context
+ def _binary_get_by_uuid_from_db(context, uuid):
+        """Returns a dict describing the binary with the given uuid."""
+ result = Binary._binary_get_query_from_db(context).\
+ filter_by(uuid=uuid).\
+ first()
+ if not result:
+            raise exception.ImagesNotFound(binary_uuid=uuid)
+ return _dict_with_extra_specs(result)
+
+ def obj_reset_changes(self, fields=None, recursive=False):
+ super(Binary, self).obj_reset_changes(fields=fields,
+ recursive=recursive)
+
+ def obj_what_changed(self):
+ changes = super(Binary, self).obj_what_changed()
+ return changes
+
+ @base.remotable_classmethod
+ def get_by_uuid(cls, context, uuid):
+ db_binary = cls._binary_get_from_db(context, uuid)
+ return cls._from_db_object(context, cls(context), db_binary,
+ expected_attrs=[])
+ @base.remotable_classmethod
+ def get_by_name(cls, context, name):
+ db_binary = cls._binary_get_by_name_from_db(context, name)
+ return cls._from_db_object(context, cls(context), db_binary,
+ expected_attrs=[])
+
+ @base.remotable_classmethod
+ def get_by_cpv(cls, context, cpv, filters=None):
+ filters = filters or {}
+ db_binary = cls._binary_get_query_from_db(context)
+ db_binary = db_binary.filter_by(cpv=cpv)
+ if 'service_uuid' in filters:
+ db_binary = db_binary.filter(
+ models.Binarys.service_uuid == filters['service_uuid'])
+ if 'cpv' in filters:
+ db_binary = db_binary.filter(
+ models.Binarys.cpv == filters['cpv'])
+ if 'build_id' in filters:
+ db_binary = db_binary.filter(
+ models.Binarys.build_id == filters['build_id'])
+ db_binary = db_binary.first()
+ if not db_binary:
+ return None
+ return cls._from_db_object(context, cls(context), db_binary,
+ expected_attrs=[])
+
+ @staticmethod
+ def _binary_create(context, updates):
+ return _binary_create(context, updates)
+
+ #@base.remotable
+ def create(self, context):
+ #if self.obj_attr_is_set('id'):
+ # raise exception.ObjectActionError(action='create',
+ #reason='already created')
+ updates = self.obj_get_changes()
+ db_binary = self._binary_create(context, updates)
+ self._from_db_object(context, self, db_binary)
+
+
+ # NOTE(mriedem): This method is not remotable since we only expect the API
+    # to be able to make updates to a binary.
+ @db_api.main_context_manager.writer
+ def _save(self, context, values):
+ db_binary = context.session.query(models.Binarys).\
+ filter_by(uuid=self.uuid).first()
+ if not db_binary:
+ raise exception.ImagesNotFound(binary_uuid=self.uuid)
+ db_binary.update(values)
+ db_binary.save(context.session)
+ # Refresh ourselves from the DB object so we get the new updated_at.
+ self._from_db_object(context, self, db_binary)
+ self.obj_reset_changes()
+
+ def save(self, context):
+ updates = self.obj_get_changes()
+ if updates:
+ self._save(context, updates)
+
+ @staticmethod
+ def _binary_destroy(context, binary_uuid=None, binaryuuid=None):
+ _binary_destroy(context, binary_uuid=binary_uuid, binaryuuid=binaryuuid)
+
+ #@base.remotable
+ def destroy(self, context):
+ # NOTE(danms): Historically the only way to delete a binaryss
+ # is via name, which is not very precise. We need to be able to
+ # support the light construction of a binaryss object and subsequent
+ # delete request with only our name filled out. However, if we have
+ # our id property, we should instead delete with that since it's
+ # far more specific.
+ if 'uuid' in self:
+ self._binary_destroy(context, binary_uuid=self.uuid)
+ else:
+ self._binary_destroy(context, binaryuuid=self.binaryuuid)
+ #self._from_db_object(context, self, db_binary)
+
+ @base.remotable_classmethod
+ def get_by_filters_first(cls, context, filters=None):
+ filters = filters or {}
+        query = Binary._binary_get_query_from_db(context)
+        if 'service_uuid' in filters:
+            query = query.filter(
+                models.Binarys.service_uuid == filters['service_uuid'])
+        if 'cpv' in filters:
+            query = query.filter(
+                models.Binarys.cpv == filters['cpv'])
+        if 'build_id' in filters:
+            query = query.filter(
+                models.Binarys.build_id == filters['build_id'])
+        # A Query object is always truthy, so the emptiness check has to
+        # happen on the fetched row, not on the query itself.
+        db_binary = query.first()
+        if not db_binary:
+            return None
+
+        return cls._from_db_object(context, cls(context), db_binary,
+            expected_attrs=[])
+
+@db_api.main_context_manager
+def _binary_get_all_from_db(context, inactive, filters, sort_key, sort_dir,
+ limit, marker):
+ """Returns all binarys.
+ """
+ filters = filters or {}
+
+ query = Binary._binary_get_query_from_db(context)
+
+ if 'ebuild_uuid' in filters:
+ query = query.filter(
+ models.Binarys.ebuild_uuid == filters['ebuild_uuid'])
+ if 'project_uuid' in filters:
+ query = query.filter(
+ models.Binarys.project_uuid == filters['project_uuid'])
+ if 'service_uuid' in filters:
+ query = query.filter(
+ models.Binarys.service_uuid == filters['service_uuid'])
+ if 'cpv' in filters:
+ query = query.filter(
+ models.Binarys.cpv == filters['cpv'])
+ if 'build_id' in filters:
+ query = query.filter(
+ models.Binarys.build_id == filters['build_id'])
+ marker_row = None
+ if marker is not None:
+ marker_row = Binary._binary_get_query_from_db(context).\
+ filter_by(uuid=marker).\
+ first()
+ if not marker_row:
+ raise exception.MarkerNotFound(marker=marker)
+
+ query = sqlalchemyutils.paginate_query(query, models.Binarys,
+ limit,
+ [sort_key, 'uuid'],
+ marker=marker_row,
+ sort_dir=sort_dir)
+ return [_dict_with_extra_specs(i) for i in query.all()]
+
+
+@base.NovaObjectRegistry.register
+class BinaryList(base.ObjectListBase, base.NovaObject):
+ VERSION = '1.0'
+
+ fields = {
+ 'objects': fields.ListOfObjectsField('Binary'),
+ }
+
+ @base.remotable_classmethod
+ def get_all(cls, context, inactive=False, filters=None,
+ sort_key='uuid', sort_dir='asc', limit=None, marker=None):
+ db_binarys = _binary_get_all_from_db(context,
+ inactive=inactive,
+ filters=filters,
+ sort_key=sort_key,
+ sort_dir=sort_dir,
+ limit=limit,
+ marker=marker)
+ return base.obj_make_list(context, cls(context), objects.binary.Binary,
+ db_binarys,
+ expected_attrs=[])
+
+ @db_api.main_context_manager.writer
+ def destroy_all(context):
+ context.session.query(models.Binarys).delete()
+
+ @db_api.main_context_manager.writer
+ def update_all(context):
+ values = {'status': 'waiting', }
+ db_binary = context.session.query(models.Binarys).filter_by(auto=True)
+ db_binary.update(values)
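+
+# Minimal usage sketch (not exercised by this module itself): the lookup
+# helpers above are meant to be driven from the builder tasks.  The context
+# helper comes from gosbs.context elsewhere in this patch; the cpv and
+# service_uuid values below are purely illustrative assumptions.
+#
+#     from gosbs.context import get_admin_context
+#     from gosbs.objects.binary import Binary, BinaryList
+#
+#     context = get_admin_context()
+#     filters = {'service_uuid': service_uuid}
+#     binary = Binary.get_by_cpv(context, 'dev-lang/python-3.6.9', filters)
+#     if binary is None:
+#         binary = Binary()
+#         ...  # fill in the cpv/build_id fields, then binary.create(context)
+#     all_for_service = BinaryList.get_all(context, filters=filters)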
diff --git a/gosbs/objects/binary_header.py b/gosbs/objects/binary_header.py
new file mode 100644
index 0000000..2cd3948
--- /dev/null
+++ b/gosbs/objects/binary_header.py
@@ -0,0 +1,362 @@
+# Copyright 2013 Red Hat, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_db import exception as db_exc
+from oslo_db.sqlalchemy import utils as sqlalchemyutils
+from oslo_versionedobjects import fields
+from oslo_utils import versionutils
+from sqlalchemy import or_
+from sqlalchemy.orm import joinedload
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql import true
+
+import gosbs.conf
+from gosbs.db.sqlalchemy import api as db_api
+from gosbs.db.sqlalchemy.api import require_context
+from gosbs.db.sqlalchemy import models
+from gosbs import exception
+from gosbs import objects
+from gosbs.objects import base
+
+CONF = gosbs.conf.CONF
+
+def _dict_with_extra_specs(model):
+ extra_specs = {}
+ return dict(model, extra_specs=extra_specs)
+
+
+@db_api.main_context_manager.writer
+def _binary_header_create(context, values):
+ db_binary_header = models.BinarysHeaders()
+ db_binary_header.update(values)
+
+ try:
+ db_binary_header.save(context.session)
+ except db_exc.DBDuplicateEntry as e:
+ if 'local_binary_headeruuid' in e.columns:
+ raise exception.ImagesIdExists(binary_header_uuid=values['binary_header_headeruuid'])
+ raise exception.ImagesExists(name=values['name'])
+ except Exception as e:
+ raise db_exc.DBError(e)
+
+ return _dict_with_extra_specs(db_binary_header)
+
+
+@db_api.main_context_manager.writer
+def _binary_header_destroy(context, binary_header_uuid=None, binary_headeruuid=None):
+    query = context.session.query(models.BinarysHeaders)
+
+    if binary_header_uuid is not None:
+        query.filter(models.BinarysHeaders.uuid == binary_header_uuid).delete()
+    else:
+        query.filter(models.BinarysHeaders.uuid == binary_headeruuid).delete()
+
+
+@base.NovaObjectRegistry.register
+class BinaryHeader(base.NovaObject, base.NovaObjectDictCompat, base.NovaPersistentObject):
+ # Version 1.0: Initial version
+
+ VERSION = '1.0'
+
+ fields = {
+ 'uuid' : fields.UUIDField(),
+ 'project_uuid' : fields.UUIDField(),
+ 'service_uuid' : fields.UUIDField(),
+ 'repository' : fields.StringField(nullable=True),
+ 'arch' : fields.StringField(nullable=True),
+ 'accept_keywords' : fields.StringField(nullable=True),
+ 'accept_license' : fields.StringField(nullable=True),
+ 'accept_properties' : fields.StringField(nullable=True),
+ 'accept_restrict' : fields.StringField(nullable=True),
+ 'cbuild' : fields.StringField(nullable=True),
+ 'config_protect' : fields.StringField(nullable=True),
+ 'config_protect_mask' : fields.StringField(nullable=True),
+ 'features' : fields.StringField(nullable=True),
+ 'gentoo_mirrors' : fields.StringField(nullable=True),
+ #'install_mask' : fields.StringField(nullable=True),
+ 'iuse_implicit' : fields.StringField(nullable=True),
+ 'use' : fields.StringField(nullable=True),
+ 'use_expand' : fields.StringField(nullable=True),
+ 'use_expand_hidden' : fields.StringField(nullable=True),
+ 'use_expand_implicit' : fields.StringField(nullable=True),
+ 'use_expand_unprefixed' : fields.StringField(nullable=True),
+ 'use_expand_values_arch' : fields.StringField(nullable=True),
+ 'use_expand_values_elibc' : fields.StringField(nullable=True),
+ 'use_expand_values_kernel' : fields.StringField(nullable=True),
+ 'use_expand_values_userland' : fields.StringField(nullable=True),
+ 'elibc' : fields.StringField(nullable=True),
+ 'kernel' : fields.StringField(nullable=True),
+ 'userland' : fields.StringField(nullable=True),
+ 'packages' : fields.IntegerField(),
+ 'profile' : fields.StringField(nullable=True),
+ 'version' : fields.StringField(nullable=True),
+ 'looked' : fields.BooleanField(),
+ }
+
+ def __init__(self, *args, **kwargs):
+ super(BinaryHeader, self).__init__(*args, **kwargs)
+ self._orig_extra_specs = {}
+ self._orig_binary_header_header = []
+
+ def obj_make_compatible(self, primitive, target_version):
+ super(BinaryHeader, self).obj_make_compatible(primitive, target_version)
+ target_version = versionutils.convert_version_to_tuple(target_version)
+
+
+ @staticmethod
+ def _from_db_object(context, binary_header, db_binary_header, expected_attrs=None):
+ if expected_attrs is None:
+ expected_attrs = []
+ binary_header._context = context
+ for name, field in binary_header.fields.items():
+ value = db_binary_header[name]
+ if isinstance(field, fields.IntegerField):
+ value = value if value is not None else 0
+ binary_header[name] = value
+
+ binary_header.obj_reset_changes()
+ return binary_header
+
+ @staticmethod
+ @db_api.main_context_manager.reader
+ def _binary_header_get_query_from_db(context):
+ query = context.session.query(models.BinarysHeaders)
+ return query
+
+ @staticmethod
+ @require_context
+ def _binary_header_get_from_db(context, uuid):
+ """Returns a dict describing specific binary_headerss."""
+ result = BinaryHeader._binary_header_get_query_from_db(context).\
+ filter_by(uuid=uuid).\
+ first()
+ if not result:
+ raise exception.ImagesNotFound(binary_header_uuid=uuid)
+ return result
+
+ @staticmethod
+ @require_context
+ def _binary_headers_get_by_name_from_db(context, name):
+ """Returns a dict describing specific flavor."""
+ result = BinaryHeader._binary_header_get_query_from_db(context).\
+ filter_by(name=name).\
+ first()
+ if not result:
+ raise exception.FlavorNotFoundByName(binary_headers_name=name)
+ return _dict_with_extra_specs(result)
+
+ @staticmethod
+ @require_context
+ def _binary_header_get_by_service_from_db(context, uuid):
+ """Returns a dict describing specific flavor."""
+ result = BinaryHeader._binary_header_get_query_from_db(context).\
+ filter_by(service_uuid=uuid).\
+ first()
+ if not result:
+ return None
+ return _dict_with_extra_specs(result)
+
+ def obj_reset_changes(self, fields=None, recursive=False):
+ super(BinaryHeader, self).obj_reset_changes(fields=fields,
+ recursive=recursive)
+
+ def obj_what_changed(self):
+ changes = super(BinaryHeader, self).obj_what_changed()
+ return changes
+
+ @base.remotable_classmethod
+ def get_by_uuid(cls, context, uuid):
+ db_binary_header = cls._binary_header_get_from_db(context, uuid)
+ return cls._from_db_object(context, cls(context), db_binary_header,
+ expected_attrs=[])
+
+    @base.remotable_classmethod
+ def get_by_service_uuid(cls, context, uuid):
+ db_binary_header = cls._binary_header_get_by_service_from_db(context, uuid)
+ if db_binary_header is None:
+ return None
+ return cls._from_db_object(context, cls(context), db_binary_header,
+ expected_attrs=[])
+
+ @base.remotable_classmethod
+ def get_by_name(cls, context, name):
+        db_binary_header = cls._binary_headers_get_by_name_from_db(context, name)
+ return cls._from_db_object(context, cls(context), db_binary_header,
+ expected_attrs=[])
+
+ @base.remotable_classmethod
+ def get_by_cpv(cls, context, cpv, filters=None):
+ filters = filters or {}
+ db_binary_header = cls._binary_header_get_query_from_db(context)
+ db_binary_header = db_binary_header.filter_by(cpv=cpv)
+ if 'service_uuid' in filters:
+ db_binary_header = db_binary_header.filter(
+ models.BinarysHeaders.service_uuid == filters['service_uuid'])
+ if 'cpv' in filters:
+ db_binary_header = db_binary_header.filter(
+ models.BinarysHeaders.cpv == filters['cpv'])
+ if 'build_id' in filters:
+ db_binary_header = db_binary_header.filter(
+ models.BinarysHeaders.build_id == filters['build_id'])
+ db_binary_header = db_binary_header.first()
+ if not db_binary_header:
+ return None
+ return cls._from_db_object(context, cls(context), db_binary_header,
+ expected_attrs=[])
+
+ @staticmethod
+ def _binary_header_create(context, updates):
+ return _binary_header_create(context, updates)
+
+ #@base.remotable
+ def create(self, context):
+ #if self.obj_attr_is_set('id'):
+ # raise exception.ObjectActionError(action='create',
+ #reason='already created')
+ updates = self.obj_get_changes()
+ db_binary_header = self._binary_header_create(context, updates)
+ self._from_db_object(context, self, db_binary_header)
+
+
+ # NOTE(mriedem): This method is not remotable since we only expect the API
+    # to be able to make updates to a binary header.
+ @db_api.main_context_manager.writer
+ def _save(self, context, values):
+ db_binary_header = context.session.query(models.BinarysHeaders).\
+ filter_by(uuid=self.uuid).first()
+ if not db_binary_header:
+ raise exception.ImagesNotFound(binary_header_uuid=self.uuid)
+ db_binary_header.update(values)
+ db_binary_header.save(context.session)
+ # Refresh ourselves from the DB object so we get the new updated_at.
+ self._from_db_object(context, self, db_binary_header)
+ self.obj_reset_changes()
+
+ def save(self, context):
+ updates = self.obj_get_changes()
+ if updates:
+ self._save(context, updates)
+
+ @staticmethod
+ def _binary_header_destroy(context, binary_header_uuid=None, binary_headeruuid=None):
+ _binary_header_destroy(context, binary_header_uuid=binary_header_uuid, binary_headeruuid=binary_headeruuid)
+
+ #@base.remotable
+ def destroy(self, context):
+        # NOTE(danms): Historically the only way to delete a binary header
+        # is via name, which is not very precise. We need to be able to
+        # support the light construction of a binary header object and subsequent
+ # delete request with only our name filled out. However, if we have
+ # our id property, we should instead delete with that since it's
+ # far more specific.
+ if 'uuid' in self:
+ self._binary_header_destroy(context, binary_header_uuid=self.uuid)
+ else:
+ self._binary_header_destroy(context, binary_headeruuid=self.binary_headeruuid)
+ #self._from_db_object(context, self, db_binary_header)
+
+ @base.remotable_classmethod
+ def get_by_filters_first(cls, context, filters=None):
+ filters = filters or {}
+        query = BinaryHeader._binary_header_get_query_from_db(context)
+        if 'service_uuid' in filters:
+            query = query.filter(
+                models.BinarysHeaders.service_uuid == filters['service_uuid'])
+        if 'cpv' in filters:
+            query = query.filter(
+                models.BinarysHeaders.cpv == filters['cpv'])
+        if 'build_id' in filters:
+            query = query.filter(
+                models.BinarysHeaders.build_id == filters['build_id'])
+        # A Query object is always truthy, so the emptiness check has to
+        # happen on the fetched row, not on the query itself.
+        db_binary_header = query.first()
+        if not db_binary_header:
+            return None
+
+        return cls._from_db_object(context, cls(context), db_binary_header,
+            expected_attrs=[])
+
+@db_api.main_context_manager
+def _binary_header_get_all_from_db(context, inactive, filters, sort_key, sort_dir,
+ limit, marker):
+ """Returns all binary_headers.
+ """
+ filters = filters or {}
+
+ query = BinaryHeader._binary_header_get_query_from_db(context)
+
+ if 'ebuild_uuid' in filters:
+ query = query.filter(
+ models.BinarysHeaders.ebuild_uuid == filters['ebuild_uuid'])
+ if 'project_uuid' in filters:
+ query = query.filter(
+ models.BinarysHeaders.project_uuid == filters['project_uuid'])
+ if 'service_uuid' in filters:
+ query = query.filter(
+ models.BinarysHeaders.service_uuid == filters['service_uuid'])
+ if 'cpv' in filters:
+ query = query.filter(
+ models.BinarysHeaders.cpv == filters['cpv'])
+ if 'build_id' in filters:
+ query = query.filter(
+ models.BinarysHeaders.build_id == filters['build_id'])
+ marker_row = None
+ if marker is not None:
+ marker_row = BinaryHeader._binary_header_get_query_from_db(context).\
+ filter_by(uuid=marker).\
+ first()
+ if not marker_row:
+ raise exception.MarkerNotFound(marker=marker)
+
+ query = sqlalchemyutils.paginate_query(query, models.BinarysHeaders,
+ limit,
+ [sort_key, 'uuid'],
+ marker=marker_row,
+ sort_dir=sort_dir)
+ return [_dict_with_extra_specs(i) for i in query.all()]
+
+
+@base.NovaObjectRegistry.register
+class BinaryHeaderList(base.ObjectListBase, base.NovaObject):
+ VERSION = '1.0'
+
+ fields = {
+ 'objects': fields.ListOfObjectsField('BinaryHeader'),
+ }
+
+ @base.remotable_classmethod
+ def get_all(cls, context, inactive=False, filters=None,
+ sort_key='uuid', sort_dir='asc', limit=None, marker=None):
+ db_binary_headers = _binary_header_get_all_from_db(context,
+ inactive=inactive,
+ filters=filters,
+ sort_key=sort_key,
+ sort_dir=sort_dir,
+ limit=limit,
+ marker=marker)
+ return base.obj_make_list(context, cls(context), objects.binary_header.BinaryHeader,
+ db_binary_headers,
+ expected_attrs=[])
+
+ @db_api.main_context_manager.writer
+ def destroy_all(context):
+ context.session.query(models.BinarysHeaders).delete()
+
+ @db_api.main_context_manager.writer
+ def update_all(context):
+ values = {'status': 'waiting', }
+ db_binary_header = context.session.query(models.BinarysHeaders).filter_by(auto=True)
+ db_binary_header.update(values)
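+
+# Minimal usage sketch (illustrative only, not called from this module): the
+# header object mirrors the top of a Packages file, keyed per service.
+# get_admin_context() comes from gosbs.context elsewhere in this patch; the
+# service_uuid value shown here is an assumption.
+#
+#     from gosbs.context import get_admin_context
+#     from gosbs.objects.binary_header import BinaryHeader
+#
+#     context = get_admin_context()
+#     header = BinaryHeader.get_by_service_uuid(context, service_uuid)
+#     if header is None:
+#         header = BinaryHeader()
+#         header.service_uuid = service_uuid
+#         ...  # fill in the USE/FEATURES/profile fields, then header.create(context)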
diff --git a/gosbs/portage/__init__.py b/gosbs/portage/__init__.py
new file mode 100644
index 0000000..ef2f075
--- /dev/null
+++ b/gosbs/portage/__init__.py
@@ -0,0 +1,72 @@
+# Copyright 1998-2019 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+
+from portage import _trees_dict, const
+import portage.proxy.lazyimport
+import portage.proxy as proxy
+proxy.lazyimport.lazyimport(globals(),
+ 'gosbs.portage.dbapi.bintree:bindbapi,binarytree',
+ 'portage.package.ebuild.config:autouse,best_from_dict,' + \
+ 'check_config_instance,config',
+ 'portage.dbapi.vartree:dblink,merge,unmerge,vardbapi,vartree',
+ 'portage.dbapi.porttree:close_portdbapi_caches,FetchlistDict,' + \
+ 'portagetree,portdbapi',
+
+ )
+
+def create_trees(config_root=None, target_root=None, trees=None, env=None,
+ sysroot=None, eprefix=None):
+
+ if trees is None:
+ trees = _trees_dict()
+ elif not isinstance(trees, _trees_dict):
+ # caller passed a normal dict or something,
+ # but we need a _trees_dict instance
+ trees = _trees_dict(trees)
+
+ if env is None:
+ env = os.environ
+
+ settings = config(config_root=config_root, target_root=target_root,
+ env=env, sysroot=sysroot, eprefix=eprefix)
+ settings.lock()
+
+ depcachedir = settings.get('PORTAGE_DEPCACHEDIR')
+ trees._target_eroot = settings['EROOT']
+ myroots = [(settings['EROOT'], settings)]
+ if settings["ROOT"] == "/" and settings["EPREFIX"] == const.EPREFIX:
+ trees._running_eroot = trees._target_eroot
+ else:
+
+ # When ROOT != "/" we only want overrides from the calling
+ # environment to apply to the config that's associated
+ # with ROOT != "/", so pass a nearly empty dict for the env parameter.
+ clean_env = {}
+ for k in ('PATH', 'PORTAGE_GRPNAME', 'PORTAGE_REPOSITORIES', 'PORTAGE_USERNAME',
+ 'PYTHONPATH', 'SSH_AGENT_PID', 'SSH_AUTH_SOCK', 'TERM',
+ 'ftp_proxy', 'http_proxy', 'https_proxy', 'no_proxy',
+ '__PORTAGE_TEST_HARDLINK_LOCKS'):
+ v = settings.get(k)
+ if v is not None:
+ clean_env[k] = v
+ if depcachedir is not None:
+ clean_env['PORTAGE_DEPCACHEDIR'] = depcachedir
+ settings = config(config_root=None, target_root="/",
+ env=clean_env, sysroot="/", eprefix=None)
+ settings.lock()
+ trees._running_eroot = settings['EROOT']
+ myroots.append((settings['EROOT'], settings))
+
+ for myroot, mysettings in myroots:
+ trees[myroot] = portage.util.LazyItemsDict(trees.get(myroot, {}))
+ trees[myroot].addLazySingleton("virtuals", mysettings.getvirtuals)
+ trees[myroot].addLazySingleton(
+ "vartree", vartree, categories=mysettings.categories,
+ settings=mysettings)
+ trees[myroot].addLazySingleton("porttree",
+ portagetree, settings=mysettings)
+ trees[myroot].addLazySingleton("bintree",
+ binarytree, pkgdir=mysettings["PKGDIR"], settings=mysettings)
+ return trees
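+
+# Rough usage sketch (an assumption based on the lazyimport above, not a call
+# site in this patch): callers create the trees through this wrapper so that
+# the "bintree" entry is backed by gosbs.portage.dbapi.bintree instead of the
+# stock portage implementation.
+#
+#     import gosbs.portage
+#
+#     trees = gosbs.portage.create_trees(config_root='/', target_root='/')
+#     eroot = trees._target_eroot
+#     bintree = trees[eroot]['bintree']
+#     bintree.populate()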
diff --git a/gosbs/portage/dbapi/__init__.py b/gosbs/portage/dbapi/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/gosbs/portage/dbapi/__init__.py
diff --git a/gosbs/portage/dbapi/bintree.py b/gosbs/portage/dbapi/bintree.py
new file mode 100644
index 0000000..1cee532
--- /dev/null
+++ b/gosbs/portage/dbapi/bintree.py
@@ -0,0 +1,1802 @@
+# Copyright 1998-2019 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = ["bindbapi", "binarytree"]
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ '_emerge.BinpkgExtractorAsync:BinpkgExtractorAsync',
+ 'portage.checksum:get_valid_checksum_keys,perform_multiple_checksums,' + \
+ 'verify_all,_apply_hash_filter,_hash_filter',
+ 'portage.dbapi.dep_expand:dep_expand',
+ 'portage.dep:dep_getkey,isjustname,isvalidatom,match_from_list',
+ 'portage.output:EOutput,colorize',
+ 'portage.locks:lockfile,unlockfile',
+ 'portage.package.ebuild.fetch:_check_distfile,_hide_url_passwd',
+ 'portage.update:update_dbentries',
+ 'portage.util:atomic_ofstream,ensure_dirs,normalize_path,' + \
+ 'writemsg,writemsg_stdout',
+ 'portage.util.path:first_existing',
+ 'portage.util._async.SchedulerInterface:SchedulerInterface',
+ 'portage.util._urlopen:urlopen@_urlopen,have_pep_476@_have_pep_476',
+ 'portage.versions:best,catpkgsplit,catsplit,_pkg_str',
+)
+
+from portage.cache.mappings import slot_dict_class
+from portage.const import CACHE_PATH, SUPPORTED_XPAK_EXTENSIONS
+from portage.dbapi.virtual import fakedbapi
+from portage.dep import Atom, use_reduce, paren_enclose
+from portage.exception import AlarmSignal, InvalidData, InvalidPackageName, \
+ ParseError, PermissionDenied, PortageException
+from portage.localization import _
+from portage.package.ebuild.profile_iuse import iter_iuse_vars
+from portage.util.futures import asyncio
+from portage.util.futures.compat_coroutine import coroutine
+from portage.util.futures.executor.fork import ForkExecutor
+from portage import _movefile
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+
+import codecs
+import errno
+import io
+import stat
+import subprocess
+import sys
+import tempfile
+import textwrap
+import time
+import traceback
+import warnings
+from gzip import GzipFile
+from itertools import chain
+try:
+ from urllib.parse import urlparse
+except ImportError:
+ from urlparse import urlparse
+
+from gosbs.builder.binary import touch_pkg_in_db, PackageIndex
+from gosbs.context import get_admin_context
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ _unicode = str
+ basestring = str
+ long = int
+else:
+ _unicode = unicode
+
+class UseCachedCopyOfRemoteIndex(Exception):
+ # If the local copy is recent enough
+ # then fetching the remote index can be skipped.
+ pass
+
+class bindbapi(fakedbapi):
+ _known_keys = frozenset(list(fakedbapi._known_keys) + \
+ ["CHOST", "repository", "USE"])
+ _pkg_str_aux_keys = fakedbapi._pkg_str_aux_keys + ("BUILD_ID", "BUILD_TIME", "_mtime_")
+
+ def __init__(self, mybintree=None, **kwargs):
+ # Always enable multi_instance mode for bindbapi indexing. This
+ # does not affect the local PKGDIR file layout, since that is
+ # controlled independently by FEATURES=binpkg-multi-instance.
+ # The multi_instance mode is useful for the following reasons:
+ # * binary packages with the same cpv from multiple binhosts
+ # can be considered simultaneously
+ # * if binpkg-multi-instance is disabled, it's still possible
+ # to properly access a PKGDIR which has binpkg-multi-instance
+ # layout (or mixed layout)
+ fakedbapi.__init__(self, exclusive_slots=False,
+ multi_instance=True, **kwargs)
+ self.bintree = mybintree
+ self.move_ent = mybintree.move_ent
+ # Selectively cache metadata in order to optimize dep matching.
+ self._aux_cache_keys = set(
+ ["BDEPEND", "BUILD_ID", "BUILD_TIME", "CHOST", "DEFINED_PHASES",
+ "DEPEND", "EAPI", "IUSE", "KEYWORDS",
+ "LICENSE", "MD5", "PDEPEND", "PROPERTIES",
+ "PROVIDES", "RDEPEND", "repository", "REQUIRES", "RESTRICT",
+ "SIZE", "SLOT", "USE", "_mtime_"
+ ])
+ self._aux_cache_slot_dict = slot_dict_class(self._aux_cache_keys)
+ self._aux_cache = {}
+
+ @property
+ def writable(self):
+ """
+ Check if PKGDIR is writable, or permissions are sufficient
+ to create it if it does not exist yet.
+ @rtype: bool
+ @return: True if PKGDIR is writable or can be created,
+ False otherwise
+ """
+ return os.access(first_existing(self.bintree.pkgdir), os.W_OK)
+
+ def match(self, *pargs, **kwargs):
+ if self.bintree and not self.bintree.populated:
+ self.bintree.populate()
+ return fakedbapi.match(self, *pargs, **kwargs)
+
+ def cpv_exists(self, cpv, myrepo=None):
+ if self.bintree and not self.bintree.populated:
+ self.bintree.populate()
+ return fakedbapi.cpv_exists(self, cpv)
+
+ def cpv_inject(self, cpv, **kwargs):
+ if not self.bintree.populated:
+ self.bintree.populate()
+ fakedbapi.cpv_inject(self, cpv,
+ metadata=cpv._metadata, **kwargs)
+
+ def cpv_remove(self, cpv):
+ if not self.bintree.populated:
+ self.bintree.populate()
+ fakedbapi.cpv_remove(self, cpv)
+
+ def aux_get(self, mycpv, wants, myrepo=None):
+ if self.bintree and not self.bintree.populated:
+ self.bintree.populate()
+ # Support plain string for backward compatibility with API
+ # consumers (including portageq, which passes in a cpv from
+ # a command-line argument).
+ instance_key = self._instance_key(mycpv,
+ support_string=True)
+ if not self._known_keys.intersection(
+ wants).difference(self._aux_cache_keys):
+ aux_cache = self.cpvdict[instance_key]
+ if aux_cache is not None:
+ return [aux_cache.get(x, "") for x in wants]
+ mysplit = mycpv.split("/")
+ mylist = []
+ add_pkg = self.bintree._additional_pkgs.get(instance_key)
+ if add_pkg is not None:
+ return add_pkg._db.aux_get(add_pkg, wants)
+ elif not self.bintree._remotepkgs or \
+ not self.bintree.isremote(mycpv):
+ try:
+ tbz2_path = self.bintree._pkg_paths[instance_key]
+ except KeyError:
+ raise KeyError(mycpv)
+ tbz2_path = os.path.join(self.bintree.pkgdir, tbz2_path)
+ try:
+ st = os.lstat(tbz2_path)
+ except OSError:
+ raise KeyError(mycpv)
+ metadata_bytes = portage.xpak.tbz2(tbz2_path).get_data()
+ def getitem(k):
+ if k == "_mtime_":
+ return _unicode(st[stat.ST_MTIME])
+ elif k == "SIZE":
+ return _unicode(st.st_size)
+ v = metadata_bytes.get(_unicode_encode(k,
+ encoding=_encodings['repo.content'],
+ errors='backslashreplace'))
+ if v is not None:
+ v = _unicode_decode(v,
+ encoding=_encodings['repo.content'], errors='replace')
+ return v
+ else:
+ getitem = self.cpvdict[instance_key].get
+ mydata = {}
+ mykeys = wants
+ for x in mykeys:
+ myval = getitem(x)
+ # myval is None if the key doesn't exist
+ # or the tbz2 is corrupt.
+ if myval:
+ mydata[x] = " ".join(myval.split())
+
+ if not mydata.setdefault('EAPI', '0'):
+ mydata['EAPI'] = '0'
+
+ return [mydata.get(x, '') for x in wants]
+
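+    # Illustrative example (the cpv value is an assumption): aux_get() lets
+    # callers read selected metadata without unpacking the whole binary
+    # package, e.g.
+    #
+    #     slot, size = bindb.aux_get('dev-lang/python-3.6.9', ['SLOT', 'SIZE'])
+    #
+    # where bindb is the bindbapi instance attached to a populated binarytree.
+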
+ def aux_update(self, cpv, values):
+ if not self.bintree.populated:
+ self.bintree.populate()
+ build_id = None
+ try:
+ build_id = cpv.build_id
+ except AttributeError:
+ if self.bintree._multi_instance:
+ # The cpv.build_id attribute is required if we are in
+ # multi-instance mode, since otherwise we won't know
+ # which instance to update.
+ raise
+ else:
+ cpv = self._instance_key(cpv, support_string=True)[0]
+ build_id = cpv.build_id
+
+ tbz2path = self.bintree.getname(cpv)
+ if not os.path.exists(tbz2path):
+ raise KeyError(cpv)
+ mytbz2 = portage.xpak.tbz2(tbz2path)
+ mydata = mytbz2.get_data()
+
+ for k, v in values.items():
+ k = _unicode_encode(k,
+ encoding=_encodings['repo.content'], errors='backslashreplace')
+ v = _unicode_encode(v,
+ encoding=_encodings['repo.content'], errors='backslashreplace')
+ mydata[k] = v
+
+ for k, v in list(mydata.items()):
+ if not v:
+ del mydata[k]
+ mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))
+ # inject will clear stale caches via cpv_inject.
+ self.bintree.inject(cpv, filename=tbz2path)
+
+
+ @coroutine
+ def unpack_metadata(self, pkg, dest_dir):
+ """
+ Unpack package metadata to a directory. This method is a coroutine.
+
+ @param pkg: package to unpack
+ @type pkg: _pkg_str or portage.config
+ @param dest_dir: destination directory
+ @type dest_dir: str
+ """
+ loop = asyncio._wrap_loop()
+ if isinstance(pkg, _pkg_str):
+ cpv = pkg
+ else:
+ cpv = pkg.mycpv
+ key = self._instance_key(cpv)
+ add_pkg = self.bintree._additional_pkgs.get(key)
+ if add_pkg is not None:
+ yield add_pkg._db.unpack_metadata(pkg, dest_dir)
+ else:
+ tbz2_file = self.bintree.getname(cpv)
+ yield loop.run_in_executor(ForkExecutor(loop=loop),
+ portage.xpak.tbz2(tbz2_file).unpackinfo, dest_dir)
+
+ @coroutine
+ def unpack_contents(self, pkg, dest_dir):
+ """
+ Unpack package contents to a directory. This method is a coroutine.
+
+ @param pkg: package to unpack
+ @type pkg: _pkg_str or portage.config
+ @param dest_dir: destination directory
+ @type dest_dir: str
+ """
+ loop = asyncio._wrap_loop()
+ if isinstance(pkg, _pkg_str):
+ settings = self.settings
+ cpv = pkg
+ else:
+ settings = pkg
+ cpv = settings.mycpv
+
+ pkg_path = self.bintree.getname(cpv)
+ if pkg_path is not None:
+
+ extractor = BinpkgExtractorAsync(
+ background=settings.get('PORTAGE_BACKGROUND') == '1',
+ env=settings.environ(),
+ features=settings.features,
+ image_dir=dest_dir,
+ pkg=cpv, pkg_path=pkg_path,
+ logfile=settings.get('PORTAGE_LOG_FILE'),
+ scheduler=SchedulerInterface(loop))
+
+ extractor.start()
+ yield extractor.async_wait()
+ if extractor.returncode != os.EX_OK:
+ raise PortageException("Error Extracting '{}'".format(pkg_path))
+
+ else:
+ instance_key = self._instance_key(cpv)
+ add_pkg = self.bintree._additional_pkgs.get(instance_key)
+ if add_pkg is None:
+ raise portage.exception.PackageNotFound(cpv)
+ yield add_pkg._db.unpack_contents(pkg, dest_dir)
+
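+    # Illustrative sketch (helper name and directory layout are assumptions):
+    # both unpack coroutines are meant to be yielded from another compat
+    # coroutine, mirroring how they are driven elsewhere in portage, e.g.
+    #
+    #     @coroutine
+    #     def _unpack(bindb, pkg, build_dir, infloc):
+    #         yield bindb.unpack_metadata(pkg, infloc)
+    #         yield bindb.unpack_contents(pkg, os.path.join(build_dir, 'image'))
+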
+ def cp_list(self, *pargs, **kwargs):
+ if not self.bintree.populated:
+ self.bintree.populate()
+ return fakedbapi.cp_list(self, *pargs, **kwargs)
+
+ def cp_all(self, sort=False):
+ if not self.bintree.populated:
+ self.bintree.populate()
+ return fakedbapi.cp_all(self, sort=sort)
+
+ def cpv_all(self):
+ if not self.bintree.populated:
+ self.bintree.populate()
+ return fakedbapi.cpv_all(self)
+
+ def getfetchsizes(self, pkg):
+ """
+ This will raise MissingSignature if SIZE signature is not available,
+ or InvalidSignature if SIZE signature is invalid.
+ """
+
+ if not self.bintree.populated:
+ self.bintree.populate()
+
+ pkg = getattr(pkg, 'cpv', pkg)
+
+ filesdict = {}
+ if not self.bintree.isremote(pkg):
+ pass
+ else:
+ metadata = self.bintree._remotepkgs[self._instance_key(pkg)]
+ try:
+ size = int(metadata["SIZE"])
+ except KeyError:
+ raise portage.exception.MissingSignature("SIZE")
+ except ValueError:
+ raise portage.exception.InvalidSignature(
+ "SIZE: %s" % metadata["SIZE"])
+ else:
+ filesdict[os.path.basename(self.bintree.getname(pkg))] = size
+
+ return filesdict
+
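+    # Example (illustrative, hypothetical cpv value): for a remote package the
+    # returned dict maps the package file name to its advertised size, e.g.
+    #
+    #     sizes = bindb.getfetchsizes('dev-lang/python-3.6.9')
+    #     # -> {'python-3.6.9.tbz2': 12345678} for a not-yet-fetched remote pkg
+    #
+    # Locally available packages yield an empty dict.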
+
+class binarytree(object):
+ "this tree scans for a list of all packages available in PKGDIR"
+ def __init__(self, _unused=DeprecationWarning, pkgdir=None,
+ virtual=DeprecationWarning, settings=None):
+ self.context = get_admin_context()
+
+ if pkgdir is None:
+ raise TypeError("pkgdir parameter is required")
+
+ if settings is None:
+ raise TypeError("settings parameter is required")
+
+ if _unused is not DeprecationWarning:
+ warnings.warn("The first parameter of the "
+ "portage.dbapi.bintree.binarytree"
+ " constructor is now unused. Instead "
+ "settings['ROOT'] is used.",
+ DeprecationWarning, stacklevel=2)
+
+ if virtual is not DeprecationWarning:
+ warnings.warn("The 'virtual' parameter of the "
+ "portage.dbapi.bintree.binarytree"
+ " constructor is unused",
+ DeprecationWarning, stacklevel=2)
+
+ if True:
+ self.pkgdir = normalize_path(pkgdir)
+            # NOTE: Even if binpkg-multi-instance is disabled, it's
+ # still possible to access a PKGDIR which uses the
+ # binpkg-multi-instance layout (or mixed layout).
+ self._multi_instance = ("binpkg-multi-instance" in
+ settings.features)
+ if self._multi_instance:
+ self._allocate_filename = self._allocate_filename_multi
+ self.dbapi = bindbapi(self, settings=settings)
+ self.update_ents = self.dbapi.update_ents
+ self.move_slot_ent = self.dbapi.move_slot_ent
+ self.populated = 0
+ self.tree = {}
+ self._remote_has_index = False
+ self._remotepkgs = None # remote metadata indexed by cpv
+ self._additional_pkgs = {}
+ self.invalids = []
+ self.settings = settings
+ self._pkg_paths = {}
+ self._populating = False
+ self._all_directory = os.path.isdir(
+ os.path.join(self.pkgdir, "All"))
+ self._pkgindex_version = 0
+ self._pkgindex_hashes = ["MD5","SHA1"]
+ self._pkgindex_file = os.path.join(self.pkgdir, "Packages")
+ self._pkgindex_keys = self.dbapi._aux_cache_keys.copy()
+ self._pkgindex_keys.update(["CPV", "SIZE"])
+ self._pkgindex_aux_keys = \
+ ["BASE_URI", "BDEPEND", "BUILD_ID", "BUILD_TIME", "CHOST",
+ "DEFINED_PHASES", "DEPEND", "DESCRIPTION", "EAPI",
+ "IUSE", "KEYWORDS", "LICENSE", "PDEPEND",
+ "PKGINDEX_URI", "PROPERTIES", "PROVIDES",
+ "RDEPEND", "repository", "REQUIRES", "RESTRICT",
+ "SIZE", "SLOT", "USE"]
+ self._pkgindex_aux_keys = list(self._pkgindex_aux_keys)
+ self._pkgindex_use_evaluated_keys = \
+ ("BDEPEND", "DEPEND", "LICENSE", "RDEPEND",
+ "PDEPEND", "PROPERTIES", "RESTRICT")
+ self._pkgindex_header = None
+ self._pkgindex_header_keys = set([
+ "ACCEPT_KEYWORDS", "ACCEPT_LICENSE",
+ "ACCEPT_PROPERTIES", "ACCEPT_RESTRICT", "CBUILD",
+ "CONFIG_PROTECT", "CONFIG_PROTECT_MASK", "FEATURES",
+ "GENTOO_MIRRORS", "INSTALL_MASK", "IUSE_IMPLICIT", "USE",
+ "USE_EXPAND", "USE_EXPAND_HIDDEN", "USE_EXPAND_IMPLICIT",
+ "USE_EXPAND_UNPREFIXED"])
+ self._pkgindex_default_pkg_data = {
+ "BDEPEND" : "",
+ "BUILD_ID" : "",
+ "BUILD_TIME" : "",
+ "DEFINED_PHASES" : "",
+ "DEPEND" : "",
+ "EAPI" : "0",
+ "IUSE" : "",
+ "KEYWORDS": "",
+ "LICENSE" : "",
+ "PATH" : "",
+ "PDEPEND" : "",
+ "PROPERTIES" : "",
+ "PROVIDES": "",
+ "RDEPEND" : "",
+ "REQUIRES": "",
+ "RESTRICT": "",
+ "SLOT" : "0",
+ "USE" : "",
+ }
+ self._pkgindex_inherited_keys = ["CHOST", "repository"]
+
+ # Populate the header with appropriate defaults.
+ self._pkgindex_default_header_data = {
+ "CHOST" : self.settings.get("CHOST", ""),
+ "repository" : "",
+ }
+
+ self._pkgindex_translated_keys = (
+ ("DESCRIPTION" , "DESC"),
+ ("_mtime_" , "MTIME"),
+ ("repository" , "REPO"),
+ )
+
+ self._pkgindex_allowed_pkg_keys = set(chain(
+ self._pkgindex_keys,
+ self._pkgindex_aux_keys,
+ self._pkgindex_hashes,
+ self._pkgindex_default_pkg_data,
+ self._pkgindex_inherited_keys,
+ chain(*self._pkgindex_translated_keys)
+ ))
+
+ @property
+ def root(self):
+ warnings.warn("The root attribute of "
+ "portage.dbapi.bintree.binarytree"
+ " is deprecated. Use "
+ "settings['ROOT'] instead.",
+ DeprecationWarning, stacklevel=3)
+ return self.settings['ROOT']
+
+ def move_ent(self, mylist, repo_match=None):
+ if not self.populated:
+ self.populate()
+ origcp = mylist[1]
+ newcp = mylist[2]
+ # sanity check
+ for atom in (origcp, newcp):
+ if not isjustname(atom):
+ raise InvalidPackageName(_unicode(atom))
+ mynewcat = catsplit(newcp)[0]
+        origmatches = self.dbapi.cp_list(origcp)
+ moves = 0
+ if not origmatches:
+ return moves
+ for mycpv in origmatches:
+ try:
+ mycpv = self.dbapi._pkg_str(mycpv, None)
+ except (KeyError, InvalidData):
+ continue
+ mycpv_cp = portage.cpv_getkey(mycpv)
+ if mycpv_cp != origcp:
+ # Ignore PROVIDE virtual match.
+ continue
+ if repo_match is not None \
+ and not repo_match(mycpv.repo):
+ continue
+
+ # Use isvalidatom() to check if this move is valid for the
+ # EAPI (characters allowed in package names may vary).
+ if not isvalidatom(newcp, eapi=mycpv.eapi):
+ continue
+
+ mynewcpv = mycpv.replace(mycpv_cp, _unicode(newcp), 1)
+ myoldpkg = catsplit(mycpv)[1]
+ mynewpkg = catsplit(mynewcpv)[1]
+
+ if (mynewpkg != myoldpkg) and os.path.exists(self.getname(mynewcpv)):
+ writemsg(_("!!! Cannot update binary: Destination exists.\n"),
+ noiselevel=-1)
+ writemsg("!!! "+mycpv+" -> "+mynewcpv+"\n", noiselevel=-1)
+ continue
+
+ tbz2path = self.getname(mycpv)
+ if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
+ writemsg(_("!!! Cannot update readonly binary: %s\n") % mycpv,
+ noiselevel=-1)
+ continue
+
+ moves += 1
+ mytbz2 = portage.xpak.tbz2(tbz2path)
+ mydata = mytbz2.get_data()
+ updated_items = update_dbentries([mylist], mydata, parent=mycpv)
+ mydata.update(updated_items)
+ mydata[b'PF'] = \
+ _unicode_encode(mynewpkg + "\n",
+ encoding=_encodings['repo.content'])
+ mydata[b'CATEGORY'] = \
+ _unicode_encode(mynewcat + "\n",
+ encoding=_encodings['repo.content'])
+ if mynewpkg != myoldpkg:
+ ebuild_data = mydata.pop(_unicode_encode(myoldpkg + '.ebuild',
+ encoding=_encodings['repo.content']), None)
+ if ebuild_data is not None:
+ mydata[_unicode_encode(mynewpkg + '.ebuild',
+ encoding=_encodings['repo.content'])] = ebuild_data
+
+ mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))
+
+ self.dbapi.cpv_remove(mycpv)
+ del self._pkg_paths[self.dbapi._instance_key(mycpv)]
+ metadata = self.dbapi._aux_cache_slot_dict()
+ for k in self.dbapi._aux_cache_keys:
+ v = mydata.get(_unicode_encode(k))
+ if v is not None:
+ v = _unicode_decode(v)
+ metadata[k] = " ".join(v.split())
+ mynewcpv = _pkg_str(mynewcpv, metadata=metadata, db=self.dbapi)
+ new_path = self.getname(mynewcpv)
+ self._pkg_paths[
+ self.dbapi._instance_key(mynewcpv)] = new_path[len(self.pkgdir)+1:]
+ if new_path != mytbz2:
+ self._ensure_dir(os.path.dirname(new_path))
+ _movefile(tbz2path, new_path, mysettings=self.settings)
+ self.inject(mynewcpv)
+
+ return moves
+
+ def prevent_collision(self, cpv):
+ warnings.warn("The "
+ "portage.dbapi.bintree.binarytree.prevent_collision "
+ "method is deprecated.",
+ DeprecationWarning, stacklevel=2)
+
+ def _ensure_dir(self, path):
+ """
+ Create the specified directory. Also, copy gid and group mode
+ bits from self.pkgdir if possible.
+        @param path: Absolute path of the directory to be created.
+        @type path: String
+ """
+ try:
+ pkgdir_st = os.stat(self.pkgdir)
+ except OSError:
+ ensure_dirs(path)
+ return
+ pkgdir_gid = pkgdir_st.st_gid
+ pkgdir_grp_mode = 0o2070 & pkgdir_st.st_mode
+ try:
+ ensure_dirs(path, gid=pkgdir_gid, mode=pkgdir_grp_mode, mask=0)
+ except PortageException:
+ if not os.path.isdir(path):
+ raise
+
+ def _file_permissions(self, path):
+ try:
+ pkgdir_st = os.stat(self.pkgdir)
+ except OSError:
+ pass
+ else:
+ pkgdir_gid = pkgdir_st.st_gid
+ pkgdir_grp_mode = 0o0060 & pkgdir_st.st_mode
+ try:
+ portage.util.apply_permissions(path, gid=pkgdir_gid,
+ mode=pkgdir_grp_mode, mask=0)
+ except PortageException:
+ pass
+
+ def populate(self, getbinpkgs=False, getbinpkg_refresh=True, add_repos=()):
+ """
+ Populates the binarytree with package metadata.
+
+ @param getbinpkgs: include remote packages
+ @type getbinpkgs: bool
+ @param getbinpkg_refresh: attempt to refresh the cache
+ of remote package metadata if getbinpkgs is also True
+ @type getbinpkg_refresh: bool
+ @param add_repos: additional binary package repositories
+ @type add_repos: sequence
+ """
+
+ if self._populating:
+ return
+
+ if not os.path.isdir(self.pkgdir) and not (getbinpkgs or add_repos):
+ self.populated = True
+ return
+
+ # Clear all caches in case populate is called multiple times
+ # as may be the case when _global_updates calls populate()
+ # prior to performing package moves since it only wants to
+ # operate on local packages (getbinpkgs=0).
+ self._remotepkgs = None
+
+ self._populating = True
+ try:
+ update_pkgindex = self._populate_local(
+ reindex='pkgdir-index-trusted' not in self.settings.features)
+ if update_pkgindex and self.dbapi.writable:
+ # If the Packages file needs to be updated, then _populate_local
+ # needs to be called once again while the file is locked, so
+ # that changes made by a concurrent process cannot be lost. This
+ # case is avoided when possible, in order to minimize lock
+ # contention.
+ pkgindex_lock = None
+ try:
+ pkgindex_lock = lockfile(self._pkgindex_file,
+ wantnewlockfile=True)
+ update_pkgindex = self._populate_local()
+ if update_pkgindex:
+ self._pkgindex_write(update_pkgindex, None)
+
+ finally:
+ if pkgindex_lock:
+ unlockfile(pkgindex_lock)
+
+ if add_repos:
+ self._populate_additional(add_repos)
+
+ if getbinpkgs:
+ if not self.settings.get("PORTAGE_BINHOST"):
+ writemsg(_("!!! PORTAGE_BINHOST unset, but use is requested.\n"),
+ noiselevel=-1)
+ else:
+ self._populate_remote(getbinpkg_refresh=getbinpkg_refresh)
+
+ finally:
+ self._populating = False
+
+ self.populated = True
+
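+    # Illustrative call pattern (arguments shown are assumptions): most
+    # callers rely on the implicit populate() performed by the dbapi wrappers,
+    # but a remote refresh can be requested explicitly, e.g.
+    #
+    #     bintree.populate(getbinpkgs=True, getbinpkg_refresh=True)
+    #
+    # which pulls the Packages index from each PORTAGE_BINHOST entry.
+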
+ def _populate_local(self, reindex=True):
+ """
+ Populates the binarytree with local package metadata.
+
+ @param reindex: detect added / modified / removed packages and
+ regenerate the index file if necessary
+ @type reindex: bool
+ """
+ self.dbapi.clear()
+ _instance_key = self.dbapi._instance_key
+ # In order to minimize disk I/O, we never compute digests here.
+ # Therefore we exclude hashes from the minimum_keys, so that
+ # the Packages file will not be needlessly re-written due to
+ # missing digests.
+ minimum_keys = self._pkgindex_keys.difference(self._pkgindex_hashes)
+ if True:
+ pkg_paths = {}
+ self._pkg_paths = pkg_paths
+ dir_files = {}
+ if reindex:
+ for parent, dir_names, file_names in os.walk(self.pkgdir):
+ relative_parent = parent[len(self.pkgdir)+1:]
+ dir_files[relative_parent] = file_names
+
+ pkgindex = self._load_pkgindex()
+ if not self._pkgindex_version_supported(pkgindex):
+ pkgindex = self._new_pkgindex()
+ metadata = {}
+ basename_index = {}
+ for d in pkgindex.packages:
+ cpv = _pkg_str(d["CPV"], metadata=d,
+ settings=self.settings, db=self.dbapi)
+ d["CPV"] = cpv
+ metadata[_instance_key(cpv)] = d
+ path = d.get("PATH")
+ if not path:
+ path = cpv + ".tbz2"
+
+ if reindex:
+ basename = os.path.basename(path)
+ basename_index.setdefault(basename, []).append(d)
+ else:
+ instance_key = _instance_key(cpv)
+ pkg_paths[instance_key] = path
+ self.dbapi.cpv_inject(cpv)
+
+ update_pkgindex = False
+ for mydir, file_names in dir_files.items():
+ try:
+ mydir = _unicode_decode(mydir,
+ encoding=_encodings["fs"], errors="strict")
+ except UnicodeDecodeError:
+ continue
+ for myfile in file_names:
+ try:
+ myfile = _unicode_decode(myfile,
+ encoding=_encodings["fs"], errors="strict")
+ except UnicodeDecodeError:
+ continue
+ if not myfile.endswith(SUPPORTED_XPAK_EXTENSIONS):
+ continue
+ mypath = os.path.join(mydir, myfile)
+ full_path = os.path.join(self.pkgdir, mypath)
+ s = os.lstat(full_path)
+
+ if not stat.S_ISREG(s.st_mode):
+ continue
+
+ # Validate data from the package index and try to avoid
+ # reading the xpak if possible.
+ possibilities = basename_index.get(myfile)
+ if possibilities:
+ match = None
+ for d in possibilities:
+ try:
+ if long(d["_mtime_"]) != s[stat.ST_MTIME]:
+ continue
+ except (KeyError, ValueError):
+ continue
+ try:
+ if long(d["SIZE"]) != long(s.st_size):
+ continue
+ except (KeyError, ValueError):
+ continue
+ if not minimum_keys.difference(d):
+ match = d
+ break
+ if match:
+ mycpv = match["CPV"]
+ instance_key = _instance_key(mycpv)
+ pkg_paths[instance_key] = mypath
+ # update the path if the package has been moved
+ oldpath = d.get("PATH")
+ if oldpath and oldpath != mypath:
+ update_pkgindex = True
+ # Omit PATH if it is the default path for
+ # the current Packages format version.
+ if mypath != mycpv + ".tbz2":
+ d["PATH"] = mypath
+ if not oldpath:
+ update_pkgindex = True
+ else:
+ d.pop("PATH", None)
+ if oldpath:
+ update_pkgindex = True
+ self.dbapi.cpv_inject(mycpv)
+ continue
+ if not os.access(full_path, os.R_OK):
+ writemsg(_("!!! Permission denied to read " \
+ "binary package: '%s'\n") % full_path,
+ noiselevel=-1)
+ self.invalids.append(myfile[:-5])
+ continue
+ pkg_metadata = self._read_metadata(full_path, s,
+ keys=chain(self.dbapi._aux_cache_keys,
+ ("PF", "CATEGORY")))
+ mycat = pkg_metadata.get("CATEGORY", "")
+ mypf = pkg_metadata.get("PF", "")
+ slot = pkg_metadata.get("SLOT", "")
+ mypkg = myfile[:-5]
+ if not mycat or not mypf or not slot:
+ #old-style or corrupt package
+ writemsg(_("\n!!! Invalid binary package: '%s'\n") % full_path,
+ noiselevel=-1)
+ missing_keys = []
+ if not mycat:
+ missing_keys.append("CATEGORY")
+ if not mypf:
+ missing_keys.append("PF")
+ if not slot:
+ missing_keys.append("SLOT")
+ msg = []
+ if missing_keys:
+ missing_keys.sort()
+ msg.append(_("Missing metadata key(s): %s.") % \
+ ", ".join(missing_keys))
+ msg.append(_(" This binary package is not " \
+ "recoverable and should be deleted."))
+ for line in textwrap.wrap("".join(msg), 72):
+ writemsg("!!! %s\n" % line, noiselevel=-1)
+ self.invalids.append(mypkg)
+ continue
+
+ multi_instance = False
+ invalid_name = False
+ build_id = None
+ if myfile.endswith(".xpak"):
+ multi_instance = True
+ build_id = self._parse_build_id(myfile)
+ if build_id < 1:
+ invalid_name = True
+ elif myfile != "%s-%s.xpak" % (
+ mypf, build_id):
+ invalid_name = True
+ else:
+ mypkg = mypkg[:-len(str(build_id))-1]
+ elif myfile != mypf + ".tbz2":
+ invalid_name = True
+
+ if invalid_name:
+ writemsg(_("\n!!! Binary package name is "
+ "invalid: '%s'\n") % full_path,
+ noiselevel=-1)
+ continue
+
+ if pkg_metadata.get("BUILD_ID"):
+ try:
+ build_id = long(pkg_metadata["BUILD_ID"])
+ except ValueError:
+ writemsg(_("!!! Binary package has "
+ "invalid BUILD_ID: '%s'\n") %
+ full_path, noiselevel=-1)
+ continue
+ else:
+ build_id = None
+
+ if multi_instance:
+ name_split = catpkgsplit("%s/%s" %
+ (mycat, mypf))
+ if (name_split is None or
+ tuple(catsplit(mydir)) != name_split[:2]):
+ continue
+ elif mycat != mydir and mydir != "All":
+ continue
+ if mypkg != mypf.strip():
+ continue
+ mycpv = mycat + "/" + mypkg
+ if not self.dbapi._category_re.match(mycat):
+ writemsg(_("!!! Binary package has an " \
+ "unrecognized category: '%s'\n") % full_path,
+ noiselevel=-1)
+ writemsg(_("!!! '%s' has a category that is not" \
+ " listed in %setc/portage/categories\n") % \
+ (mycpv, self.settings["PORTAGE_CONFIGROOT"]),
+ noiselevel=-1)
+ continue
+ if build_id is not None:
+ pkg_metadata["BUILD_ID"] = _unicode(build_id)
+ pkg_metadata["SIZE"] = _unicode(s.st_size)
+ # Discard items used only for validation above.
+ pkg_metadata.pop("CATEGORY")
+ pkg_metadata.pop("PF")
+ mycpv = _pkg_str(mycpv,
+ metadata=self.dbapi._aux_cache_slot_dict(pkg_metadata),
+ db=self.dbapi)
+ pkg_paths[_instance_key(mycpv)] = mypath
+ self.dbapi.cpv_inject(mycpv)
+ update_pkgindex = True
+ d = metadata.get(_instance_key(mycpv),
+ pkgindex._pkg_slot_dict())
+ if d:
+ try:
+ if long(d["_mtime_"]) != s[stat.ST_MTIME]:
+ d.clear()
+ except (KeyError, ValueError):
+ d.clear()
+ if d:
+ try:
+ if long(d["SIZE"]) != long(s.st_size):
+ d.clear()
+ except (KeyError, ValueError):
+ d.clear()
+
+ for k in self._pkgindex_allowed_pkg_keys:
+ v = pkg_metadata.get(k)
+ if v:
+ d[k] = v
+ d["CPV"] = mycpv
+
+ try:
+ self._eval_use_flags(mycpv, d)
+ except portage.exception.InvalidDependString:
+ writemsg(_("!!! Invalid binary package: '%s'\n") % \
+ self.getname(mycpv), noiselevel=-1)
+ self.dbapi.cpv_remove(mycpv)
+ del pkg_paths[_instance_key(mycpv)]
+
+ # record location if it's non-default
+ if mypath != mycpv + ".tbz2":
+ d["PATH"] = mypath
+ else:
+ d.pop("PATH", None)
+ metadata[_instance_key(mycpv)] = d
+
+ if reindex:
+ for instance_key in list(metadata):
+ if instance_key not in pkg_paths:
+ del metadata[instance_key]
+
+ if update_pkgindex:
+ del pkgindex.packages[:]
+ pkgindex.packages.extend(iter(metadata.values()))
+ self._update_pkgindex_header(pkgindex.header)
+
+ self._pkgindex_header = {}
+ self._merge_pkgindex_header(pkgindex.header,
+ self._pkgindex_header)
+
+ return pkgindex if update_pkgindex else None
+
+ def _populate_remote(self, getbinpkg_refresh=True):
+
+ self._remote_has_index = False
+ self._remotepkgs = {}
+ for base_url in self.settings["PORTAGE_BINHOST"].split():
+ parsed_url = urlparse(base_url)
+ host = parsed_url.netloc
+ port = parsed_url.port
+ user = None
+ passwd = None
+ user_passwd = ""
+ if "@" in host:
+ user, host = host.split("@", 1)
+ user_passwd = user + "@"
+ if ":" in user:
+ user, passwd = user.split(":", 1)
+
+ if port is not None:
+ port_str = ":%s" % (port,)
+ if host.endswith(port_str):
+ host = host[:-len(port_str)]
+ pkgindex_file = os.path.join(self.settings["EROOT"], CACHE_PATH, "binhost",
+ host, parsed_url.path.lstrip("/"), "Packages")
+ pkgindex = self._new_pkgindex(objectsstor=True)
+ try:
+ f = io.open(_unicode_encode(pkgindex_file,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace')
+ try:
+ pkgindex.read(f)
+ finally:
+ f.close()
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ local_timestamp = pkgindex.header.get("TIMESTAMP", None)
+ try:
+ download_timestamp = \
+ float(pkgindex.header.get("DOWNLOAD_TIMESTAMP", 0))
+ except ValueError:
+ download_timestamp = 0
+ remote_timestamp = None
+ rmt_idx = self._new_pkgindex()
+ proc = None
+ tmp_filename = None
+ try:
+ # urlparse.urljoin() only works correctly with recognized
+ # protocols and requires the base url to have a trailing
+ # slash, so join manually...
+ url = base_url.rstrip("/") + "/Packages"
+ f = None
+
+ if not getbinpkg_refresh and local_timestamp:
+ raise UseCachedCopyOfRemoteIndex()
+
+ try:
+ ttl = float(pkgindex.header.get("TTL", 0))
+ except ValueError:
+ pass
+ else:
+ if download_timestamp and ttl and \
+ download_timestamp + ttl > time.time():
+ raise UseCachedCopyOfRemoteIndex()
+
+ # Don't use urlopen for https, unless
+ # PEP 476 is supported (bug #469888).
+ if parsed_url.scheme not in ('https',) or _have_pep_476():
+ try:
+ f = _urlopen(url, if_modified_since=local_timestamp)
+ if hasattr(f, 'headers') and f.headers.get('timestamp', ''):
+ remote_timestamp = f.headers.get('timestamp')
+ except IOError as err:
+ if hasattr(err, 'code') and err.code == 304: # not modified (since local_timestamp)
+ raise UseCachedCopyOfRemoteIndex()
+
+ if parsed_url.scheme in ('ftp', 'http', 'https'):
+ # This protocol is supposedly supported by urlopen,
+ # so apparently there's a problem with the url
+ # or a bug in urlopen.
+ if self.settings.get("PORTAGE_DEBUG", "0") != "0":
+ traceback.print_exc()
+
+ raise
+ except ValueError:
+ raise ParseError("Invalid Portage BINHOST value '%s'"
+ % url.lstrip())
+
+ if f is None:
+
+ path = parsed_url.path.rstrip("/") + "/Packages"
+
+ if parsed_url.scheme == 'ssh':
+ # Use a pipe so that we can terminate the download
+ # early if we detect that the TIMESTAMP header
+ # matches that of the cached Packages file.
+ ssh_args = ['ssh']
+ if port is not None:
+ ssh_args.append("-p%s" % (port,))
+ # NOTE: shlex evaluates embedded quotes
+ ssh_args.extend(portage.util.shlex_split(
+ self.settings.get("PORTAGE_SSH_OPTS", "")))
+ ssh_args.append(user_passwd + host)
+ ssh_args.append('--')
+ ssh_args.append('cat')
+ ssh_args.append(path)
+
+ proc = subprocess.Popen(ssh_args,
+ stdout=subprocess.PIPE)
+ f = proc.stdout
+ else:
+ setting = 'FETCHCOMMAND_' + parsed_url.scheme.upper()
+ fcmd = self.settings.get(setting)
+ if not fcmd:
+ fcmd = self.settings.get('FETCHCOMMAND')
+ if not fcmd:
+ raise EnvironmentError("FETCHCOMMAND is unset")
+
+ fd, tmp_filename = tempfile.mkstemp()
+ tmp_dirname, tmp_basename = os.path.split(tmp_filename)
+ os.close(fd)
+
+ fcmd_vars = {
+ "DISTDIR": tmp_dirname,
+ "FILE": tmp_basename,
+ "URI": url
+ }
+
+ for k in ("PORTAGE_SSH_OPTS",):
+ v = self.settings.get(k)
+ if v is not None:
+ fcmd_vars[k] = v
+
+ success = portage.getbinpkg.file_get(
+ fcmd=fcmd, fcmd_vars=fcmd_vars)
+ if not success:
+ raise EnvironmentError("%s failed" % (setting,))
+ f = open(tmp_filename, 'rb')
+
+ f_dec = codecs.iterdecode(f,
+ _encodings['repo.content'], errors='replace')
+ try:
+ rmt_idx.readHeader(f_dec)
+ if not remote_timestamp: # in case it had not been read from HTTP header
+ remote_timestamp = rmt_idx.header.get("TIMESTAMP", None)
+ if not remote_timestamp:
+ # no timestamp in the header, something's wrong
+ pkgindex = None
+ writemsg(_("\n\n!!! Binhost package index " \
+ " has no TIMESTAMP field.\n"), noiselevel=-1)
+ else:
+ if not self._pkgindex_version_supported(rmt_idx):
+ writemsg(_("\n\n!!! Binhost package index version" \
+ " is not supported: '%s'\n") % \
+ rmt_idx.header.get("VERSION"), noiselevel=-1)
+ pkgindex = None
+ elif local_timestamp != remote_timestamp:
+ rmt_idx.readBody(f_dec)
+ pkgindex = rmt_idx
+ finally:
+ # Timeout after 5 seconds, in case close() blocks
+ # indefinitely (see bug #350139).
+ try:
+ try:
+ AlarmSignal.register(5)
+ f.close()
+ finally:
+ AlarmSignal.unregister()
+ except AlarmSignal:
+ writemsg("\n\n!!! %s\n" % \
+ _("Timed out while closing connection to binhost"),
+ noiselevel=-1)
+ except UseCachedCopyOfRemoteIndex:
+ writemsg_stdout("\n")
+ writemsg_stdout(
+ colorize("GOOD", _("Local copy of remote index is up-to-date and will be used.")) + \
+ "\n")
+ rmt_idx = pkgindex
+ except EnvironmentError as e:
+ # This includes URLError which is raised for SSL
+ # certificate errors when PEP 476 is supported.
+ writemsg(_("\n\n!!! Error fetching binhost package" \
+ " info from '%s'\n") % _hide_url_passwd(base_url))
+ # With Python 2, the EnvironmentError message may
+ # contain bytes or unicode, so use _unicode to ensure
+ # safety with all locales (bug #532784).
+ try:
+ error_msg = _unicode(e)
+ except UnicodeDecodeError as uerror:
+ error_msg = _unicode(uerror.object,
+ encoding='utf_8', errors='replace')
+ writemsg("!!! %s\n\n" % error_msg)
+ del e
+ pkgindex = None
+ if proc is not None:
+ if proc.poll() is None:
+ proc.kill()
+ proc.wait()
+ proc = None
+ if tmp_filename is not None:
+ try:
+ os.unlink(tmp_filename)
+ except OSError:
+ pass
+ if pkgindex is rmt_idx:
+ pkgindex.modified = False # don't update the header
+ pkgindex.header["DOWNLOAD_TIMESTAMP"] = "%d" % time.time()
+ try:
+ ensure_dirs(os.path.dirname(pkgindex_file))
+ f = atomic_ofstream(pkgindex_file)
+ pkgindex.write(f)
+ f.close()
+ except (IOError, PortageException):
+ if os.access(os.path.dirname(pkgindex_file), os.W_OK):
+ raise
+ # The current user doesn't have permission to cache the
+ # file, but that's alright.
+ if pkgindex:
+ remote_base_uri = pkgindex.header.get("URI", base_url)
+ for d in pkgindex.packages:
+ cpv = _pkg_str(d["CPV"], metadata=d,
+ settings=self.settings, db=self.dbapi)
+ # Local package instances override remote instances
+ # with the same instance_key.
+ if self.dbapi.cpv_exists(cpv):
+ continue
+
+ d["CPV"] = cpv
+ d["BASE_URI"] = remote_base_uri
+ d["PKGINDEX_URI"] = url
+ self._remotepkgs[self.dbapi._instance_key(cpv)] = d
+ self.dbapi.cpv_inject(cpv)
+
+ self._remote_has_index = True
+ self._merge_pkgindex_header(pkgindex.header,
+ self._pkgindex_header)
+
+ def _populate_additional(self, repos):
+ for repo in repos:
+ aux_keys = list(set(chain(repo._aux_cache_keys, repo._pkg_str_aux_keys)))
+ for cpv in repo.cpv_all():
+ metadata = dict(zip(aux_keys, repo.aux_get(cpv, aux_keys)))
+ pkg = _pkg_str(cpv, metadata=metadata, settings=repo.settings, db=repo)
+ instance_key = self.dbapi._instance_key(pkg)
+ self._additional_pkgs[instance_key] = pkg
+ self.dbapi.cpv_inject(pkg)
+
+ def inject(self, cpv, filename=None, objectsstor=False):
+ """Add a freshly built package to the database. This updates
+ $PKGDIR/Packages with the new package metadata (including MD5).
+ @param cpv: The cpv of the new package to inject
+ @type cpv: string
+ @param filename: File path of the package to inject, or None if it's
+ already in the location returned by getname()
+ @type filename: string
+ @rtype: _pkg_str or None
+ @return: A _pkg_str instance on success, or None on failure.
+ """
+ mycat, mypkg = catsplit(cpv)
+ if not self.populated:
+ self.populate()
+ if filename is None:
+ full_path = self.getname(cpv)
+ else:
+ full_path = filename
+ try:
+ s = os.stat(full_path)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ writemsg(_("!!! Binary package does not exist: '%s'\n") % full_path,
+ noiselevel=-1)
+ return
+ metadata = self._read_metadata(full_path, s)
+ invalid_depend = False
+ try:
+ self._eval_use_flags(cpv, metadata)
+ except portage.exception.InvalidDependString:
+ invalid_depend = True
+ if invalid_depend or not metadata.get("SLOT"):
+ writemsg(_("!!! Invalid binary package: '%s'\n") % full_path,
+ noiselevel=-1)
+ return
+
+ fetched = False
+ try:
+ build_id = cpv.build_id
+ except AttributeError:
+ build_id = None
+ else:
+ instance_key = self.dbapi._instance_key(cpv)
+ if instance_key in self.dbapi.cpvdict:
+ # This means we've been called by aux_update (or
+ # similar). The instance key typically changes (due to
+ # file modification), so we need to discard existing
+ # instance key references.
+ self.dbapi.cpv_remove(cpv)
+ self._pkg_paths.pop(instance_key, None)
+ if self._remotepkgs is not None:
+ fetched = self._remotepkgs.pop(instance_key, None)
+
+ cpv = _pkg_str(cpv, metadata=metadata, settings=self.settings,
+ db=self.dbapi)
+
+ # Reread the Packages index (in case it's been changed by another
+ # process) and then update it, all while holding a lock.
+ pkgindex_lock = None
+ try:
+ pkgindex_lock = lockfile(self._pkgindex_file,
+ wantnewlockfile=1)
+ if filename is not None:
+ new_filename = self.getname(cpv, allocate_new=True)
+ try:
+ samefile = os.path.samefile(filename, new_filename)
+ except OSError:
+ samefile = False
+ if not samefile:
+ self._ensure_dir(os.path.dirname(new_filename))
+ _movefile(filename, new_filename, mysettings=self.settings)
+ full_path = new_filename
+
+ basename = os.path.basename(full_path)
+ pf = catsplit(cpv)[1]
+ if (build_id is None and not fetched and
+ basename.endswith(".xpak")):
+ # Apply the newly assigned BUILD_ID. This is intended
+ # to occur only for locally built packages. If the
+ # package was fetched, we want to preserve its
+ # attributes, so that we can later distinguish that it
+ # is identical to its remote counterpart.
+ build_id = self._parse_build_id(basename)
+ metadata["BUILD_ID"] = _unicode(build_id)
+ cpv = _pkg_str(cpv, metadata=metadata,
+ settings=self.settings, db=self.dbapi)
+ binpkg = portage.xpak.tbz2(full_path)
+ binary_data = binpkg.get_data()
+ binary_data[b"BUILD_ID"] = _unicode_encode(
+ metadata["BUILD_ID"])
+ binpkg.recompose_mem(portage.xpak.xpak_mem(binary_data))
+
+ self._file_permissions(full_path)
+ pkgindex = self._load_pkgindex()
+ if not self._pkgindex_version_supported(pkgindex):
+ pkgindex = self._new_pkgindex()
+
+ d = self._inject_file(pkgindex, cpv, full_path)
+ self._update_pkgindex_header(pkgindex.header)
+ self._pkgindex_write(pkgindex, d)
+
+ finally:
+ if pkgindex_lock:
+ unlockfile(pkgindex_lock)
+
+ # This is used to record BINPKGMD5 in the installed package
+ # database, for a package that has just been built.
+ cpv._metadata["MD5"] = d["MD5"]
+
+ return cpv
+
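+ # Usage sketch for inject() above (the `bintree` variable, cpv and
+ # file path are hypothetical, for illustration only):
+ #
+ #   cpv = bintree.inject("app-misc/foo-1.0",
+ #       filename="/var/cache/binpkgs/app-misc/foo-1.0.tbz2")
+ #   if cpv is not None:
+ #       md5 = cpv._metadata["MD5"]
+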
+ def _read_metadata(self, filename, st, keys=None):
+ """
+ Read metadata from a binary package. The returned metadata
+ dictionary will contain empty strings for any values that
+ are undefined (this is important because the _pkg_str class
+ distinguishes between missing and undefined values).
+
+ @param filename: File path of the binary package
+ @type filename: string
+ @param st: stat result for the binary package
+ @type st: os.stat_result
+ @param keys: optional list of specific metadata keys to retrieve
+ @type keys: iterable
+ @rtype: dict
+ @return: package metadata
+ """
+ if keys is None:
+ keys = self.dbapi._aux_cache_keys
+ metadata = self.dbapi._aux_cache_slot_dict()
+ else:
+ metadata = {}
+ binary_metadata = portage.xpak.tbz2(filename).get_data()
+ for k in keys:
+ if k == "_mtime_":
+ metadata[k] = _unicode(st[stat.ST_MTIME])
+ elif k == "SIZE":
+ metadata[k] = _unicode(st.st_size)
+ else:
+ v = binary_metadata.get(_unicode_encode(k))
+ if v is None:
+ if k == "EAPI":
+ metadata[k] = "0"
+ else:
+ metadata[k] = ""
+ else:
+ v = _unicode_decode(v)
+ metadata[k] = " ".join(v.split())
+ return metadata
+
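+ # Shape of the dict returned by _read_metadata() above (values are
+ # illustrative); undefined keys come back as empty strings rather
+ # than being omitted:
+ #
+ #   {"EAPI": "7", "SLOT": "0", "SIZE": "123456", "RESTRICT": ""}
+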
+ def _inject_file(self, pkgindex, cpv, filename):
+ """
+ Add a package to internal data structures, and add an
+ entry to the given pkgindex.
+ @param pkgindex: The PackageIndex instance to which an entry
+ will be added.
+ @type pkgindex: PackageIndex
+ @param cpv: A _pkg_str instance corresponding to the package
+ being injected.
+ @type cpv: _pkg_str
+ @param filename: Absolute file path of the package to inject.
+ @type filename: string
+ @rtype: dict
+ @return: A dict corresponding to the new entry which has been
+ added to pkgindex. This may be used to access the checksums
+ which have just been generated.
+ """
+ # Update state for future isremote calls.
+ instance_key = self.dbapi._instance_key(cpv)
+ if self._remotepkgs is not None:
+ self._remotepkgs.pop(instance_key, None)
+
+ self.dbapi.cpv_inject(cpv)
+ self._pkg_paths[instance_key] = filename[len(self.pkgdir)+1:]
+ d = self._pkgindex_entry(cpv)
+
+ # If found, remove package(s) with duplicate path.
+ path = d.get("PATH", "")
+ for i in range(len(pkgindex.packages) - 1, -1, -1):
+ d2 = pkgindex.packages[i]
+ if path and path == d2.get("PATH"):
+ # Handle path collisions in $PKGDIR/All
+ # when CPV is not identical.
+ del pkgindex.packages[i]
+ elif cpv == d2.get("CPV"):
+ if path == d2.get("PATH", ""):
+ del pkgindex.packages[i]
+
+ pkgindex.packages.append(d)
+ return d
+
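+ # The duplicate handling in _inject_file() above keeps at most one
+ # index entry per on-disk PATH: an older entry is dropped when it
+ # shares PATH with the new one, or when it has the same CPV and the
+ # same (possibly empty) PATH as the new entry.
+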
+ def _pkgindex_write(self, pkgindex, pkg):
+ pkgindex.write(pkg)
+
+ def _pkgindex_entry(self, cpv):
+ """
+ Performs checksums, and gets size and mtime via lstat.
+ Raises InvalidDependString if necessary.
+ @rtype: dict
+ @return: a dict containing the entry for the given cpv.
+ """
+
+ pkg_path = self.getname(cpv)
+
+ d = dict(cpv._metadata.items())
+ d.update(perform_multiple_checksums(
+ pkg_path, hashes=self._pkgindex_hashes))
+
+ d["CPV"] = cpv
+ st = os.lstat(pkg_path)
+ d["_mtime_"] = _unicode(st[stat.ST_MTIME])
+ d["SIZE"] = _unicode(st.st_size)
+
+ rel_path = pkg_path[len(self.pkgdir)+1:]
+ # record location if it's non-default
+ if rel_path != cpv + ".tbz2":
+ d["PATH"] = rel_path
+
+ return d
+
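+ # A typical entry built by _pkgindex_entry() above (values are
+ # hypothetical) carries the package metadata plus checksums and
+ # stat data, e.g.:
+ #
+ #   {"CPV": "app-misc/foo-1.0", "SIZE": "123456",
+ #    "_mtime_": "1589664000", "MD5": "<hex digest>",
+ #    "PATH": "app-misc/foo/foo-1.0-1.xpak"}
+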
+ def _new_pkgindex(self, objectsstor=False):
+ return PackageIndex(
+ allowed_pkg_keys=self._pkgindex_allowed_pkg_keys,
+ default_header_data=self._pkgindex_default_header_data,
+ default_pkg_data=self._pkgindex_default_pkg_data,
+ inherited_keys=self._pkgindex_inherited_keys,
+ translated_keys=self._pkgindex_translated_keys,
+ objectsstor=objectsstor,
+ context=self.context,
+ )
+
+ @staticmethod
+ def _merge_pkgindex_header(src, dest):
+ """
+ Merge Packages header settings from src to dest, in order to
+ propagate implicit IUSE and USE_EXPAND settings for use with
+ binary and installed packages. Values are appended, so the
+ result is a union of elements from src and dest.
+
+ Pull in ARCH if it's not defined, since it's used for validation
+ by emerge's profile_check function, and also for KEYWORDS logic
+ in the _getmaskingstatus function.
+
+ @param src: source mapping (read only)
+ @type src: Mapping
+ @param dest: destination mapping
+ @type dest: MutableMapping
+ """
+ for k, v in iter_iuse_vars(src):
+ v_before = dest.get(k)
+ if v_before is not None:
+ merged_values = set(v_before.split())
+ merged_values.update(v.split())
+ v = ' '.join(sorted(merged_values))
+ dest[k] = v
+
+ if 'ARCH' not in dest and 'ARCH' in src:
+ dest['ARCH'] = src['ARCH']
+
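+ # Worked example for _merge_pkgindex_header() above (hypothetical
+ # values):
+ #
+ #   src  = {"USE_EXPAND": "python_targets", "ARCH": "amd64"}
+ #   dest = {"USE_EXPAND": "cpu_flags_x86"}
+ #   _merge_pkgindex_header(src, dest)
+ #   # dest == {"USE_EXPAND": "cpu_flags_x86 python_targets",
+ #   #          "ARCH": "amd64"}
+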
+ def _propagate_config(self, config):
+ """
+ Propagate implicit IUSE and USE_EXPAND settings from the binary
+ package database to a config instance. If settings are not
+ available to propagate, then this will do nothing and return
+ False.
+
+ @param config: config instance
+ @type config: portage.config
+ @rtype: bool
+ @return: True if settings successfully propagated, False if settings
+ were not available to propagate.
+ """
+ if self._pkgindex_header is None:
+ return False
+
+ self._merge_pkgindex_header(self._pkgindex_header,
+ config.configdict['defaults'])
+ config.regenerate()
+ config._init_iuse()
+ return True
+
+ def _update_pkgindex_header(self, header):
+ """
+ Add useful settings to the Packages file header, for use by
+ binhost clients.
+
+ This returns silently if the current profile is invalid or does
+ not define an IUSE_IMPLICIT variable; in that case only a default
+ VERSION is ensured and the rest of the header is left untouched,
+ so previously cached implicit IUSE settings for binary packages
+ remain usable.
+ """
+ if not (self.settings.profile_path and
+ "IUSE_IMPLICIT" in self.settings):
+ header.setdefault("VERSION", _unicode(self._pkgindex_version))
+ return
+
+ portdir = normalize_path(os.path.realpath(self.settings["PORTDIR"]))
+ profiles_base = os.path.join(portdir, "profiles") + os.path.sep
+ if self.settings.profile_path:
+ profile_path = normalize_path(
+ os.path.realpath(self.settings.profile_path))
+ if profile_path.startswith(profiles_base):
+ profile_path = profile_path[len(profiles_base):]
+ header["PROFILE"] = profile_path
+ header["VERSION"] = _unicode(self._pkgindex_version)
+ base_uri = self.settings.get("PORTAGE_BINHOST_HEADER_URI")
+ if base_uri:
+ header["URI"] = base_uri
+ else:
+ header.pop("URI", None)
+ for k in list(self._pkgindex_header_keys) + \
+ self.settings.get("USE_EXPAND_IMPLICIT", "").split() + \
+ self.settings.get("USE_EXPAND_UNPREFIXED", "").split():
+ v = self.settings.get(k, None)
+ if v:
+ header[k] = v
+ else:
+ header.pop(k, None)
+
+ # These values may be useful for using a binhost without
+ # having a local copy of the profile (bug #470006).
+ for k in self.settings.get("USE_EXPAND_IMPLICIT", "").split():
+ k = "USE_EXPAND_VALUES_" + k
+ v = self.settings.get(k)
+ if v:
+ header[k] = v
+ else:
+ header.pop(k, None)
+
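+ # After _update_pkgindex_header() above runs with a valid profile,
+ # the Packages header typically ends up with fields such as
+ # (hypothetical values):
+ #
+ #   VERSION, PROFILE (e.g. "default/linux/amd64/17.1"), ARCH,
+ #   IUSE_IMPLICIT, USE_EXPAND_IMPLICIT, USE_EXPAND_VALUES_* and,
+ #   when PORTAGE_BINHOST_HEADER_URI is set, URI.
+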
+ def _pkgindex_version_supported(self, pkgindex):
+ version = pkgindex.header.get("VERSION")
+ if version:
+ try:
+ if int(version) <= self._pkgindex_version:
+ return True
+ except ValueError:
+ pass
+ return False
+
+ def _eval_use_flags(self, cpv, metadata):
+ use = frozenset(metadata.get("USE", "").split())
+ for k in self._pkgindex_use_evaluated_keys:
+ if k.endswith('DEPEND'):
+ token_class = Atom
+ else:
+ token_class = None
+
+ deps = metadata.get(k)
+ if deps is None:
+ continue
+ try:
+ deps = use_reduce(deps, uselist=use, token_class=token_class)
+ deps = paren_enclose(deps)
+ except portage.exception.InvalidDependString as e:
+ writemsg("%s: %s\n" % (k, e), noiselevel=-1)
+ raise
+ metadata[k] = deps
+
+ def exists_specific(self, cpv):
+ if not self.populated:
+ self.populate()
+ return self.dbapi.match(
+ dep_expand("="+cpv, mydb=self.dbapi, settings=self.settings))
+
+ def dep_bestmatch(self, mydep):
+ "compatibility method -- all matches, not just visible ones"
+ if not self.populated:
+ self.populate()
+ writemsg("\n\n", 1)
+ writemsg("mydep: %s\n" % mydep, 1)
+ mydep = dep_expand(mydep, mydb=self.dbapi, settings=self.settings)
+ writemsg("mydep: %s\n" % mydep, 1)
+ mykey = dep_getkey(mydep)
+ writemsg("mykey: %s\n" % mykey, 1)
+ mymatch = best(match_from_list(mydep,self.dbapi.cp_list(mykey)))
+ writemsg("mymatch: %s\n" % mymatch, 1)
+ if mymatch is None:
+ return ""
+ return mymatch
+
+ def getname(self, cpv, allocate_new=None):
+ """Returns a file location for this package.
+ If cpv has both build_time and build_id attributes, then the
+ path to the specific corresponding instance is returned.
+ Otherwise, allocate a new path and return that. When allocating
+ a new path, behavior depends on the binpkg-multi-instance
+ FEATURES setting.
+ """
+ if not self.populated:
+ self.populate()
+
+ try:
+ cpv.cp
+ except AttributeError:
+ cpv = _pkg_str(cpv)
+
+ filename = None
+ if allocate_new:
+ filename = self._allocate_filename(cpv)
+ elif self._is_specific_instance(cpv):
+ instance_key = self.dbapi._instance_key(cpv)
+ path = self._pkg_paths.get(instance_key)
+ if path is not None:
+ filename = os.path.join(self.pkgdir, path)
+ if filename is None and not allocate_new:
+ try:
+ instance_key = self.dbapi._instance_key(cpv,
+ support_string=True)
+ except KeyError:
+ pass
+ else:
+ filename = self._pkg_paths.get(instance_key)
+ if filename is not None:
+ filename = os.path.join(self.pkgdir, filename)
+ elif instance_key in self._additional_pkgs:
+ return None
+ if filename is None:
+ if self._multi_instance:
+ pf = catsplit(cpv)[1]
+ filename = "%s-%s.xpak" % (
+ os.path.join(self.pkgdir, cpv.cp, pf), "1")
+ else:
+ filename = os.path.join(self.pkgdir, cpv + ".tbz2")
+
+ return filename
+
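+ # Usage sketch for getname() above (the `bintree` variable and cpv
+ # are hypothetical):
+ #
+ #   path = bintree.getname("app-misc/foo-1.0", allocate_new=True)
+ #
+ # allocates a path under self.pkgdir for a new package file, while a
+ # plain getname(cpv) returns the recorded path of an already-known
+ # instance when one exists, falling back to a default location.
+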
+ def _is_specific_instance(self, cpv):
+ specific = True
+ try:
+ build_time = cpv.build_time
+ build_id = cpv.build_id
+ except AttributeError:
+ specific = False
+ else:
+ if build_time is None or build_id is None:
+ specific = False
+ return specific
+
+ def _max_build_id(self, cpv):
+ max_build_id = 0
+ for x in self.dbapi.cp_list(cpv.cp):
+ if (x == cpv and x.build_id is not None and
+ x.build_id > max_build_id):
+ max_build_id = x.build_id
+ return max_build_id
+
+ def _allocate_filename(self, cpv):
+ return os.path.join(self.pkgdir, cpv + ".tbz2")
+
+ def _allocate_filename_multi(self, cpv):
+
+ # First, get the max build_id found when _populate was
+ # called.
+ max_build_id = self._max_build_id(cpv)
+
+ # A new package may have been added concurrently since the
+ # last _populate call, so keep incrementing build_id until
+ # we locate an unused id.
+ pf = catsplit(cpv)[1]
+ build_id = max_build_id + 1
+
+ while True:
+ filename = "%s-%s.xpak" % (
+ os.path.join(self.pkgdir, cpv.cp, pf), build_id)
+ if os.path.exists(filename):
+ build_id += 1
+ else:
+ return filename
+
+ @staticmethod
+ def _parse_build_id(filename):
+ build_id = -1
+ suffixlen = len(".xpak")
+ hyphen = filename.rfind("-", 0, -(suffixlen + 1))
+ if hyphen != -1:
+ build_id = filename[hyphen+1:-suffixlen]
+ try:
+ build_id = int(build_id)
+ except ValueError:
+ pass
+ return build_id
+
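+ # Example for _parse_build_id() above (hypothetical file name):
+ # "foo-1.0-3.xpak" yields build id 3, i.e. the numeric suffix between
+ # the final hyphen and the ".xpak" extension; -1 is returned when no
+ # such hyphen is found.
+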
+ def isremote(self, pkgname):
+ """Returns true if the package is kept remotely and it has not been
+ downloaded (or it is only partially downloaded)."""
+ if self._remotepkgs is None:
+ return False
+ instance_key = self.dbapi._instance_key(pkgname)
+ if instance_key not in self._remotepkgs:
+ return False
+ elif instance_key in self._additional_pkgs:
+ return False
+ # Presence in self._remotepkgs implies that it's remote. When a
+ # package is downloaded, state is updated by self.inject().
+ return True
+
+ def get_pkgindex_uri(self, cpv):
+ """Returns the URI to the Packages file for a given package."""
+ uri = None
+ if self._remotepkgs is not None:
+ metadata = self._remotepkgs.get(self.dbapi._instance_key(cpv))
+ if metadata is not None:
+ uri = metadata["PKGINDEX_URI"]
+ return uri
+
+ def gettbz2(self, pkgname):
+ """Fetches the package from a remote site, if necessary. Attempts to
+ resume if the file appears to be partially downloaded."""
+ instance_key = self.dbapi._instance_key(pkgname)
+ tbz2_path = self.getname(pkgname)
+ tbz2name = os.path.basename(tbz2_path)
+ resume = False
+ if os.path.exists(tbz2_path):
+ if tbz2name[:-5] not in self.invalids:
+ return
+ else:
+ resume = True
+ writemsg(_("Resuming download of this tbz2, but it is possible that it is corrupt.\n"),
+ noiselevel=-1)
+
+ mydest = os.path.dirname(self.getname(pkgname))
+ self._ensure_dir(mydest)
+ # urljoin doesn't work correctly with unrecognized protocols like sftp
+ if self._remote_has_index:
+ rel_url = self._remotepkgs[instance_key].get("PATH")
+ if not rel_url:
+ rel_url = pkgname + ".tbz2"
+ remote_base_uri = self._remotepkgs[instance_key]["BASE_URI"]
+ url = remote_base_uri.rstrip("/") + "/" + rel_url.lstrip("/")
+ else:
+ url = self.settings["PORTAGE_BINHOST"].rstrip("/") + "/" + tbz2name
+ protocol = urlparse(url)[0]
+ fcmd_prefix = "FETCHCOMMAND"
+ if resume:
+ fcmd_prefix = "RESUMECOMMAND"
+ fcmd = self.settings.get(fcmd_prefix + "_" + protocol.upper())
+ if not fcmd:
+ fcmd = self.settings.get(fcmd_prefix)
+ success = portage.getbinpkg.file_get(url, mydest, fcmd=fcmd)
+ if not success:
+ try:
+ os.unlink(self.getname(pkgname))
+ except OSError:
+ pass
+ raise portage.exception.FileNotFound(mydest)
+ self.inject(pkgname)
+
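+ # Fetch-command selection in gettbz2() above, sketched with a
+ # hypothetical URL: for "https://binhost.example.org/foo-1.0.tbz2"
+ # the protocol is "https", so FETCHCOMMAND_HTTPS is preferred and
+ # FETCHCOMMAND is the fallback; the RESUMECOMMAND variants are used
+ # instead when resuming a partial download.
+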
+ def _load_pkgindex(self, objectsstor=False):
+ pkgindex = self._new_pkgindex(objectsstor=objectsstor)
+ pkgindex.read()
+ return pkgindex
+
+ def _get_digests(self, pkg):
+
+ try:
+ cpv = pkg.cpv
+ except AttributeError:
+ cpv = pkg
+
+ _instance_key = self.dbapi._instance_key
+ instance_key = _instance_key(cpv)
+ digests = {}
+ metadata = (None if self._remotepkgs is None else
+ self._remotepkgs.get(instance_key))
+ if metadata is None:
+ for d in self._load_pkgindex().packages:
+ if (d["CPV"] == cpv and
+ instance_key == _instance_key(_pkg_str(d["CPV"],
+ metadata=d, settings=self.settings))):
+ metadata = d
+ break
+
+ if metadata is None:
+ return digests
+
+ for k in get_valid_checksum_keys():
+ v = metadata.get(k)
+ if not v:
+ continue
+ digests[k] = v
+
+ if "SIZE" in metadata:
+ try:
+ digests["size"] = int(metadata["SIZE"])
+ except ValueError:
+ writemsg(_("!!! Malformed SIZE attribute in remote " \
+ "metadata for '%s'\n") % cpv)
+
+ return digests
+
+ def digestCheck(self, pkg):
+ """
+ Verify digests for the given package and raise DigestException
+ if verification fails.
+ @rtype: bool
+ @return: True if digests could be located, False otherwise.
+ """
+
+ digests = self._get_digests(pkg)
+
+ if not digests:
+ return False
+
+ try:
+ cpv = pkg.cpv
+ except AttributeError:
+ cpv = pkg
+
+ pkg_path = self.getname(cpv)
+ hash_filter = _hash_filter(
+ self.settings.get("PORTAGE_CHECKSUM_FILTER", ""))
+ if not hash_filter.transparent:
+ digests = _apply_hash_filter(digests, hash_filter)
+ eout = EOutput()
+ eout.quiet = self.settings.get("PORTAGE_QUIET") == "1"
+ ok, st = _check_distfile(pkg_path, digests, eout, show_errors=0)
+ if not ok:
+ ok, reason = verify_all(pkg_path, digests)
+ if not ok:
+ raise portage.exception.DigestException(
+ (pkg_path,) + tuple(reason))
+
+ return True
+
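+ # Usage sketch for digestCheck() above (the `bintree` variable is
+ # hypothetical): a False return means no digests were found, while a
+ # mismatch raises portage.exception.DigestException:
+ #
+ #   if not bintree.digestCheck(pkg):
+ #       writemsg("!!! No digests available for %s\n" % pkg.cpv,
+ #           noiselevel=-1)
+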
+ def getslot(self, mycatpkg):
+ "Get a slot for a catpkg; assume it exists."
+ myslot = ""
+ try:
+ myslot = self.dbapi._pkg_str(mycatpkg, None).slot
+ except KeyError:
+ pass
+ return myslot
+
+ def touch(self, pkg, objectsstor=False):
+ touch_pkg_in_db(self.context, pkg, objectsstor=objectsstor)
diff --git a/gosbs/tasks/builder/build_pkg.py b/gosbs/tasks/builder/build_pkg.py
index 240b201..5b39d4d 100644
--- a/gosbs/tasks/builder/build_pkg.py
+++ b/gosbs/tasks/builder/build_pkg.py
@@ -9,7 +9,7 @@ from gosbs import objects
from gosbs._emerge.actions import load_emerge_config
from gosbs._emerge.main import emerge_main
from gosbs.common.flags import get_build_use
-from gosbs.common.binary import destroy_local_binary, destroy_objectstor_binary
+from gosbs.builder.binary import destroy_local_binary
from gosbs.builder.depclean import do_depclean
import gosbs.conf
@@ -119,7 +119,7 @@ def set_use_packages(context, build_job, build_use):
def destroy_binary(context, build_job, mysettings, project_db, service_uuid):
destroy_local_binary(context, build_job, project_db, service_uuid, mysettings)
- destroy_objectstor_binary(context, build_job, project_db)
+ #destroy_objectstor_binary(context, build_job, project_db)
def emeerge_cmd_options(context, build_job, project_options_db):
argscmd = []